code (string, lengths 3-1.04M) | repo_name (string, lengths 5-109) | path (string, lengths 6-306) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.04M) |
---|---|---|---|---|---|
/*******************************************************************************
* Copyright (c) 2012-2013 University of Stuttgart.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and the Apache License 2.0 which both accompany this distribution,
* and are available at http://www.eclipse.org/legal/epl-v10.html
* and http://www.apache.org/licenses/LICENSE-2.0
*
* Contributors:
* Oliver Kopp - initial API and implementation
*******************************************************************************/
/**
* This package contains the REST resources
*
 * Mostly, they produce Viewables, where a JSP and the current resource are
 * passed. As the JSP itself handles plain Java objects and not Responses, the
 * resources also have methods returning POJOs. This might be ugly design, but
 * it was quick to implement.
*
* The package structure is mirrored in src/main/webapp/jsp to ease finding the
* JSPs belonging to a resource.
*
* The resources are <em>not</em> in line with the resource model of the TOSCA
* container. Especially, we do not employ HATEOAS here.
*/
package org.eclipse.winery.repository.resources;
| YannicSowoidnich/winery | org.eclipse.winery.repository/src/main/java/org/eclipse/winery/repository/resources/package-info.java | Java | apache-2.0 | 1,234 |
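The package documentation above describes resources that return Viewables pairing a JSP with the current resource. A minimal sketch of that pattern, assuming Jersey 1.x MVC; the class name and JSP path below are illustrative and not taken from Winery:

import javax.ws.rs.GET;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import com.sun.jersey.api.view.Viewable;

public class ExampleResource {
    @GET
    @Produces(MediaType.TEXT_HTML)
    public Viewable getHtml() {
        // The JSP under src/main/webapp/jsp mirrors this package; the resource
        // itself is passed as the model so the JSP can call its plain getters.
        return new Viewable("/jsp/exampleResource.jsp", this);
    }

    // POJO-returning accessor used directly by the JSP, as the package javadoc notes.
    public String getName() {
        return "example";
    }
}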
/**
*
Package: MAG - VistA Imaging
WARNING: Per VHA Directive 2004-038, this routine should not be modified.
Date Created: Jul 10, 2012
Site Name: Washington OI Field Office, Silver Spring, MD
Developer: VHAISWWERFEJ
Description:
;; +--------------------------------------------------------------------+
;; Property of the US Government.
;; No permission to copy or redistribute this software is given.
;; Use of unreleased versions of this software requires the user
;; to execute a written test agreement with the VistA Imaging
;; Development Office of the Department of Veterans Affairs,
;; telephone (301) 734-0100.
;;
;; The Food and Drug Administration classifies this software as
;; a Class II medical device. As such, it may not be changed
;; in any way. Modifications to this software may result in an
;; adulterated medical device under 21CFR820, the use of which
;; is considered to be a violation of US Federal Statutes.
;; +--------------------------------------------------------------------+
*/
package gov.va.med.imaging.pathology.rest.translator;
import java.util.Date;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* @author VHAISWWERFEJ
*
*/
public class PathologyRestTranslatorTest
{
@Test
public void testDateTranslation()
{
try
{
Date date = PathologyRestTranslator.translateDate("201207101435");
System.out.println("Date: " + date);
}
catch(Exception ex)
{
ex.printStackTrace();
fail(ex.getMessage());
}
}
}
| VHAINNOVATIONS/Telepathology | Source/Java/PathologyWebApp/main/test/java/gov/va/med/imaging/pathology/rest/translator/PathologyRestTranslatorTest.java | Java | apache-2.0 | 1,621 |
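The test above only asserts that PathologyRestTranslator.translateDate("201207101435") does not throw. Judging by that input, the value looks like a yyyyMMddHHmm timestamp; a standalone sketch of such a parse (the format is an assumption, not taken from the VistA source):

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

public class DateParseSketch {
    public static Date parseVistaStyleDate(String value) throws ParseException {
        // Assumed layout: 4-digit year, month, day, hour, minute ("201207101435").
        SimpleDateFormat format = new SimpleDateFormat("yyyyMMddHHmm");
        format.setLenient(false); // reject values that do not match the pattern exactly
        return format.parse(value);
    }

    public static void main(String[] args) throws ParseException {
        System.out.println(parseVistaStyleDate("201207101435"));
    }
}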
package io.quarkus.grpc.examples.hello;
import static io.restassured.RestAssured.get;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.jupiter.api.Test;
import io.quarkus.test.junit.QuarkusTest;
@QuarkusTest
class HelloWorldMutualTlsEndpointTest {
@Test
public void testHelloWorldServiceUsingBlockingStub() {
String response = get("/hello/blocking/neo").asString();
assertThat(response).isEqualTo("Hello neo");
}
@Test
public void testHelloWorldServiceUsingMutinyStub() {
String response = get("/hello/mutiny/neo-mutiny").asString();
assertThat(response).isEqualTo("Hello neo-mutiny");
}
}
| quarkusio/quarkus | integration-tests/grpc-mutual-auth/src/test/java/io/quarkus/grpc/examples/hello/HelloWorldMutualTlsEndpointTest.java | Java | apache-2.0 | 679 |
/**
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.filter;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import com.google.common.collect.Lists;
/**
*/
@Category(MediumTests.class)
public class TestFuzzyRowAndColumnRangeFilter {
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final Log LOG = LogFactory.getLog(this.getClass());
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception {
// Nothing to do.
}
/**
* @throws java.lang.Exception
*/
@After
public void tearDown() throws Exception {
// Nothing to do.
}
@Test
public void Test() throws Exception {
String cf = "f";
String table = "TestFuzzyAndColumnRangeFilterClient";
HTable ht = TEST_UTIL.createTable(Bytes.toBytes(table),
Bytes.toBytes(cf), Integer.MAX_VALUE);
// 10 byte row key - (2 bytes 4 bytes 4 bytes)
// 4 byte qualifier
// 4 byte value
for (int i1 = 0; i1 < 2; i1++) {
for (int i2 = 0; i2 < 5; i2++) {
byte[] rk = new byte[10];
ByteBuffer buf = ByteBuffer.wrap(rk);
buf.clear();
buf.putShort((short) 2);
buf.putInt(i1);
buf.putInt(i2);
for (int c = 0; c < 5; c++) {
byte[] cq = new byte[4];
Bytes.putBytes(cq, 0, Bytes.toBytes(c), 0, 4);
Put p = new Put(rk);
p.setDurability(Durability.SKIP_WAL);
p.add(cf.getBytes(), cq, Bytes.toBytes(c));
ht.put(p);
LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: "
+ Bytes.toStringBinary(cq));
}
}
}
TEST_UTIL.flush();
// test passes
runTest(ht, 0, 10);
// test fails
runTest(ht, 1, 8);
}
private void runTest(HTable hTable, int cqStart, int expectedSize) throws IOException {
// [0, 2, ?, ?, ?, ?, 0, 0, 0, 1]
byte[] fuzzyKey = new byte[10];
ByteBuffer buf = ByteBuffer.wrap(fuzzyKey);
buf.clear();
buf.putShort((short) 2);
for (int i = 0; i < 4; i++)
buf.put((byte)63);
buf.putInt((short)1);
byte[] mask = new byte[] {0 , 0, 1, 1, 1, 1, 0, 0, 0, 0};
Pair<byte[], byte[]> pair = new Pair<byte[], byte[]>(fuzzyKey, mask);
FuzzyRowFilter fuzzyRowFilter = new FuzzyRowFilter(Lists.newArrayList(pair));
ColumnRangeFilter columnRangeFilter = new ColumnRangeFilter(Bytes.toBytes(cqStart), true
, Bytes.toBytes(4), true);
//regular test
runScanner(hTable, expectedSize, fuzzyRowFilter, columnRangeFilter);
//reverse filter order test
runScanner(hTable, expectedSize, columnRangeFilter, fuzzyRowFilter);
}
private void runScanner(HTable hTable, int expectedSize, Filter... filters) throws IOException {
String cf = "f";
Scan scan = new Scan();
scan.addFamily(cf.getBytes());
FilterList filterList = new FilterList(filters);
scan.setFilter(filterList);
ResultScanner scanner = hTable.getScanner(scan);
List<Cell> results = new ArrayList<Cell>();
Result result;
long timeBeforeScan = System.currentTimeMillis();
while ((result = scanner.next()) != null) {
for (Cell kv : result.listCells()) {
LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: "
+ Bytes.toStringBinary(CellUtil.cloneQualifier(kv)));
results.add(kv);
}
}
long scanTime = System.currentTimeMillis() - timeBeforeScan;
scanner.close();
LOG.info("scan time = " + scanTime + "ms");
LOG.info("found " + results.size() + " results");
assertEquals(expectedSize, results.size());
}
}
| intel-hadoop/hbase-rhino | hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java | Java | apache-2.0 | 5,447 |
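In runTest above, the FuzzyRowFilter is built from a template row key plus a mask. As I read the FuzzyRowFilter contract, a 0 in the mask means the byte at that position must equal the template byte and a 1 means the position is a wildcard; a small annotated sketch of the same pair under that reading:

import java.util.Arrays;

public class FuzzyMaskSketch {
    public static void main(String[] args) {
        // Template row key from runTest: short 2, four wildcard bytes, then int 1.
        byte[] fuzzyKey = {0, 2, '?', '?', '?', '?', 0, 0, 0, 1};
        // Mask: 0 = byte must match the template, 1 = any byte is accepted
        // (assumed FuzzyRowFilter convention, consistent with the expected counts in the test).
        byte[] mask = {0, 0, 1, 1, 1, 1, 0, 0, 0, 0};
        // Rows match when the leading short is 2 and the trailing int is 1,
        // regardless of the four bytes in between (the i1 field written by the test).
        System.out.println(Arrays.toString(fuzzyKey) + " / " + Arrays.toString(mask));
    }
}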
package pl.matisoft.soy.config;
import com.google.template.soy.jssrc.SoyJsSrcOptions;
import com.google.template.soy.tofu.SoyTofuOptions;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.context.support.ServletContextResource;
import org.springframework.web.servlet.ViewResolver;
import pl.matisoft.soy.ContentNegotiator;
import pl.matisoft.soy.DefaultContentNegotiator;
import pl.matisoft.soy.SoyTemplateViewResolver;
import pl.matisoft.soy.bundle.DefaultSoyMsgBundleResolver;
import pl.matisoft.soy.bundle.SoyMsgBundleResolver;
import pl.matisoft.soy.compile.DefaultTofuCompiler;
import pl.matisoft.soy.compile.TofuCompiler;
import pl.matisoft.soy.data.DefaultToSoyDataConverter;
import pl.matisoft.soy.data.ToSoyDataConverter;
import pl.matisoft.soy.data.adjust.ModelAdjuster;
import pl.matisoft.soy.data.adjust.SpringModelAdjuster;
import pl.matisoft.soy.global.compile.CompileTimeGlobalModelResolver;
import pl.matisoft.soy.global.compile.EmptyCompileTimeGlobalModelResolver;
import pl.matisoft.soy.global.runtime.EmptyGlobalRuntimeModelResolver;
import pl.matisoft.soy.global.runtime.GlobalRuntimeModelResolver;
import pl.matisoft.soy.holder.CompiledTemplatesHolder;
import pl.matisoft.soy.holder.DefaultCompiledTemplatesHolder;
import pl.matisoft.soy.locale.LocaleProvider;
import pl.matisoft.soy.locale.SpringLocaleProvider;
import pl.matisoft.soy.render.DefaultTemplateRenderer;
import pl.matisoft.soy.render.TemplateRenderer;
import pl.matisoft.soy.template.DefaultTemplateFilesResolver;
import pl.matisoft.soy.template.TemplateFilesResolver;
import javax.inject.Inject;
import javax.servlet.ServletContext;
/**
* Created with IntelliJ IDEA.
* User: mati
* Date: 12/11/2013
* Time: 19:55
*/
@Configuration
public class SpringSoyViewBaseConfig {
@Value("${soy.hot.reload.mode:false}")
private boolean hotReloadMode;
@Value("${soy.templates.resolve.recursively:true}")
private boolean recursive;
@Value("${soy.templates.file.extension:soy}")
private String fileExtension;
@Value("${soy.templates.directory:/WEB-INF/templates}")
private String templatesPath;
@Value("${soy.i18n.xliff.path:xliffs/messages}")
private String messagesPath;
@Value("${soy.encoding:utf-8}")
private String encoding;
@Value("${soy.i18n.fallback.to.english:true}")
private boolean fallbackToEnglish;
@Value("${soy.preCompile.templates:false}")
private boolean preCompileTemplates;
@Value("${soy.indexView:index}")
private String indexView;
@Value("${soy.logical.prefix:soy:}")
private String logicalPrefix;
@Value("${soy.resolver.order:2147483647}")
private int order;
@Inject
private ServletContext servletContext;
@Bean
public LocaleProvider soyLocaleProvider() {
return new SpringLocaleProvider();
}
@Bean
public DefaultTemplateFilesResolver soyTemplateFilesResolver() throws Exception {
final DefaultTemplateFilesResolver defaultTemplateFilesResolver = new DefaultTemplateFilesResolver();
defaultTemplateFilesResolver.setHotReloadMode(hotReloadMode);
defaultTemplateFilesResolver.setRecursive(recursive);
defaultTemplateFilesResolver.setFilesExtension(fileExtension);
defaultTemplateFilesResolver.setTemplatesLocation(new ServletContextResource(servletContext, templatesPath));
return defaultTemplateFilesResolver;
}
@Bean
public CompileTimeGlobalModelResolver soyCompileTimeGlobalModelResolver() {
return new EmptyCompileTimeGlobalModelResolver();
}
@Bean
public ToSoyDataConverter soyToSoyDataConverter() {
return new DefaultToSoyDataConverter();
}
@Bean
public SoyJsSrcOptions soyJsSourceOptions() {
return new SoyJsSrcOptions();
}
@Bean
public SoyTofuOptions soyTofuOptions() {
final SoyTofuOptions soyTofuOptions = new SoyTofuOptions();
soyTofuOptions.setUseCaching(!hotReloadMode);
return soyTofuOptions;
}
@Bean
public TofuCompiler soyTofuCompiler(final CompileTimeGlobalModelResolver compileTimeGlobalModelResolver, final SoyJsSrcOptions soyJsSrcOptions, final SoyTofuOptions soyTofuOptions) {
final DefaultTofuCompiler defaultTofuCompiler = new DefaultTofuCompiler();
defaultTofuCompiler.setHotReloadMode(hotReloadMode);
defaultTofuCompiler.setCompileTimeGlobalModelResolver(compileTimeGlobalModelResolver);
defaultTofuCompiler.setSoyJsSrcOptions(soyJsSrcOptions);
defaultTofuCompiler.setSoyTofuOptions(soyTofuOptions);
return defaultTofuCompiler;
}
@Bean
public SoyMsgBundleResolver soyMsgBundleResolver() {
final DefaultSoyMsgBundleResolver defaultSoyMsgBundleResolver = new DefaultSoyMsgBundleResolver();
defaultSoyMsgBundleResolver.setHotReloadMode(hotReloadMode);
defaultSoyMsgBundleResolver.setMessagesPath(messagesPath);
defaultSoyMsgBundleResolver.setFallbackToEnglish(fallbackToEnglish);
return defaultSoyMsgBundleResolver;
}
@Bean
public CompiledTemplatesHolder soyTemplatesHolder(final TemplateFilesResolver templateFilesResolver, final TofuCompiler tofuCompiler) throws Exception {
final DefaultCompiledTemplatesHolder defaultCompiledTemplatesHolder = new DefaultCompiledTemplatesHolder();
defaultCompiledTemplatesHolder.setHotReloadMode(hotReloadMode);
defaultCompiledTemplatesHolder.setPreCompileTemplates(preCompileTemplates);
defaultCompiledTemplatesHolder.setTemplatesFileResolver(templateFilesResolver);
defaultCompiledTemplatesHolder.setTofuCompiler(tofuCompiler);
return defaultCompiledTemplatesHolder;
}
@Bean
public TemplateRenderer soyTemplateRenderer(final ToSoyDataConverter toSoyDataConverter) {
final DefaultTemplateRenderer defaultTemplateRenderer = new DefaultTemplateRenderer();
defaultTemplateRenderer.setHotReloadMode(hotReloadMode);
defaultTemplateRenderer.setToSoyDataConverter(toSoyDataConverter);
return defaultTemplateRenderer;
}
@Bean
public ModelAdjuster soySpringModelAdjuster() {
return new SpringModelAdjuster();
}
@Bean
public GlobalRuntimeModelResolver soyGlobalRuntimeModelResolver() {
return new EmptyGlobalRuntimeModelResolver();
}
@Bean
public ContentNegotiator contentNegotiator() {
return new DefaultContentNegotiator();
}
@Bean
public ViewResolver soyViewResolver(final CompiledTemplatesHolder compiledTemplatesHolder,
final ModelAdjuster modelAdjuster,
final TemplateRenderer templateRenderer,
final LocaleProvider localeProvider,
final GlobalRuntimeModelResolver globalRuntimeModelResolver,
final ContentNegotiator contentNegotiator,
final SoyMsgBundleResolver msgBundleResolver)
throws Exception {
final SoyTemplateViewResolver soyTemplateViewResolver = new SoyTemplateViewResolver();
soyTemplateViewResolver.setSoyMsgBundleResolver(msgBundleResolver);
soyTemplateViewResolver.setCompiledTemplatesHolder(compiledTemplatesHolder);
soyTemplateViewResolver.setEncoding(encoding);
soyTemplateViewResolver.setGlobalRuntimeModelResolver(globalRuntimeModelResolver);
soyTemplateViewResolver.setHotReloadMode(hotReloadMode);
soyTemplateViewResolver.setIndexView(indexView);
soyTemplateViewResolver.setLocaleProvider(localeProvider);
soyTemplateViewResolver.setModelAdjuster(modelAdjuster);
soyTemplateViewResolver.setTemplateRenderer(templateRenderer);
soyTemplateViewResolver.setPrefix(logicalPrefix);
soyTemplateViewResolver.setOrder(order);
soyTemplateViewResolver.setRedirectContextRelative(true);
soyTemplateViewResolver.setRedirectHttp10Compatible(true);
soyTemplateViewResolver.setContentNegotiator(contentNegotiator);
return soyTemplateViewResolver;
}
}
| matiwinnetou/spring-soy-view | spring-soy-view/src/main/java/pl/matisoft/soy/config/SpringSoyViewBaseConfig.java | Java | apache-2.0 | 8,365 |
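A minimal sketch of wiring this configuration into an application, assuming a standard Spring MVC setup; the importing class is illustrative and not part of the library:

import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;

import pl.matisoft.soy.config.SpringSoyViewBaseConfig;

// Pulls in soyViewResolver and its collaborators. The @Value placeholders in
// SpringSoyViewBaseConfig fall back to the defaults shown above (templates under
// /WEB-INF/templates, ".soy" extension, ...) unless properties such as
// soy.hot.reload.mode are supplied by the environment.
@Configuration
@Import(SpringSoyViewBaseConfig.class)
public class WebConfig {
}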
package userstoreauth.servlets;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import userstoreauth.model.UserVer2;
import userstoreauth.service.UserStoreMb;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.sql.Timestamp;
import java.time.LocalDateTime;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
class EditUserTest {
@BeforeEach
void setUp() {
UserStoreMb us = new UserStoreMb();
us.deleteAll();
}
@Test
void editUser() throws ServletException, IOException {
EditUser editUser = new EditUser();
UserStoreMb us = new UserStoreMb();
HttpServletRequest request = mock(HttpServletRequest.class);
HttpServletResponse response = mock(HttpServletResponse.class);
when(request.getParameter("login")).thenReturn("login");
when(request.getParameter("password")).thenReturn("password0");
when(request.getParameter("name")).thenReturn("name0");
when(request.getParameter("email")).thenReturn("email0");
when(request.getParameter("role")).thenReturn("admin");
when(request.getParameter("country")).thenReturn("Россия");
when(request.getParameter("city")).thenReturn("Москва");
UserVer2 user = new UserVer2("login", "password", "name", "email", "Россия", "Москва", Timestamp.valueOf(LocalDateTime.now()), "user");
us.addUser(user);
assertEquals(user, us.getByLogin("login"));
editUser.doPost(request, response);
user.setPassword("password0");
user.setName("name0");
user.setEmail("email0");
user.setRole("admin");
assertEquals(user, us.getByLogin("login"));
}
}
| HeTyDeHer/ZapovA | chapter_009/src/test/java/userstoreauth/servlets/EditUserTest.java | Java | apache-2.0 | 1,929 |
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.execution.testframework.sm.runner;
import com.intellij.execution.testframework.sm.SMTestRunnerConnectionUtil;
import com.intellij.execution.testframework.sm.runner.events.*;
import com.intellij.openapi.application.Application;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Key;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.TestOnly;
import java.util.*;
/**
* This class fires events to SMTRunnerEventsListener in event dispatch thread.
*
 * @author Roman Chernyatchik
*/
public class GeneralToSMTRunnerEventsConvertor extends GeneralTestEventsProcessor {
private final Map<String, SMTestProxy> myRunningTestsFullNameToProxy = ContainerUtil.newConcurrentMap();
private final TestSuiteStack mySuitesStack;
private final Map<String, List<SMTestProxy>> myCurrentChildren = new HashMap<>();
private boolean myIsTestingFinished;
public GeneralToSMTRunnerEventsConvertor(Project project, @NotNull SMTestProxy.SMRootTestProxy testsRootNode,
@NotNull String testFrameworkName) {
super(project, testFrameworkName, testsRootNode);
mySuitesStack = new TestSuiteStack(testFrameworkName);
}
@Override
protected SMTestProxy createProxy(String testName, String locationHint, String metaInfo, String id, String parentNodeId) {
SMTestProxy proxy = super.createProxy(testName, locationHint, metaInfo, id, parentNodeId);
SMTestProxy currentSuite = getCurrentSuite();
currentSuite.addChild(proxy);
return proxy;
}
@Override
protected SMTestProxy createSuite(String suiteName, String locationHint, String metaInfo, String id, String parentNodeId) {
SMTestProxy newSuite = super.createSuite(suiteName, locationHint, metaInfo, id, parentNodeId);
final SMTestProxy parentSuite = getCurrentSuite();
parentSuite.addChild(newSuite);
mySuitesStack.pushSuite(newSuite);
return newSuite;
}
@Override
public void onSuiteTreeEnded(String suiteName) {
myBuildTreeRunnables.add(() -> mySuitesStack.popSuite(suiteName));
super.onSuiteTreeEnded(suiteName);
}
@Override
public void onStartTesting() {
mySuitesStack.pushSuite(myTestsRootProxy);
myTestsRootProxy.setStarted();
//fire
fireOnTestingStarted(myTestsRootProxy);
}
@Override
public void onTestsReporterAttached() {
fireOnTestsReporterAttached(myTestsRootProxy);
}
@Override
public void onFinishTesting() {
fireOnBeforeTestingFinished(myTestsRootProxy);
if (myIsTestingFinished) {
// has been already invoked!
return;
}
myIsTestingFinished = true;
// We don't know whether process was destroyed by user
// or it finished after all tests have been run
// Lets assume, if at finish all suites except root suite are passed
// then all is ok otherwise process was terminated by user
if (!isTreeComplete(myRunningTestsFullNameToProxy.keySet(), myTestsRootProxy)) {
myTestsRootProxy.setTerminated();
myRunningTestsFullNameToProxy.clear();
}
mySuitesStack.clear();
myTestsRootProxy.setFinished();
myCurrentChildren.clear();
//fire events
fireOnTestingFinished(myTestsRootProxy);
super.onFinishTesting();
}
@Override
public void setPrinterProvider(@NotNull TestProxyPrinterProvider printerProvider) {
}
@Override
public void onTestStarted(@NotNull final TestStartedEvent testStartedEvent) {
final String testName = testStartedEvent.getName();
final String locationUrl = testStartedEvent.getLocationUrl();
final boolean isConfig = testStartedEvent.isConfig();
final String fullName = getFullTestName(testName);
if (myRunningTestsFullNameToProxy.containsKey(fullName)) {
//Duplicated event
logProblem("Test [" + fullName + "] has been already started");
if (SMTestRunnerConnectionUtil.isInDebugMode()) {
return;
}
}
SMTestProxy parentSuite = getCurrentSuite();
SMTestProxy testProxy = findChild(parentSuite, locationUrl != null ? locationUrl : fullName, false);
if (testProxy == null) {
// creates test
testProxy = new SMTestProxy(testName, false, locationUrl, testStartedEvent.getMetainfo(), false);
testProxy.setConfig(isConfig);
if (myTreeBuildBeforeStart) testProxy.setTreeBuildBeforeStart();
if (myLocator != null) {
testProxy.setLocator(myLocator);
}
parentSuite.addChild(testProxy);
}
// adds to running tests map
myRunningTestsFullNameToProxy.put(fullName, testProxy);
//Progress started
testProxy.setStarted();
//fire events
fireOnTestStarted(testProxy);
}
@Override
public void onSuiteStarted(@NotNull final TestSuiteStartedEvent suiteStartedEvent) {
final String suiteName = suiteStartedEvent.getName();
final String locationUrl = suiteStartedEvent.getLocationUrl();
SMTestProxy parentSuite = getCurrentSuite();
SMTestProxy newSuite = findChild(parentSuite, locationUrl != null ? locationUrl : suiteName, true);
if (newSuite == null) {
//new suite
newSuite = new SMTestProxy(suiteName, true, locationUrl, suiteStartedEvent.getMetainfo(), parentSuite.isPreservePresentableName());
if (myTreeBuildBeforeStart) {
newSuite.setTreeBuildBeforeStart();
}
if (myLocator != null) {
newSuite.setLocator(myLocator);
}
parentSuite.addChild(newSuite);
}
initCurrentChildren(newSuite, true);
mySuitesStack.pushSuite(newSuite);
//Progress started
newSuite.setSuiteStarted();
//fire event
fireOnSuiteStarted(newSuite);
}
private void initCurrentChildren(SMTestProxy newSuite, boolean preferSuite) {
if (myTreeBuildBeforeStart) {
for (SMTestProxy proxy : newSuite.getChildren()) {
if (!proxy.isFinal() || preferSuite && proxy.isSuite()) {
String url = proxy.getLocationUrl();
if (url != null) {
myCurrentChildren.computeIfAbsent(url, l -> new ArrayList<>()).add(proxy);
}
myCurrentChildren.computeIfAbsent(proxy.getName(), l -> new ArrayList<>()).add(proxy);
}
}
}
}
private SMTestProxy findChild(SMTestProxy parentSuite, String fullName, boolean preferSuite) {
if (myTreeBuildBeforeStart) {
Set<SMTestProxy> acceptedProxies = new LinkedHashSet<>();
Collection<? extends SMTestProxy> children = myCurrentChildren.get(fullName);
if (children == null) {
initCurrentChildren(parentSuite, preferSuite);
children = myCurrentChildren.get(fullName);
}
if (children != null) { //null if child started second time
for (SMTestProxy proxy : children) {
if (!proxy.isFinal() || preferSuite && proxy.isSuite()) {
acceptedProxies.add(proxy);
}
}
if (!acceptedProxies.isEmpty()) {
return acceptedProxies.stream()
.filter(proxy -> proxy.isSuite() == preferSuite && proxy.getParent() == parentSuite)
.findFirst()
.orElse(acceptedProxies.iterator().next());
}
}
}
return null;
}
@Override
public void onTestFinished(@NotNull final TestFinishedEvent testFinishedEvent) {
final String testName = testFinishedEvent.getName();
final Long duration = testFinishedEvent.getDuration();
final String fullTestName = getFullTestName(testName);
final SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
logProblem("Test wasn't started! TestFinished event: name = {" + testName + "}. " +
cannotFindFullTestNameMsg(fullTestName));
return;
}
testProxy.setDuration(duration != null ? duration : 0);
testProxy.setFrameworkOutputFile(testFinishedEvent.getOutputFile());
testProxy.setFinished();
myRunningTestsFullNameToProxy.remove(fullTestName);
clearCurrentChildren(fullTestName, testProxy);
//fire events
fireOnTestFinished(testProxy);
}
private void clearCurrentChildren(String fullTestName, SMTestProxy testProxy) {
myCurrentChildren.remove(fullTestName);
String url = testProxy.getLocationUrl();
if (url != null) {
myCurrentChildren.remove(url);
}
}
@Override
public void onSuiteFinished(@NotNull final TestSuiteFinishedEvent suiteFinishedEvent) {
final String suiteName = suiteFinishedEvent.getName();
final SMTestProxy mySuite = mySuitesStack.popSuite(suiteName);
if (mySuite != null) {
mySuite.setFinished();
myCurrentChildren.remove(suiteName);
String locationUrl = mySuite.getLocationUrl();
if (locationUrl != null) {
myCurrentChildren.remove(locationUrl);
}
//fire events
fireOnSuiteFinished(mySuite);
}
}
@Override
public void onUncapturedOutput(@NotNull final String text, final Key outputType) {
final SMTestProxy currentProxy = findCurrentTestOrSuite();
currentProxy.addOutput(text, outputType);
}
@Override
public void onError(@NotNull final String localizedMessage,
@Nullable final String stackTrace,
final boolean isCritical) {
final SMTestProxy currentProxy = findCurrentTestOrSuite();
currentProxy.addError(localizedMessage, stackTrace, isCritical);
}
@Override
public void onTestFailure(@NotNull final TestFailedEvent testFailedEvent) {
final String testName = testFailedEvent.getName();
if (testName == null) {
logProblem("No test name specified in " + testFailedEvent);
return;
}
final String localizedMessage = testFailedEvent.getLocalizedFailureMessage();
final String stackTrace = testFailedEvent.getStacktrace();
final boolean isTestError = testFailedEvent.isTestError();
final String comparisionFailureActualText = testFailedEvent.getComparisonFailureActualText();
final String comparisionFailureExpectedText = testFailedEvent.getComparisonFailureExpectedText();
final boolean inDebugMode = SMTestRunnerConnectionUtil.isInDebugMode();
final String fullTestName = getFullTestName(testName);
SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
logProblem("Test wasn't started! TestFailure event: name = {" + testName + "}" +
", message = {" + localizedMessage + "}" +
", stackTrace = {" + stackTrace + "}. " +
cannotFindFullTestNameMsg(fullTestName));
if (inDebugMode) {
return;
}
else {
// if hasn't been already reported
// 1. report
onTestStarted(new TestStartedEvent(testName, null));
// 2. add failure
testProxy = getProxyByFullTestName(fullTestName);
}
}
if (testProxy == null) {
return;
}
if (comparisionFailureActualText != null && comparisionFailureExpectedText != null) {
testProxy.setTestComparisonFailed(localizedMessage, stackTrace, comparisionFailureActualText, comparisionFailureExpectedText,
testFailedEvent);
}
else if (comparisionFailureActualText == null && comparisionFailureExpectedText == null) {
testProxy.setTestFailed(localizedMessage, stackTrace, isTestError);
}
else {
testProxy.setTestFailed(localizedMessage, stackTrace, isTestError);
logProblem("Comparison failure actual and expected texts should be both null or not null.\n"
+ "Expected:\n"
+ comparisionFailureExpectedText + "\n"
+ "Actual:\n"
+ comparisionFailureActualText);
}
// fire event
fireOnTestFailed(testProxy);
}
@Override
public void onTestIgnored(@NotNull final TestIgnoredEvent testIgnoredEvent) {
final String testName = testIgnoredEvent.getName();
if (testName == null) {
logProblem("TestIgnored event: no name");
}
String ignoreComment = testIgnoredEvent.getIgnoreComment();
final String stackTrace = testIgnoredEvent.getStacktrace();
final String fullTestName = getFullTestName(testName);
SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
final boolean debugMode = SMTestRunnerConnectionUtil.isInDebugMode();
logProblem("Test wasn't started! " +
"TestIgnored event: name = {" + testName + "}, " +
"message = {" + ignoreComment + "}. " +
cannotFindFullTestNameMsg(fullTestName));
if (debugMode) {
return;
}
else {
// try to fix
// 1. report test opened
onTestStarted(new TestStartedEvent(testName, null));
// 2. report failure
testProxy = getProxyByFullTestName(fullTestName);
}
}
if (testProxy == null) {
return;
}
testProxy.setTestIgnored(ignoreComment, stackTrace);
// fire event
fireOnTestIgnored(testProxy);
}
@Override
public void onTestOutput(@NotNull final TestOutputEvent testOutputEvent) {
final String testName = testOutputEvent.getName();
final String text = testOutputEvent.getText();
final Key outputType = testOutputEvent.getOutputType();
final String fullTestName = getFullTestName(testName);
final SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
logProblem("Test wasn't started! TestOutput event: name = {" + testName + "}, " +
"outputType = " + outputType + ", " +
"text = {" + text + "}. " +
cannotFindFullTestNameMsg(fullTestName));
return;
}
testProxy.addOutput(text, outputType);
}
@Override
public void onTestsCountInSuite(final int count) {
fireOnTestsCountInSuite(count);
}
@NotNull
protected final SMTestProxy getCurrentSuite() {
final SMTestProxy currentSuite = mySuitesStack.getCurrentSuite();
if (currentSuite != null) {
return currentSuite;
}
// current suite shouldn't be null otherwise test runner isn't correct
    // or maybe we are in debug mode
logProblem("Current suite is undefined. Root suite will be used.");
return myTestsRootProxy;
}
protected String getFullTestName(final String testName) {
// Test name should be unique
return testName;
}
protected int getRunningTestsQuantity() {
return myRunningTestsFullNameToProxy.size();
}
@Nullable
protected SMTestProxy getProxyByFullTestName(final String fullTestName) {
return myRunningTestsFullNameToProxy.get(fullTestName);
}
@TestOnly
protected void clearInternalSuitesStack() {
mySuitesStack.clear();
}
private String cannotFindFullTestNameMsg(String fullTestName) {
return "Cant find running test for ["
+ fullTestName
+ "]. Current running tests: {"
+ dumpRunningTestsNames() + "}";
}
private StringBuilder dumpRunningTestsNames() {
final Set<String> names = myRunningTestsFullNameToProxy.keySet();
final StringBuilder namesDump = new StringBuilder();
for (String name : names) {
namesDump.append('[').append(name).append(']').append(',');
}
return namesDump;
}
/*
* Remove listeners, etc
*/
@Override
public void dispose() {
super.dispose();
if (!myRunningTestsFullNameToProxy.isEmpty()) {
final Application application = ApplicationManager.getApplication();
if (!application.isHeadlessEnvironment() && !application.isUnitTestMode()) {
logProblem("Not all events were processed! " + dumpRunningTestsNames());
}
}
myRunningTestsFullNameToProxy.clear();
mySuitesStack.clear();
}
private SMTestProxy findCurrentTestOrSuite() {
//if we can locate test - we will send output to it, otherwise to current test suite
SMTestProxy currentProxy = null;
Iterator<SMTestProxy> iterator = myRunningTestsFullNameToProxy.values().iterator();
if (iterator.hasNext()) {
//current test
currentProxy = iterator.next();
      if (iterator.hasNext()) { //if there are multiple tests running, put the output to the suite
currentProxy = null;
}
}
if (currentProxy == null) {
//current suite
//
// ProcessHandler can fire output available event before processStarted event
final SMTestProxy currentSuite = mySuitesStack.getCurrentSuite();
currentProxy = currentSuite != null ? currentSuite : myTestsRootProxy;
}
return currentProxy;
}
}
| msebire/intellij-community | platform/smRunner/src/com/intellij/execution/testframework/sm/runner/GeneralToSMTRunnerEventsConvertor.java | Java | apache-2.0 | 17,626 |
package br.copacabana;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import javax.cache.Cache;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.web.servlet.ModelAndView;
import br.com.copacabana.cb.entities.Address;
import br.com.copacabana.cb.entities.Client;
import br.com.copacabana.cb.entities.MealOrder;
import br.com.copacabana.cb.entities.OrderedPlate;
import br.com.copacabana.cb.entities.Plate;
import br.com.copacabana.cb.entities.Restaurant;
import br.com.copacabana.cb.entities.TurnType;
import br.com.copacabana.cb.entities.WorkingHours.DayOfWeek;
import br.copacabana.order.paypal.PayPalProperties.PayPalConfKeys;
import br.copacabana.spring.AddressManager;
import br.copacabana.spring.ClientManager;
import br.copacabana.spring.ConfigurationManager;
import br.copacabana.spring.PlateManager;
import br.copacabana.spring.RestaurantManager;
import br.copacabana.usecase.control.UserActionManager;
import br.copacabana.util.TimeController;
import com.google.appengine.api.datastore.Key;
import com.google.appengine.api.datastore.KeyFactory;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;
/**
* @author Rafael Coutinho
*/
public class PlaceOrderController extends JsonViewController {
private String formView;
private String successView;
@Override
protected ModelAndView handleRequestInternal(HttpServletRequest request, HttpServletResponse response) throws Exception {
Map<String, Object> model = new HashMap<String, Object>();
model.put("mode", "view");
try {
Cache cache = CacheController.getCache();
if (cache.get(PayPalConfKeys.pppFixedRate.name()) == null) {
ConfigurationManager cm = new ConfigurationManager();
cache.put(PayPalConfKeys.pppFixedRate.name(), cm.getConfigurationValue(PayPalConfKeys.pppFixedRate.name()));
cache.put(PayPalConfKeys.pppPercentageValue.name(), cm.getConfigurationValue(PayPalConfKeys.pppPercentageValue.name()));
}
if (!Authentication.isUserLoggedIn(request.getSession())) {
String orderData = request.getParameter("orderData");
request.getSession().setAttribute("orderData", orderData);
model.put("forwardUrl", "/continueOrder.jsp");
UserActionManager.startOrderNotLogged(orderData, request.getSession().getId());
return new ModelAndView(getFormView(), model);
} else {
String orderData = "";
JsonObject user = Authentication.getLoggedUser(request.getSession());
String loggedUserId = user.get("entity").getAsJsonObject().get("id").getAsString();
if (request.getParameter("orderData") == null) {
orderData = (String) request.getSession().getAttribute("orderData");
} else {
orderData = request.getParameter("orderData");
}
log.log(Level.INFO, "OrderJSon: {0}", orderData);
JsonParser pa = new JsonParser();
JsonObject orderDataJson = (JsonObject) pa.parse(orderData);
ClientManager cman = new ClientManager();
Client c = cman.find(KeyFactory.stringToKey(loggedUserId), Client.class);
MealOrder mo = getMealOrder(c, orderDataJson);
request.getSession().setAttribute("clientPhone", "");
DateSerializer dateSerializer = new DateSerializer(request);
DateDeSerializer dateDeSerializer = new DateDeSerializer(request);
GsonBuilder gsonBuilder = GsonBuilderFactory.getInstance();// new
// GsonBuilder().setPrettyPrinting().serializeNulls().excludeFieldsWithoutExposeAnnotation();
gsonBuilder.registerTypeAdapter(Date.class, dateSerializer);
gsonBuilder.registerTypeAdapter(Date.class, dateDeSerializer);
gsonBuilder.registerTypeAdapter(Key.class, new KeyDeSerializer());
gsonBuilder.registerTypeAdapter(Key.class, new KeySerializer());
Gson gson = gsonBuilder.create();
model.putAll(updateModelData(mo, c, gson));
String json = gson.toJson(mo); // Or use new
json = GsonBuilderFactory.escapeString(json);
request.getSession().setAttribute("orderData", json);
UserActionManager.startOrder(json, loggedUserId, request.getSession().getId());
return new ModelAndView(getSuccessView(), model);
}
} catch (Exception e) {
log.log(Level.SEVERE, "Failed to place order.");
try {
String orderData = "";
log.log(Level.SEVERE, "Checking logged user.");
JsonObject user = Authentication.getLoggedUser(request.getSession());
if (user == null) {
log.log(Level.SEVERE, "user is not logged in.");
}
String loggedUserId = user.get("entity").getAsJsonObject().get("id").getAsString();
log.log(Level.SEVERE, "logged user id {0}", loggedUserId);
if (request.getParameter("orderData") == null) {
log.log(Level.SEVERE, "Order is not in request, checking session");
orderData = (String) request.getSession().getAttribute("orderData");
} else {
log.log(Level.SEVERE, "Order is in request");
orderData = request.getParameter("orderData");
}
if (orderData == null) {
log.log(Level.SEVERE, "Order was null!");
}
log.log(Level.SEVERE, "Order is order :" + orderData);
log.log(Level.SEVERE, "Exception was {0}.", e);
log.log(Level.SEVERE, "Error was {0}.", e.getMessage());
UserActionManager.registerMajorError(request, e, loggedUserId, request.getSession().getId(), "placing order");
} catch (Exception ex) {
log.log(Level.SEVERE, "Failed during loggin of error was {0}.", e);
UserActionManager.registerMajorError(request, e, "placing order 2");
}
throw e;
}
}
public static Map<String, Object> updateModelData(MealOrder mo, Client c, Gson gson) {
Map<String, Object> model = new HashMap<String, Object>();
RestaurantManager rman = new RestaurantManager();
Restaurant r = rman.getRestaurant(mo.getRestaurant());
Boolean b = r.getOnlyForRetrieval();
if (b != null && true == b) {
model.put("onlyForRetrieval", Boolean.TRUE);
} else {
model.put("onlyForRetrieval", Boolean.FALSE);
}
model.put("restaurantAddressKey", KeyFactory.keyToString(r.getAddress()));
model.put("clientCpf", c.getCpf());
model.put("level", c.getLevel().ordinal());
JsonObject json = new JsonObject();
ConfigurationManager cm = new ConfigurationManager();
String hasSpecificLogic = cm.getConfigurationValue("hasSpecificLogic");
model.put("noTakeAwayOrders", "false");
if (hasSpecificLogic != null && hasSpecificLogic.endsWith("true")) {
json = getSteakHouseSpecificData(mo, c, gson);
getMakisSpecificLogic(mo, c, gson, json);
getPapagaiosSpecificLogic(mo, c, gson, json);
getPizzadoroSpecificLogic(mo,c,gson,json);
if (noTakeAwayOrders(mo) == true) {
model.put("noTakeAwayOrders", "true");
}
}
model.put("hasSpecificLogic", json.toString());
if (json.get("javascript") != null && json.get("javascript").getAsString().length() > 0) {
model.put("hasSpecificLogicJavascript", json.get("javascript").getAsString());
}
Address restAddress = new AddressManager().getAddress(r.getAddress());
model.put("restaurantAddress", gson.toJson(restAddress));
return model;
}
private static boolean noTakeAwayOrders(MealOrder mo) {
ConfigurationManager cm = new ConfigurationManager();
String ids = cm.getConfigurationValue("no.takeaway.ids");
String restId = KeyFactory.keyToString(mo.getRestaurant());
if (ids.contains(restId)) {
return true;
}
return false;
}
private static void getPapagaiosSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
ConfigurationManager cm = new ConfigurationManager();
String idStr = cm.getConfigurationValue("papagaios.id");
if (idStr != null && idStr.length() > 0) {
Key k = KeyFactory.stringToKey(idStr);
if (k.equals(mo.getRestaurant())) {
json.add("javascript", new JsonPrimitive("/scripts/custom/papagaios.js"));
}
}
}
private static void getPizzadoroSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
ConfigurationManager cm = new ConfigurationManager();
String idStr = cm.getConfigurationValue("pizzadoro.id");
if (idStr != null && idStr.length() > 0) {
Key k = KeyFactory.stringToKey(idStr);
if (k.equals(mo.getRestaurant())) {
json.add("javascript", new JsonPrimitive("/scripts/custom/pizzadoro.js"));
}
}
}
private static void getMakisSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
try {
ConfigurationManager cm = new ConfigurationManager();
PlateManager pm = new PlateManager();
String makisIdStr = cm.getConfigurationValue("makis.Id");
if (makisIdStr != null && makisIdStr.length() > 0) {
Key makis = KeyFactory.stringToKey(makisIdStr);
if (makis != null && makis.equals(mo.getRestaurant())) {
String packageId = cm.getConfigurationValue("makis.package.id");
if (packageId != null && packageId.length() > 0) {
json.add("makisPackageCostId", new JsonPrimitive(packageId));
json.add("makisMsg", new JsonPrimitive(cm.getConfigurationValue("makis.msg")));
boolean isIncluded = false;
Key packageKey = KeyFactory.stringToKey(packageId);
for (Iterator<OrderedPlate> iterator = mo.getPlates().iterator(); iterator.hasNext();) {
OrderedPlate plate = (OrderedPlate) iterator.next();
if (Boolean.FALSE.equals(plate.getIsFraction()) && plate.getPlate().equals(packageKey)) {
isIncluded = true;
break;
}
}
if (isIncluded == false) {
Plate packagePlate = pm.get(packageKey);
OrderedPlate oplate = new OrderedPlate();
oplate.setName(packagePlate.getName());
oplate.setPrice(packagePlate.getPrice());
oplate.setPriceInCents(packagePlate.getPriceInCents());
oplate.setQty(1);
oplate.setPlate(packageKey);
mo.getPlates().add(oplate);
}
}
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "failed to add makis specific logic", e);
}
}
private static JsonObject getSteakHouseSpecificData(MealOrder mo, Client c, Gson gson) {
JsonObject json = new JsonObject();
json.add("freeDelivery", new JsonPrimitive("false"));
try {
ConfigurationManager cm = new ConfigurationManager();
String steakIdStr = cm.getConfigurationValue("steakHouse.Id");
if (steakIdStr != null && steakIdStr.length() > 0) {
Key steak = KeyFactory.stringToKey(steakIdStr);
if (steak.equals(mo.getRestaurant())) {
if (!TimeController.getDayOfWeek().equals(DayOfWeek.SATURDAY) && !TimeController.getDayOfWeek().equals(DayOfWeek.SUNDAY)) {
if (TimeController.getCurrentTurn().equals(TurnType.LUNCH)) {
String foodCatsStr = cm.getConfigurationValue("steakHouse.FoodCats");
if (foodCatsStr != null && foodCatsStr.length() > 0) {
String[] foodCatsArray = foodCatsStr.split("\\|");
Set<Key> foodCats = new HashSet<Key>();
for (int i = 0; i < foodCatsArray.length; i++) {
if (foodCatsArray[i].length() > 0) {
foodCats.add(KeyFactory.stringToKey(foodCatsArray[i]));
}
}
List<OrderedPlate> plates = mo.getPlates();
PlateManager pm = new PlateManager();
for (Iterator iterator = plates.iterator(); iterator.hasNext();) {
OrderedPlate orderedPlate = (OrderedPlate) iterator.next();
Plate p = null;
if (Boolean.TRUE.equals(orderedPlate.getIsFraction())) {
p = pm.getPlate(orderedPlate.getFractionPlates().iterator().next());
} else {
p = pm.getPlate(orderedPlate.getPlate());
}
if (!foodCats.contains(p.getFoodCategory())) {
json.add("freeDelivery", new JsonPrimitive("false"));
return json;
}
}
json.add("freeDelivery", new JsonPrimitive("true"));
json.add("msg", new JsonPrimitive(cm.getConfigurationValue("steakHouse.msg")));
}
}
}
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "Could not set up things for SteakHouse", e);
}
return json;
}
public MealOrder getMealOrder(Client c, JsonObject sessionOderData) {
MealOrder mo = new MealOrder();
mo.setClient(c);
if (c.getContact() != null) {
mo.setClientPhone(c.getContact().getPhone());
}
mo.setAddress(getAddress(sessionOderData, c));
mo.setObservation(getObservation(sessionOderData));
mo.setRestaurant(getRestKey(sessionOderData));
mo.setPlates(getPlates(sessionOderData));
return mo;
}
private Key getAddress(JsonObject sessionOderData, Client c) {
try {
if (sessionOderData.get("address") == null) {
if (c.getMainAddress() != null) {
return c.getMainAddress();
} else {
return null;
}
} else {
if (sessionOderData.get("address") != null && !sessionOderData.get("address").isJsonNull() ) {
return KeyFactory.stringToKey(sessionOderData.get("address").getAsString());
}else{
return null;
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "no address da sessão havia {0}", sessionOderData.get("address"));
log.log(Level.SEVERE, "Error ao buscar endereço de cliente ou em sessão", e);
return null;
}
}
public List<OrderedPlate> getPlates(JsonObject sessionOderData) {
List<OrderedPlate> orderedPlates = new ArrayList<OrderedPlate>();
JsonArray array = sessionOderData.get("plates").getAsJsonArray();
for (int i = 0; i < array.size(); i++) {
JsonObject pjson = array.get(i).getAsJsonObject();
orderedPlates.add(getOrdered(pjson));
}
return orderedPlates;
}
private OrderedPlate getOrdered(JsonObject pjson) {
OrderedPlate oplate = new OrderedPlate();
oplate.setName(pjson.get("name").getAsString());
oplate.setPrice(pjson.get("price").getAsDouble());
oplate.setPriceInCents(Double.valueOf(pjson.get("price").getAsDouble() * 100.0).intValue());
oplate.setQty(pjson.get("qty").getAsInt());
if (pjson.get("isFraction").getAsBoolean() == true) {
oplate.setIsFraction(Boolean.TRUE);
Set<Key> fractionPlates = new HashSet<Key>();
JsonArray fractionKeys = pjson.get("fractionKeys").getAsJsonArray();
for (int i = 0; i < fractionKeys.size(); i++) {
Key fractionKey = KeyFactory.stringToKey(fractionKeys.get(i).getAsString());
fractionPlates.add(fractionKey);
}
oplate.setFractionPlates(fractionPlates);
return oplate;
} else {
String pkey = "";
if (pjson.get("plate").isJsonObject()) {
pkey = pjson.get("plate").getAsJsonObject().get("id").getAsString();
} else {
pkey = pjson.get("plate").getAsString();
}
oplate.setPlate(KeyFactory.stringToKey(pkey));
return oplate;
}
}
public Key getRestKey(JsonObject sessionOderData) {
String restKey;
if (sessionOderData.get("restaurant") != null) {
if (sessionOderData.get("restaurant").isJsonObject()) {
restKey = sessionOderData.get("restaurant").getAsJsonObject().get("id").getAsString();
} else {
restKey = sessionOderData.get("restaurant").getAsString();
}
} else {
restKey = sessionOderData.get("plates").getAsJsonArray().get(0).getAsJsonObject().get("plate").getAsJsonObject().get("value").getAsJsonObject().get("restaurant").getAsString();
}
return KeyFactory.stringToKey(restKey);
}
public String getObservation(JsonObject sessionOderData) {
return sessionOderData.get("observation").getAsString();
}
public String getFormView() {
return formView;
}
public void setFormView(String formView) {
this.formView = formView;
}
public String getSuccessView() {
return successView;
}
public void setSuccessView(String successView) {
this.successView = successView;
}
}
| rafaelcoutinho/comendobemdelivery | src/br/copacabana/PlaceOrderController.java | Java | apache-2.0 | 16,224 |
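Working backwards from getPlates, getOrdered, getRestKey and getObservation above, the orderData payload is expected to carry "observation", "address", "restaurant" and a "plates" array whose entries hold name/price/qty/isFraction plus either "plate" or "fractionKeys". A sketch that builds such a payload with Gson; the concrete values and key strings are placeholders for illustration only:

import com.google.gson.JsonArray;
import com.google.gson.JsonObject;

public class OrderDataSketch {
    public static JsonObject exampleOrderData() {
        JsonObject plate = new JsonObject();
        plate.addProperty("name", "Feijoada");
        plate.addProperty("price", 25.9);
        plate.addProperty("qty", 1);
        plate.addProperty("isFraction", false);
        plate.addProperty("plate", "PLATE_KEY_PLACEHOLDER"); // datastore key as string

        JsonArray plates = new JsonArray();
        plates.add(plate);

        JsonObject order = new JsonObject();
        order.addProperty("observation", "no onions, please");
        order.addProperty("address", "ADDRESS_KEY_PLACEHOLDER");
        order.addProperty("restaurant", "RESTAURANT_KEY_PLACEHOLDER");
        order.add("plates", plates);
        return order;
    }
}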
package hska.iwi.eShopMaster.model.businessLogic.manager.impl;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import hska.iwi.eShopMaster.model.businessLogic.manager.CategoryManager;
import hska.iwi.eShopMaster.model.businessLogic.manager.entity.Category;
import hska.iwi.eShopMaster.model.businessLogic.manager.entity.User;
import java.util.List;
import javax.ws.rs.core.MediaType;
import org.apache.log4j.Logger;
public class CategoryManagerImpl implements CategoryManager {
private final static String BASIS_URL_CATEGORY = "http://localhost:8081/api/catalog/category/";
private final Logger logger = Logger.getLogger(CategoryManagerImpl.class);
private final ObjectMapper parser = new ObjectMapper();
private final User currentUser;
public CategoryManagerImpl(User currentUser) {
this.currentUser = currentUser;
}
@Override
public List<Category> getCategories() {
List<Category> categories = null;
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY);
ClientResponse response = webResource.accept(MediaType.APPLICATION_JSON_TYPE)
.get(ClientResponse.class);
categories = parser.readValue(response.getEntity(String.class), List.class);
} catch (Exception ex) {
logger.error(ex);
}
return categories;
}
@Override
public Category getCategory(int id) {
Category category = null;
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY)
.path(String.valueOf(id));
ClientResponse response = webResource.accept(MediaType.APPLICATION_JSON_TYPE)
.get(ClientResponse.class);
category = parser.readValue(response.getEntity(String.class), Category.class);
} catch (Exception ex) {
logger.error(ex);
}
return category;
}
@Override
public void addCategory(String name) {
Category category = new Category(name);
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY);
webResource.type(MediaType.APPLICATION_JSON_TYPE)
.accept(MediaType.APPLICATION_JSON_TYPE)
.header("usr", currentUser.getUsername())
.header("pass", currentUser.getPassword())
.post(ClientResponse.class, parser.writeValueAsString(category));
} catch (Exception ex) {
logger.error(ex);
}
}
@Override
public void delCategoryById(int id) {
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY)
.path(String.valueOf(id));
webResource.accept(MediaType.APPLICATION_JSON_TYPE)
.header("usr", currentUser.getUsername())
.header("pass", currentUser.getPassword())
.delete();
} catch (Exception ex) {
logger.error(ex);
}
}
}
| Am3o/eShop | WebShopStart/src/main/java/hska/iwi/eShopMaster/model/businessLogic/manager/impl/CategoryManagerImpl.java | Java | apache-2.0 | 3,084 |
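One detail worth noting in getCategories() above: readValue(..., List.class) deserializes the JSON array into a raw List of maps rather than a List of Category instances. If typed elements are wanted, Jackson's TypeReference is the usual route; a small sketch kept separate from the original class:

import java.util.List;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

import hska.iwi.eShopMaster.model.businessLogic.manager.entity.Category;

public class CategoryParsingSketch {
    private static final ObjectMapper PARSER = new ObjectMapper();

    // Deserializes a JSON array into typed Category objects instead of LinkedHashMaps.
    public static List<Category> parseCategories(String json) throws Exception {
        return PARSER.readValue(json, new TypeReference<List<Category>>() {});
    }
}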
/**
* Created by txs on 2016/10/17.
*/
public class Student {
String name;
int grade;
@Override
public String toString() {
String temp = "";
temp += "name: " + name + "\n";
temp += "grade: " + grade + "\n";
return temp;
}
@Override
public boolean equals(Object obj) {
if(this==obj) return true;
boolean r = false;
if(obj instanceof Student){
Student temp = (Student)obj;
if(this.name.equals(temp.name)
&& this.grade == temp.grade)
r = true;
}
return r;
}
}
| txs72/BUPTJava | slides/06/overrding/Student.java | Java | apache-2.0 | 625 |
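Student above overrides equals() but not hashCode(), so equal students could land in different hash buckets. A matching hashCode() consistent with the fields compared in equals(), shown as a standalone sketch rather than a change to the original file:

import java.util.Objects;

public class StudentHashCodeSketch {
    String name;
    int grade;

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (!(obj instanceof StudentHashCodeSketch)) return false;
        StudentHashCodeSketch other = (StudentHashCodeSketch) obj;
        return grade == other.grade && Objects.equals(name, other.name);
    }

    // Uses the same fields as equals(), preserving the equals/hashCode contract.
    @Override
    public int hashCode() {
        return Objects.hash(name, grade);
    }
}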
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.openapi.vcs;
import com.intellij.execution.ui.ConsoleView;
import com.intellij.execution.ui.ConsoleViewContentType;
import com.intellij.util.containers.ContainerUtil;
import consulo.util.lang.Pair;
import consulo.util.lang.StringUtil;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Collections;
import java.util.List;
public final class VcsConsoleLine {
private final List<Pair<String, ConsoleViewContentType>> myChunks;
private VcsConsoleLine(@Nonnull List<Pair<String, ConsoleViewContentType>> chunks) {
myChunks = chunks;
}
public void print(@Nonnull ConsoleView console) {
ConsoleViewContentType lastType = ConsoleViewContentType.NORMAL_OUTPUT;
for (Pair<String, ConsoleViewContentType> chunk : myChunks) {
console.print(chunk.first, chunk.second);
lastType = chunk.second;
}
console.print("\n", lastType);
}
@Nullable
public static VcsConsoleLine create(@Nullable String message, @Nonnull ConsoleViewContentType contentType) {
return create(Collections.singletonList(Pair.create(message, contentType)));
}
@Nullable
public static VcsConsoleLine create(@Nonnull List<Pair<String, ConsoleViewContentType>> lineChunks) {
List<Pair<String, ConsoleViewContentType>> chunks = ContainerUtil.filter(lineChunks, it -> !StringUtil.isEmptyOrSpaces(it.first));
if (chunks.isEmpty()) return null;
return new VcsConsoleLine(chunks);
}
}
| consulo/consulo | modules/base/vcs-api/src/main/java/com/intellij/openapi/vcs/VcsConsoleLine.java | Java | apache-2.0 | 1,604 |
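A minimal usage sketch of the class above; the ConsoleView instance is assumed to come from the surrounding VCS console setup:

import com.intellij.execution.ui.ConsoleView;
import com.intellij.execution.ui.ConsoleViewContentType;
import com.intellij.openapi.vcs.VcsConsoleLine;

public class VcsConsoleLineUsageSketch {
    // Builds a single console line and prints it; create(...) returns null for blank text,
    // hence the null check.
    static void printStatus(ConsoleView console, String message) {
        VcsConsoleLine line = VcsConsoleLine.create(message, ConsoleViewContentType.NORMAL_OUTPUT);
        if (line != null) {
            line.print(console);
        }
    }
}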
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.brixcms.web.nodepage;
import org.apache.wicket.IRequestTarget;
import org.apache.wicket.Page;
import org.apache.wicket.PageParameters;
import org.apache.wicket.RequestCycle;
import org.apache.wicket.model.IModel;
import org.apache.wicket.request.target.component.IPageRequestTarget;
import org.apache.wicket.util.lang.Objects;
import org.apache.wicket.util.string.StringValue;
import org.brixcms.exception.BrixException;
import org.brixcms.jcr.wrapper.BrixNode;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
public class BrixPageParameters implements Serializable {
// ------------------------------ FIELDS ------------------------------
private static final long serialVersionUID = 1L;
private List<String> indexedParameters = null;
private List<QueryStringParameter> queryStringParameters = null;
// -------------------------- STATIC METHODS --------------------------
public static boolean equals(BrixPageParameters p1, BrixPageParameters p2) {
if (Objects.equal(p1, p2)) {
return true;
}
if (p1 == null && p2.getIndexedParamsCount() == 0 && p2.getQueryParamKeys().isEmpty()) {
return true;
}
if (p2 == null && p1.getIndexedParamsCount() == 0 && p1.getQueryParamKeys().isEmpty()) {
return true;
}
return false;
}
public int getIndexedParamsCount() {
return indexedParameters != null ? indexedParameters.size() : 0;
}
public static BrixPageParameters getCurrent() {
IRequestTarget target = RequestCycle.get().getRequestTarget();
// this is required for getting current page parameters from page constructor
// (the actual page instance is not constructed yet.
if (target instanceof PageParametersRequestTarget) {
return ((PageParametersRequestTarget) target).getPageParameters();
} else {
return getCurrentPage().getBrixPageParameters();
}
}
// --------------------------- CONSTRUCTORS ---------------------------
public BrixPageParameters() {
}
public BrixPageParameters(PageParameters params) {
if (params != null) {
for (String name : params.keySet()) {
addQueryParam(name, params.get(name));
}
}
}
public void addQueryParam(String name, Object value) {
addQueryParam(name, value, -1);
}
public BrixPageParameters(BrixPageParameters copy) {
if (copy == null) {
throw new IllegalArgumentException("Copy argument may not be null.");
}
if (copy.indexedParameters != null)
this.indexedParameters = new ArrayList<String>(copy.indexedParameters);
if (copy.queryStringParameters != null)
this.queryStringParameters = new ArrayList<QueryStringParameter>(
copy.queryStringParameters);
}
// ------------------------ CANONICAL METHODS ------------------------
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof BrixPageParameters == false) {
return false;
}
BrixPageParameters rhs = (BrixPageParameters) obj;
if (!Objects.equal(indexedParameters, rhs.indexedParameters)) {
return false;
}
if (queryStringParameters == null || rhs.queryStringParameters == null) {
return rhs.queryStringParameters == queryStringParameters;
}
if (queryStringParameters.size() != rhs.queryStringParameters.size()) {
return false;
}
for (String key : getQueryParamKeys()) {
List<StringValue> values1 = getQueryParams(key);
Set<String> v1 = new TreeSet<String>();
List<StringValue> values2 = rhs.getQueryParams(key);
Set<String> v2 = new TreeSet<String>();
for (StringValue sv : values1) {
v1.add(sv.toString());
}
for (StringValue sv : values2) {
v2.add(sv.toString());
}
if (v1.equals(v2) == false) {
return false;
}
}
return true;
}
public Set<String> getQueryParamKeys() {
if (queryStringParameters == null || queryStringParameters.isEmpty()) {
return Collections.emptySet();
}
Set<String> set = new TreeSet<String>();
for (QueryStringParameter entry : queryStringParameters) {
set.add(entry.key);
}
return Collections.unmodifiableSet(set);
}
public List<StringValue> getQueryParams(String name) {
if (name == null) {
throw new IllegalArgumentException("Parameter name may not be null.");
}
if (queryStringParameters != null) {
List<StringValue> result = new ArrayList<StringValue>();
for (QueryStringParameter entry : queryStringParameters) {
if (entry.key.equals(name)) {
result.add(StringValue.valueOf(entry.value));
}
}
return Collections.unmodifiableList(result);
} else {
return Collections.emptyList();
}
}
// -------------------------- OTHER METHODS --------------------------
public void addQueryParam(String name, Object value, int index) {
if (name == null) {
throw new IllegalArgumentException("Parameter name may not be null.");
}
if (value == null) {
throw new IllegalArgumentException("Parameter value may not be null.");
}
if (queryStringParameters == null)
queryStringParameters = new ArrayList<QueryStringParameter>(1);
QueryStringParameter entry = new QueryStringParameter(name, value.toString());
if (index == -1)
queryStringParameters.add(entry);
else
queryStringParameters.add(index, entry);
}
void assign(BrixPageParameters other) {
if (this != other) {
this.indexedParameters = other.indexedParameters;
this.queryStringParameters = other.queryStringParameters;
}
}
public void clearIndexedParams() {
this.indexedParameters = null;
}
public void clearQueryParams() {
this.queryStringParameters = null;
}
public StringValue getIndexedParam(int index) {
if (indexedParameters != null) {
if (index >= 0 && index < indexedParameters.size()) {
String value = indexedParameters.get(index);
return StringValue.valueOf(value);
}
}
return StringValue.valueOf((String) null);
}
public StringValue getQueryParam(String name) {
if (name == null) {
throw new IllegalArgumentException("Parameter name may not be null.");
}
if (queryStringParameters != null) {
for (QueryStringParameter entry : queryStringParameters) {
if (entry.key.equals(name)) {
return StringValue.valueOf(entry.value);
}
}
}
return StringValue.valueOf((String) null);
}
public List<QueryStringParameter> getQueryStringParams() {
if (queryStringParameters == null) {
return Collections.emptyList();
} else {
return Collections.unmodifiableList(new ArrayList<QueryStringParameter>(
queryStringParameters));
}
}
public void removeIndexedParam(int index) {
if (indexedParameters != null) {
if (index >= 0 && index < indexedParameters.size()) {
indexedParameters.remove(index);
}
}
}
public void setIndexedParam(int index, Object object) {
if (indexedParameters == null)
indexedParameters = new ArrayList<String>(index);
for (int i = indexedParameters.size(); i <= index; ++i) {
indexedParameters.add(null);
}
String value = object != null ? object.toString() : null;
indexedParameters.set(index, value);
}
public void setQueryParam(String name, Object value) {
setQueryParam(name, value, -1);
}
    public void setQueryParam(String name, Object value, int index) {
        removeQueryParam(name);
        if (value != null) {
            addQueryParam(name, value, index);
        }
    }
public void removeQueryParam(String name) {
if (name == null) {
throw new IllegalArgumentException("Parameter name may not be null.");
}
if (queryStringParameters != null) {
for (Iterator<QueryStringParameter> i = queryStringParameters.iterator(); i.hasNext();) {
QueryStringParameter e = i.next();
if (e.key.equals(name)) {
i.remove();
}
}
}
}
public String toCallbackURL() {
return urlFor(getCurrentPage());
}
/**
* Constructs a url to the specified page appending these page parameters
*
* @param page
* @return url
*/
public String urlFor(BrixNodeWebPage page) {
IRequestTarget target = new BrixNodeRequestTarget(page, this);
return RequestCycle.get().urlFor(target).toString();
}
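    // Illustrative usage (hypothetical parameter names and page instance, not part of this class):
    // parameters can be collected and then rendered into a callback URL for a Brix page, e.g.
    //
    //   BrixPageParameters params = new BrixPageParameters();
    //   params.setIndexedParam(0, "products");   // becomes a path segment
    //   params.setQueryParam("page", 2);         // becomes the query string ?page=2
    //   String url = params.urlFor(currentPage); // currentPage is a BrixNodeWebPage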
static BrixNodeWebPage getCurrentPage() {
IRequestTarget target = RequestCycle.get().getRequestTarget();
BrixNodeWebPage page = null;
if (target != null && target instanceof IPageRequestTarget) {
Page p = ((IPageRequestTarget) target).getPage();
if (p instanceof BrixNodeWebPage) {
page = (BrixNodeWebPage) p;
}
}
if (page == null) {
throw new BrixException(
"Couldn't obtain the BrixNodeWebPage instance from RequestTarget.");
}
return page;
}
/**
* Constructs a url to the specified page appending these page parameters
*
* @param
* @return url
*/
public String urlFor(IModel<BrixNode> node) {
IRequestTarget target = new BrixNodeRequestTarget(node, this);
return RequestCycle.get().urlFor(target).toString();
}
// -------------------------- INNER CLASSES --------------------------
public static class QueryStringParameter implements Serializable {
private static final long serialVersionUID = 1L;
private final String key;
private final String value;
public QueryStringParameter(String key, String value) {
this.key = key;
this.value = value;
}
public String getKey() {
return key;
}
public String getValue() {
return value;
}
}
}
| kbachl/brix-cms-backup | brix-core/src/main/java/org/brixcms/web/nodepage/BrixPageParameters.java | Java | apache-2.0 | 11,587 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.test.recovery;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.client.program.ProgramInvocationException;
import org.apache.flink.configuration.AkkaOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.JobManagerOptions;
import org.apache.flink.runtime.akka.AkkaUtils;
import org.apache.flink.runtime.client.JobStatusMessage;
import org.apache.flink.runtime.highavailability.HighAvailabilityServices;
import org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils;
import org.apache.flink.runtime.jobmanager.JobManager;
import org.apache.flink.runtime.jobmanager.MemoryArchivist;
import org.apache.flink.runtime.messages.JobManagerMessages;
import org.apache.flink.runtime.metrics.NoOpMetricRegistry;
import org.apache.flink.runtime.testingUtils.TestingUtils;
import org.apache.flink.runtime.testutils.CommonTestUtils;
import org.apache.flink.util.NetUtils;
import org.apache.flink.util.TestLogger;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.pattern.Patterns;
import akka.util.Timeout;
import org.junit.Test;
import java.io.File;
import java.io.StringWriter;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import scala.Option;
import scala.Some;
import scala.Tuple2;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
import static org.apache.flink.runtime.testutils.CommonTestUtils.getCurrentClasspath;
import static org.apache.flink.runtime.testutils.CommonTestUtils.getJavaCommandPath;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* This test makes sure that jobs are canceled properly in cases where
* the task manager went down and did not respond to cancel messages.
*/
@SuppressWarnings("serial")
public class ProcessFailureCancelingITCase extends TestLogger {
@Test
public void testCancelingOnProcessFailure() throws Exception {
final StringWriter processOutput = new StringWriter();
ActorSystem jmActorSystem = null;
Process taskManagerProcess = null;
HighAvailabilityServices highAvailabilityServices = null;
try {
// check that we run this test only if the java command
// is available on this machine
String javaCommand = getJavaCommandPath();
if (javaCommand == null) {
System.out.println("---- Skipping Process Failure test : Could not find java executable ----");
return;
}
// create a logging file for the process
File tempLogFile = File.createTempFile(getClass().getSimpleName() + "-", "-log4j.properties");
tempLogFile.deleteOnExit();
CommonTestUtils.printLog4jDebugConfig(tempLogFile);
// find a free port to start the JobManager
final int jobManagerPort = NetUtils.getAvailablePort();
// start a JobManager
Tuple2<String, Object> localAddress = new Tuple2<String, Object>("localhost", jobManagerPort);
Configuration jmConfig = new Configuration();
jmConfig.setString(AkkaOptions.WATCH_HEARTBEAT_INTERVAL, "5 s");
jmConfig.setString(AkkaOptions.WATCH_HEARTBEAT_PAUSE, "2000 s");
jmConfig.setInteger(AkkaOptions.WATCH_THRESHOLD, 10);
jmConfig.setString(AkkaOptions.ASK_TIMEOUT, "100 s");
jmConfig.setString(JobManagerOptions.ADDRESS, localAddress._1());
jmConfig.setInteger(JobManagerOptions.PORT, jobManagerPort);
highAvailabilityServices = HighAvailabilityServicesUtils.createHighAvailabilityServices(
jmConfig,
TestingUtils.defaultExecutor(),
HighAvailabilityServicesUtils.AddressResolution.NO_ADDRESS_RESOLUTION);
jmActorSystem = AkkaUtils.createActorSystem(jmConfig, new Some<>(localAddress));
ActorRef jmActor = JobManager.startJobManagerActors(
jmConfig,
jmActorSystem,
TestingUtils.defaultExecutor(),
TestingUtils.defaultExecutor(),
highAvailabilityServices,
new NoOpMetricRegistry(),
Option.empty(),
JobManager.class,
MemoryArchivist.class)._1();
// the TaskManager java command
String[] command = new String[] {
javaCommand,
"-Dlog.level=DEBUG",
"-Dlog4j.configuration=file:" + tempLogFile.getAbsolutePath(),
"-Xms80m", "-Xmx80m",
"-classpath", getCurrentClasspath(),
AbstractTaskManagerProcessFailureRecoveryTest.TaskManagerProcessEntryPoint.class.getName(),
String.valueOf(jobManagerPort)
};
            // start the TaskManager process
taskManagerProcess = new ProcessBuilder(command).start();
new CommonTestUtils.PipeForwarder(taskManagerProcess.getErrorStream(), processOutput);
            // we wait for the JobManager to have the TaskManager available
// since some of the CI environments are very hostile, we need to give this a lot of time (2 minutes)
waitUntilNumTaskManagersAreRegistered(jmActor, 1, 120000);
final Throwable[] errorRef = new Throwable[1];
// start the test program, which infinitely blocks
Runnable programRunner = new Runnable() {
@Override
public void run() {
try {
ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", jobManagerPort);
env.setParallelism(2);
env.setRestartStrategy(RestartStrategies.noRestart());
env.getConfig().disableSysoutLogging();
env.generateSequence(0, Long.MAX_VALUE)
.map(new MapFunction<Long, Long>() {
@Override
public Long map(Long value) throws Exception {
synchronized (this) {
wait();
}
return 0L;
}
})
.output(new DiscardingOutputFormat<Long>());
env.execute();
}
catch (Throwable t) {
errorRef[0] = t;
}
}
};
Thread programThread = new Thread(programRunner);
// kill the TaskManager
taskManagerProcess.destroy();
taskManagerProcess = null;
// immediately submit the job. this should hit the case
// where the JobManager still thinks it has the TaskManager and tries to send it tasks
programThread.start();
// try to cancel the job
cancelRunningJob(jmActor);
// we should see a failure within reasonable time (10s is the ask timeout).
// since the CI environment is often slow, we conservatively give it up to 2 minutes,
// to fail, which is much lower than the failure time given by the heartbeats ( > 2000s)
programThread.join(120000);
assertFalse("The program did not cancel in time (2 minutes)", programThread.isAlive());
Throwable error = errorRef[0];
assertNotNull("The program did not fail properly", error);
assertTrue(error instanceof ProgramInvocationException);
// all seems well :-)
}
catch (Exception e) {
printProcessLog("TaskManager", processOutput.toString());
throw e;
}
catch (Error e) {
printProcessLog("TaskManager 1", processOutput.toString());
throw e;
}
finally {
if (taskManagerProcess != null) {
taskManagerProcess.destroy();
}
if (jmActorSystem != null) {
jmActorSystem.shutdown();
}
if (highAvailabilityServices != null) {
highAvailabilityServices.closeAndCleanupAllData();
}
}
}
private void cancelRunningJob(ActorRef jobManager) throws Exception {
final FiniteDuration askTimeout = new FiniteDuration(10, TimeUnit.SECONDS);
// try at most for 30 seconds
final long deadline = System.currentTimeMillis() + 30000;
JobID jobId = null;
do {
Future<Object> response = Patterns.ask(jobManager,
JobManagerMessages.getRequestRunningJobsStatus(), new Timeout(askTimeout));
Object result;
try {
result = Await.result(response, askTimeout);
}
catch (Exception e) {
throw new Exception("Could not retrieve running jobs from the JobManager.", e);
}
if (result instanceof JobManagerMessages.RunningJobsStatus) {
List<JobStatusMessage> jobs = ((JobManagerMessages.RunningJobsStatus) result).getStatusMessages();
if (jobs.size() == 1) {
jobId = jobs.get(0).getJobId();
break;
}
}
}
while (System.currentTimeMillis() < deadline);
if (jobId == null) {
// we never found it running, must have failed already
return;
}
// tell the JobManager to cancel the job
jobManager.tell(
new JobManagerMessages.LeaderSessionMessage(
HighAvailabilityServices.DEFAULT_LEADER_ID,
new JobManagerMessages.CancelJob(jobId)),
ActorRef.noSender());
}
private void waitUntilNumTaskManagersAreRegistered(ActorRef jobManager, int numExpected, long maxDelay)
throws Exception {
final long deadline = System.currentTimeMillis() + maxDelay;
while (true) {
long remaining = deadline - System.currentTimeMillis();
if (remaining <= 0) {
fail("The TaskManagers did not register within the expected time (" + maxDelay + "msecs)");
}
FiniteDuration timeout = new FiniteDuration(remaining, TimeUnit.MILLISECONDS);
try {
Future<?> result = Patterns.ask(jobManager,
JobManagerMessages.getRequestNumberRegisteredTaskManager(),
new Timeout(timeout));
Integer numTMs = (Integer) Await.result(result, timeout);
if (numTMs == numExpected) {
break;
}
}
catch (TimeoutException e) {
// ignore and retry
}
catch (ClassCastException e) {
fail("Wrong response: " + e.getMessage());
}
}
}
private void printProcessLog(String processName, String log) {
if (log == null || log.length() == 0) {
return;
}
System.out.println("-----------------------------------------");
System.out.println(" BEGIN SPAWNED PROCESS LOG FOR " + processName);
System.out.println("-----------------------------------------");
System.out.println(log);
System.out.println("-----------------------------------------");
System.out.println(" END SPAWNED PROCESS LOG");
System.out.println("-----------------------------------------");
}
}
| zimmermatt/flink | flink-tests/src/test/java/org/apache/flink/test/recovery/ProcessFailureCancelingITCase.java | Java | apache-2.0 | 10,977 |
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, vJAXB 2.1.10 in JDK 6
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2011.09.09 at 01:22:27 PM CEST
//
package test;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.XmlValue;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <attribute name="content-type" type="{http://www.w3.org/2001/XMLSchema}anySimpleType" />
* <attribute name="seq" type="{http://www.w3.org/2001/XMLSchema}anySimpleType" />
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
"content"
})
@XmlRootElement(name = "fpage")
public class Fpage {
@XmlValue
protected String content;
@XmlAttribute(name = "content-type")
@XmlSchemaType(name = "anySimpleType")
protected String contentType;
@XmlAttribute
@XmlSchemaType(name = "anySimpleType")
protected String seq;
/**
* Gets the value of the content property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getContent() {
return content;
}
/**
* Sets the value of the content property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setContent(String value) {
this.content = value;
}
/**
* Gets the value of the contentType property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getContentType() {
return contentType;
}
/**
* Sets the value of the contentType property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setContentType(String value) {
this.contentType = value;
}
/**
* Gets the value of the seq property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getSeq() {
return seq;
}
/**
* Sets the value of the seq property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setSeq(String value) {
this.seq = value;
}
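    // Illustrative XML instance mapped by this class (hypothetical attribute values):
    //   <fpage content-type="page-number" seq="1">42</fpage>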
}
| BlueBrain/bluima | modules/bluima_xml/src/test/Fpage.java | Java | apache-2.0 | 3,031 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.oozie.action.hadoop;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.oozie.action.ActionExecutorException;
import org.apache.oozie.util.XLog;
import org.jdom.Element;
import org.jdom.Namespace;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
public abstract class ScriptLanguageActionExecutor extends JavaActionExecutor {
public ScriptLanguageActionExecutor(String type) {
super(type);
}
@Override
public List<Class<?>> getLauncherClasses() {
return null;
}
protected boolean shouldAddScriptToCache(){
return true;
}
@Override
protected Configuration setupLauncherConf(Configuration conf, Element actionXml, Path appPath, Context context)
throws ActionExecutorException {
super.setupLauncherConf(conf, actionXml, appPath, context);
if(shouldAddScriptToCache()) {
addScriptToCache(conf, actionXml, appPath, context);
}
return conf;
}
protected void addScriptToCache(Configuration conf, Element actionXml, Path appPath, Context context)
throws ActionExecutorException {
Namespace ns = actionXml.getNamespace();
String script = actionXml.getChild("script", ns).getTextTrim();
String name = new Path(script).getName();
String scriptContent = context.getProtoActionConf().get(this.getScriptName());
Path scriptFile = null;
if (scriptContent != null) { // Create script on filesystem if this is
// an http submission job;
FSDataOutputStream dos = null;
try {
Path actionPath = context.getActionDir();
scriptFile = new Path(actionPath, script);
FileSystem fs = context.getAppFileSystem();
dos = fs.create(scriptFile);
dos.write(scriptContent.getBytes(StandardCharsets.UTF_8));
addToCache(conf, actionPath, script + "#" + name, false);
}
catch (Exception ex) {
throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "FAILED_OPERATION", XLog
.format("Not able to write script file {0} on hdfs", scriptFile), ex);
}
finally {
try {
if (dos != null) {
dos.close();
}
}
catch (IOException ex) {
XLog.getLog(getClass()).error("Error: " + ex.getMessage());
}
}
}
else {
addToCache(conf, appPath, script + "#" + name, false);
}
}
protected abstract String getScriptName();
}
| cbaenziger/oozie | core/src/main/java/org/apache/oozie/action/hadoop/ScriptLanguageActionExecutor.java | Java | apache-2.0 | 3,702 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.lang.ant.config.execution;
import com.intellij.execution.filters.Filter;
import com.intellij.execution.filters.OpenFileHyperlinkInfo;
import com.intellij.execution.filters.TextConsoleBuilder;
import com.intellij.execution.filters.TextConsoleBuilderFactory;
import com.intellij.execution.process.ProcessHandler;
import com.intellij.execution.process.ProcessOutputTypes;
import com.intellij.execution.ui.ConsoleView;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VirtualFile;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.io.File;
import java.io.OutputStream;
public final class PlainTextView implements AntOutputView {
private final ConsoleView myConsole;
private final Project myProject;
private String myCommandLine;
private final LightProcessHandler myProcessHandler = new LightProcessHandler();
public PlainTextView(Project project) {
myProject = project;
TextConsoleBuilder builder = TextConsoleBuilderFactory.getInstance().createBuilder(project);
builder.addFilter(new AntMessageFilter());
builder.addFilter(new JUnitFilter());
myConsole = builder.getConsole();
myConsole.attachToProcess(myProcessHandler);
}
public void dispose() {
Disposer.dispose(myConsole);
}
@Override
public String getId() {
return "_text_view_";
}
@Override
public JComponent getComponent() {
return myConsole.getComponent();
}
@Override
@Nullable
public Object addMessage(AntMessage message) {
print(message.getText() + "\n", ProcessOutputTypes.STDOUT);
return null;
}
private void print(String text, Key type) {
myProcessHandler.notifyTextAvailable(text, type);
}
public void addMessages(AntMessage[] messages) {
for (AntMessage message : messages) {
addMessage(message);
}
}
@Override
public void addJavacMessage(AntMessage message, String url) {
if (message.getLine() > 0) {
String msg = TreeView.printMessage(message, url);
print(msg, ProcessOutputTypes.STDOUT);
}
print(message.getText(), ProcessOutputTypes.STDOUT);
}
@Override
public void addException(AntMessage exception, boolean showFullTrace) {
String text = exception.getText();
if (!showFullTrace) {
int index = text.indexOf("\r\n");
if (index != -1) {
text = text.substring(0, index) + "\n";
}
}
print(text, ProcessOutputTypes.STDOUT);
}
public void clearAllMessages() {
myConsole.clear();
}
@Override
public void startBuild(AntMessage message) {
print(myCommandLine + "\n", ProcessOutputTypes.SYSTEM);
addMessage(message);
}
@Override
public void buildFailed(AntMessage message) {
print(myCommandLine + "\n", ProcessOutputTypes.SYSTEM);
addMessage(message);
}
@Override
public void startTarget(AntMessage message) {
addMessage(message);
}
@Override
public void startTask(AntMessage message) {
addMessage(message);
}
@Override
public void finishBuild(String messageText) {
print("\n" + messageText + "\n", ProcessOutputTypes.SYSTEM);
}
@Override
public void finishTarget() {
}
@Override
public void finishTask() {
}
@Override
@Nullable
public Object getData(@NotNull String dataId) {
return null;
}
public void setBuildCommandLine(String commandLine) {
myCommandLine = commandLine;
}
private final class JUnitFilter implements Filter {
@Override
@Nullable
public Result applyFilter(String line, int entireLength) {
HyperlinkUtil.PlaceInfo placeInfo = HyperlinkUtil.parseJUnitMessage(myProject, line);
if (placeInfo == null) {
return null;
}
int textStartOffset = entireLength - line.length();
int highlightStartOffset = textStartOffset + placeInfo.getLinkStartIndex();
int highlightEndOffset = textStartOffset + placeInfo.getLinkEndIndex() + 1;
OpenFileHyperlinkInfo info = new OpenFileHyperlinkInfo(myProject, placeInfo.getFile(), placeInfo.getLine(), placeInfo.getColumn());
return new Result(highlightStartOffset, highlightEndOffset, info);
}
}
private final class AntMessageFilter implements Filter {
@Override
public Result applyFilter(String line, int entireLength) {
int afterLineNumberIndex = line.indexOf(": "); // end of file_name_and_line_number sequence
if (afterLineNumberIndex == -1) {
return null;
}
String fileAndLineNumber = line.substring(0, afterLineNumberIndex);
int index = fileAndLineNumber.lastIndexOf(':');
if (index == -1) {
return null;
}
final String fileName = fileAndLineNumber.substring(0, index);
String lineNumberStr = fileAndLineNumber.substring(index + 1).trim();
int lineNumber;
try {
lineNumber = Integer.parseInt(lineNumberStr);
}
catch (NumberFormatException e) {
return null;
}
final VirtualFile file = LocalFileSystem.getInstance().findFileByPath(fileName.replace(File.separatorChar, '/'));
if (file == null) {
return null;
}
int textStartOffset = entireLength - line.length();
int highlightEndOffset = textStartOffset + afterLineNumberIndex;
OpenFileHyperlinkInfo info = new OpenFileHyperlinkInfo(myProject, file, lineNumber - 1);
return new Result(textStartOffset, highlightEndOffset, info);
}
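    // Illustrative example (hypothetical path): an Ant output line such as
    //   /home/user/build.xml:12: Target "compile" does not exist
    // yields a hyperlink over "/home/user/build.xml:12" that opens the file at line 12.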
}
private static class LightProcessHandler extends ProcessHandler {
@Override
protected void destroyProcessImpl() {
throw new UnsupportedOperationException();
}
@Override
protected void detachProcessImpl() {
throw new UnsupportedOperationException();
}
@Override
public boolean detachIsDefault() {
return false;
}
@Override
@Nullable
public OutputStream getProcessInput() {
return null;
}
}
}
| mdanielwork/intellij-community | plugins/ant/src/com/intellij/lang/ant/config/execution/PlainTextView.java | Java | apache-2.0 | 6,686 |
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.command.script;
import java.util.Map;
import java.util.Map.Entry;
import javax.script.Bindings;
import javax.script.Invocable;
import javax.script.ScriptContext;
import javax.script.ScriptEngine;
import javax.script.ScriptException;
import com.orientechnologies.orient.core.Orient;
import com.orientechnologies.orient.core.command.OCommandExecutorAbstract;
import com.orientechnologies.orient.core.command.OCommandRequest;
import com.orientechnologies.orient.core.db.record.ODatabaseRecordTx;
import com.orientechnologies.orient.core.metadata.function.OFunction;
/**
* Executes Script Commands.
*
* @see OCommandScript
* @author Luca Garulli
*
*/
public class OCommandExecutorFunction extends OCommandExecutorAbstract {
protected OCommandFunction request;
public OCommandExecutorFunction() {
}
@SuppressWarnings("unchecked")
public OCommandExecutorFunction parse(final OCommandRequest iRequest) {
request = (OCommandFunction) iRequest;
return this;
}
public Object execute(final Map<Object, Object> iArgs) {
return executeInContext(null, iArgs);
}
public Object executeInContext(final Map<String, Object> iContext, final Map<Object, Object> iArgs) {
parserText = request.getText();
final ODatabaseRecordTx db = (ODatabaseRecordTx) getDatabase();
final OFunction f = db.getMetadata().getFunctionLibrary().getFunction(parserText);
final OScriptManager scriptManager = Orient.instance().getScriptManager();
final ScriptEngine scriptEngine = scriptManager.getEngine(f.getLanguage());
final Bindings binding = scriptManager.bind(scriptEngine, db, iContext, iArgs);
try {
scriptEngine.setBindings(binding, ScriptContext.ENGINE_SCOPE);
// COMPILE FUNCTION LIBRARY
scriptEngine.eval(scriptManager.getLibrary(db, f.getLanguage()));
if (scriptEngine instanceof Invocable) {
// INVOKE AS FUNCTION. PARAMS ARE PASSED BY POSITION
final Invocable invocableEngine = (Invocable) scriptEngine;
Object[] args = null;
if (iArgs != null) {
args = new Object[iArgs.size()];
int i = 0;
for (Entry<Object, Object> arg : iArgs.entrySet())
args[i++] = arg.getValue();
}
return invocableEngine.invokeFunction(parserText, args);
} else {
// INVOKE THE CODE SNIPPET
return scriptEngine.eval(invokeFunction(f, iArgs.values().toArray()), binding);
}
} catch (ScriptException e) {
throw new OCommandScriptException("Error on execution of the script", request.getText(), e.getColumnNumber(), e);
} catch (NoSuchMethodException e) {
throw new OCommandScriptException("Error on execution of the script", request.getText(), 0, e);
} finally {
scriptManager.unbind(binding);
}
}
public boolean isIdempotent() {
return false;
}
@Override
protected void throwSyntaxErrorException(String iText) {
throw new OCommandScriptException("Error on execution of the script: " + iText, request.getText(), 0);
}
protected String invokeFunction(final OFunction f, Object[] iArgs) {
final StringBuilder code = new StringBuilder();
code.append(f.getName());
code.append('(');
int i = 0;
for (Object a : iArgs) {
if (i++ > 0)
code.append(',');
code.append(a);
}
code.append(");");
return code.toString();
}
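  // Worked example (hypothetical function name and arguments): for a function named "sum" and
  // arguments [1, 2], invokeFunction(...) returns the snippet "sum(1,2);", which is then evaluated
  // by the script engine in the non-Invocable branch of executeInContext().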
}
| redox/OrientDB | core/src/main/java/com/orientechnologies/orient/core/command/script/OCommandExecutorFunction.java | Java | apache-2.0 | 4,208 |
import java.util.Scanner;
/**
* @author Oleg Cherednik
* @since 13.07.2018
*/
public class Solution {
    // Returns the index of the single character whose removal turns s into a palindrome,
    // or -1 if s is already a palindrome. The problem guarantees that removing at most
    // one character is always sufficient.
    static int palindromeIndex(String s) {
        for (int i = 0, j = s.length() - 1; i < j; i++, j--) {
            if (s.charAt(i) == s.charAt(j))
                continue;
            // Mismatch at (i, j): check whether skipping the character at j yields a palindrome.
            for (int k = i, m = j - 1; k < m; k++, m--)
                if (s.charAt(k) != s.charAt(m))
                    return i; // skipping j fails, so the character at i must be removed
            return j;
        }
        return -1;
    }
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) {
int q = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int qItr = 0; qItr < q; qItr++) {
String s = scanner.nextLine();
int result = palindromeIndex(s);
System.out.println(String.valueOf(result));
}
scanner.close();
}
}
| oleg-cherednik/hackerrank | Algorithms/Strings/Palindrome Index/Solution.java | Java | apache-2.0 | 921 |
package de.newsarea.homecockpit.connector.facade.registration.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.net.URL;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
public final class ClassLoaderHelper {
private static Logger log = LoggerFactory.getLogger(ClassLoaderHelper.class);
private ClassLoaderHelper() { }
public static Constructor<?> determineFirstConstructor(Class<?> clazz) {
try {
for(Constructor<?> constructor : clazz.getConstructors()) {
return constructor;
}
} catch (SecurityException e) {
log.error(e.getMessage(), e);
}
return null;
}
public static Constructor<?> determineConstructorByArgumentTypes(Class<?> clazz, Class<?>[] argumentTypes) {
try {
for(Constructor<?> constructor : clazz.getConstructors()) {
if(isAssignableFrom(constructor, argumentTypes)) {
return constructor;
}
}
} catch (SecurityException e) {
log.error(e.getMessage(), e);
}
return null;
}
private static boolean isAssignableFrom(Constructor<?> constructor, Class<?>[] argumentTypes) {
Class<?>[] constructorArgTypes = constructor.getParameterTypes();
if(constructorArgTypes.length != argumentTypes.length) {
return false;
}
// ~
for(int i=0; i < argumentTypes.length; i++) {
if(!argumentTypes[i].isAssignableFrom(constructorArgTypes[i])) {
return false;
}
}
return true;
}
public static List<Class<?>> determineClasses(String packageName) throws ClassNotFoundException, IOException {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
assert classLoader != null;
String path = packageName.replace('.', '/');
Enumeration<URL> resources = classLoader.getResources(path);
List<File> dirs = new ArrayList<>();
while (resources.hasMoreElements()) {
URL resource = resources.nextElement();
dirs.add(new File(resource.getFile().replaceAll("%20", " ")));
}
ArrayList<Class<?>> classes = new ArrayList<>();
for (File directory : dirs) {
classes.addAll(findClasses(directory, packageName));
}
return classes;
}
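    // Illustrative usage (hypothetical package name): scans the context class loader for all
    // classes under a package, including sub-packages, e.g.
    //
    //   List<Class<?>> classes = ClassLoaderHelper.determineClasses("de.newsarea.homecockpit.connector");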
public static List<Class<?>> findClasses(File directory, String packageName) throws ClassNotFoundException {
List<Class<?>> classes = new ArrayList<>();
if (!directory.exists()) {
return classes;
}
File[] files = directory.listFiles();
for (File file : files) {
if (file.isDirectory()) {
assert !file.getName().contains(".");
classes.addAll(findClasses(file, packageName + "." + file.getName()));
} else if (file.getName().endsWith(".class")) {
classes.add(Class.forName(packageName + '.' + file.getName().substring(0, file.getName().length() - 6)));
}
}
return classes;
}
public static Method determineSetterMethod(Class<?> clazz, String name) {
for(Method method : clazz.getMethods()) {
if(method.getName().equalsIgnoreCase("set" + name)) {
return method;
}
}
return null;
}
}
| RBernhardt/homecockpit-connectors | connectors-facade/src/main/java/de/newsarea/homecockpit/connector/facade/registration/util/ClassLoaderHelper.java | Java | apache-2.0 | 3,378 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Upendra Jariya
* @sponsor Douglas Johnson
* @version 1.0
* @since 2014-11-10
*/
package tools.datasync.utils;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.log4j.Logger;
import tools.datasync.api.utils.HashGenerator;
public class Md5HashGenerator implements HashGenerator {
private static Md5HashGenerator instance = null;
private static Logger LOG = Logger.getLogger(Md5HashGenerator.class
.getName());
private Md5HashGenerator() {
}
public static synchronized Md5HashGenerator getInstance() {
if (instance == null) {
instance = new Md5HashGenerator();
}
return instance;
}
public String generate(String data) {
try {
byte[] digest = DigestUtils.md5(data);
return (DigestUtils.md5Hex(digest));
} catch (Exception e) {
LOG.warn("Error while generating checksum on value [" + data + "]",
e);
return null;
}
}
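    // Illustrative usage (hypothetical input value): generate() produces a hex digest string and
    // validate() recomputes it for comparison, e.g.
    //
    //   HashGenerator hasher = Md5HashGenerator.getInstance();
    //   String hash = hasher.generate("some,row,values");
    //   boolean ok = hasher.validate("some,row,values", hash); // true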
public boolean validate(String data, String hash) {
String newHash = generate(data);
return newHash.equals(hash);
}
}
| datasynctools/sync-tools-prototype | data-sync-tools-core/src/main/java/tools/datasync/utils/Md5HashGenerator.java | Java | apache-2.0 | 1,858 |
package com.github.database.rider.core.script;
import org.assertj.core.api.SoftAssertions;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import javax.script.ScriptException;
import static org.assertj.core.api.Java6Assertions.assertThat;
public class ScriptEngineManagerWrapperTest {
@Rule
public ExpectedException exceptionRule = ExpectedException.none();
private ScriptEngineManagerWrapper scriptEngineManager = ScriptEngineManagerWrapper.getInstance();
private SoftAssertions softly = new SoftAssertions();
@Before
public void init() {
softly = new SoftAssertions();
}
@Test
public void shouldGetJsScriptResult() throws ScriptException {
Object scriptResult = ScriptEngineManagerWrapper.getInstance().getScriptResult("js: 1+1");
assertThat(scriptResult).isEqualTo(2);
}
@Test
public void shouldGetGroovyScriptResult() throws ScriptException {
Object scriptResult = scriptEngineManager.getScriptResult("groovy: 1+1");
assertThat(scriptResult).isEqualTo(2);
}
@Test
public void shouldNotGetScriptResultFromUnknownEngine() throws ScriptException {
exceptionRule.expect(RuntimeException.class);
exceptionRule.expectMessage("Could not find script engine by name 'kotlin'");
scriptEngineManager.getScriptResult("kotlin: 1+1");
}
@Test
public void shouldAssertValueGreaterThanZero() throws ScriptException {
String script = "js:(value > 0)";
softly.assertThat(scriptEngineManager.getScriptAssert(script, 2)).as("js script with value=2").isTrue();
softly.assertThat(scriptEngineManager.getScriptAssert(script, 0)).as("js script with value=0").isFalse();
softly.assertThat(scriptEngineManager.getScriptAssert(script, -1)).as("js script with value=-1").isFalse();
script = "groovy:(value > 0)";
softly.assertThat(scriptEngineManager.getScriptAssert(script, 2)).as("groovy script with value=2").isTrue();
softly.assertThat(scriptEngineManager.getScriptAssert(script, 0)).as("groovy script with value=0").isFalse();
softly.assertThat(scriptEngineManager.getScriptAssert(script, -1)).as("groovy script with value=-1").isFalse();
softly.assertAll();
}
@Test
public void shouldAssertNullValue() throws ScriptException {
SoftAssertions soft = new SoftAssertions();
String script = "js:(value == null)";
soft.assertThat(scriptEngineManager.getScriptAssert(script, null)).as("js script with null value").isTrue();
soft.assertThat(scriptEngineManager.getScriptAssert(script, 1)).as("js script with non-null value").isFalse();
script = "groovy:(value == null)";
soft.assertThat(scriptEngineManager.getScriptAssert(script, null)).as("groovy script with null value").isTrue();
soft.assertThat(scriptEngineManager.getScriptAssert(script, 1)).as("groovy script with non-null value").isFalse();
soft.assertAll();
}
@Test
public void shouldAssertContainsValue() throws ScriptException {
SoftAssertions soft = new SoftAssertions();
String script = "js:(value.contains('dbunit'))";
soft.assertThat(scriptEngineManager.getScriptAssert(script, "dbunit rules")).as("js script with 'dbunit rules' value").isTrue();
soft.assertThat(scriptEngineManager.getScriptAssert(script, "database rider rules")).as("js script 'database rider' value").isFalse();
script = "groovy:(value.contains('dbunit'))";
soft.assertThat(scriptEngineManager.getScriptAssert(script, "dbunit rules")).as("groovy script with 'dbunit rules' value").isTrue();
soft.assertThat(scriptEngineManager.getScriptAssert(script, "database rider rules")).as("groovy script 'database rider' value").isFalse();
soft.assertAll();
}
@Test
public void shouldNotAssertInvalidScript() throws ScriptException {
exceptionRule.expect(ScriptException.class);
exceptionRule.expectMessage("value.includes is not a function");
String script = "js:(value.includes('dbunit'))";
scriptEngineManager.getScriptAssert(script, "dbunit rules");
}
}
| database-rider/database-rider | rider-core/src/test/java/com/github/database/rider/core/script/ScriptEngineManagerWrapperTest.java | Java | apache-2.0 | 4,246 |
/*
* Copyright 2018 Google LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.cloud.tools.jib.api;
import java.util.Objects;
/** Holds credentials (username and password). */
public class Credential {
// If the username is set to <token>, the secret would be a refresh token.
// https://github.com/docker/cli/blob/master/docs/reference/commandline/login.md#credential-helper-protocol
public static final String OAUTH2_TOKEN_USER_NAME = "<token>";
/**
* Gets a {@link Credential} configured with a username and password.
*
* @param username the username
* @param password the password
* @return a new {@link Credential}
*/
public static Credential from(String username, String password) {
return new Credential(username, password);
}
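  // Illustrative usage (hypothetical values):
  //
  //   Credential credential = Credential.from("my-user", "my-password");
  //   boolean isToken = Credential.from(Credential.OAUTH2_TOKEN_USER_NAME, "refresh-token").isOAuth2RefreshToken(); // true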
private final String username;
private final String password;
private Credential(String username, String password) {
this.username = username;
this.password = password;
}
/**
* Gets the username.
*
* @return the username
*/
public String getUsername() {
return username;
}
/**
* Gets the password.
*
* @return the password
*/
public String getPassword() {
return password;
}
/**
* Check whether this credential is an OAuth 2.0 refresh token.
*
* @return true if this credential is an OAuth 2.0 refresh token.
*/
public boolean isOAuth2RefreshToken() {
return OAUTH2_TOKEN_USER_NAME.equals(username);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (!(other instanceof Credential)) {
return false;
}
Credential otherCredential = (Credential) other;
return username.equals(otherCredential.username) && password.equals(otherCredential.password);
}
@Override
public int hashCode() {
return Objects.hash(username, password);
}
@Override
public String toString() {
return username + ":" + password;
}
}
| GoogleContainerTools/jib | jib-core/src/main/java/com/google/cloud/tools/jib/api/Credential.java | Java | apache-2.0 | 2,498 |
/*
* Copyright (C) 2016 Mkhytar Mkhoian
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.justplay1.shoppist.interactor.units;
import com.justplay1.shoppist.executor.PostExecutionThread;
import com.justplay1.shoppist.executor.ThreadExecutor;
import com.justplay1.shoppist.models.UnitModel;
import com.justplay1.shoppist.repository.UnitsRepository;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import java.util.Collections;
import java.util.List;
import static com.justplay1.shoppist.ModelUtil.createFakeUnitModel;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
public class UpdateUnitsTest {
private UpdateUnits useCase;
@Mock private ThreadExecutor mockThreadExecutor;
@Mock private PostExecutionThread mockPostExecutionThread;
@Mock private UnitsRepository mockUnitsRepository;
private List<UnitModel> models;
@Before
public void setUp() {
MockitoAnnotations.initMocks(this);
useCase = new UpdateUnits(mockUnitsRepository, mockThreadExecutor, mockPostExecutionThread);
models = Collections.singletonList(createFakeUnitModel());
useCase.init(models);
}
@Test
public void updateUnitsUseCase_HappyCase() {
useCase.buildUseCaseObservable().subscribe();
verify(mockUnitsRepository).update(models);
verifyNoMoreInteractions(mockUnitsRepository);
verifyZeroInteractions(mockThreadExecutor);
verifyZeroInteractions(mockPostExecutionThread);
}
}
| justplay1/Shoppist | domain/src/test/java/com/justplay1/shoppist/interactor/units/UpdateUnitsTest.java | Java | apache-2.0 | 2,190 |
package fi.rivermouth.talous.auth;
import java.util.ArrayList;
import java.util.List;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.authority.SimpleGrantedAuthority;
import fi.rivermouth.talous.domain.User;
public class UserAuthenticationManager implements AuthenticationManager {
@Override
	public Authentication authenticate(Authentication authentication) {
		// Note: no credential verification happens here; every request is granted the user role.
		List<GrantedAuthority> grantedAuths = new ArrayList<GrantedAuthority>();
grantedAuths.add(new SimpleGrantedAuthority(User.ROLE));
return new UsernamePasswordAuthenticationToken(authentication.getName(), authentication.getCredentials(), grantedAuths);
}
}
| Rivermouth/Rivermouth-Talous | src/main/java/fi/rivermouth/talous/auth/UserAuthenticationManager.java | Java | apache-2.0 | 906 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.rest.action.admin.indices;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.metadata.AliasMetadata;
import org.elasticsearch.cluster.metadata.DataStreamAlias;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestBuilderListener;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.HEAD;
/**
* The REST handler for get alias and head alias APIs.
*/
public class RestGetAliasesAction extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(
new Route(GET, "/_alias"),
new Route(GET, "/_aliases"),
new Route(GET, "/_alias/{name}"),
new Route(HEAD, "/_alias/{name}"),
new Route(GET, "/{index}/_alias"),
new Route(HEAD, "/{index}/_alias"),
new Route(GET, "/{index}/_alias/{name}"),
new Route(HEAD, "/{index}/_alias/{name}"));
}
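    // Example requests resolved by the routes above (illustrative index/alias names):
    //   GET  /_alias/my-alias           -> aliases matching "my-alias" across all indices
    //   HEAD /my-index/_alias/my-alias  -> 200 if the alias exists on "my-index", 404 otherwise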
@Override
public String getName() {
return "get_aliases_action";
}
static RestResponse buildRestResponse(boolean aliasesExplicitlyRequested, String[] requestedAliases,
ImmutableOpenMap<String, List<AliasMetadata>> responseAliasMap,
Map<String, List<DataStreamAlias>> dataStreamAliases, XContentBuilder builder) throws Exception {
final Set<String> indicesToDisplay = new HashSet<>();
final Set<String> returnedAliasNames = new HashSet<>();
for (final ObjectObjectCursor<String, List<AliasMetadata>> cursor : responseAliasMap) {
for (final AliasMetadata aliasMetadata : cursor.value) {
if (aliasesExplicitlyRequested) {
// only display indices that have aliases
indicesToDisplay.add(cursor.key);
}
returnedAliasNames.add(aliasMetadata.alias());
}
}
        // compute explicitly requested aliases that are not returned in the result
final SortedSet<String> missingAliases = new TreeSet<>();
// first wildcard index, leading "-" as an alias name after this index means
// that it is an exclusion
int firstWildcardIndex = requestedAliases.length;
for (int i = 0; i < requestedAliases.length; i++) {
if (Regex.isSimpleMatchPattern(requestedAliases[i])) {
firstWildcardIndex = i;
break;
}
}
for (int i = 0; i < requestedAliases.length; i++) {
if (Metadata.ALL.equals(requestedAliases[i]) || Regex.isSimpleMatchPattern(requestedAliases[i])
|| (i > firstWildcardIndex && requestedAliases[i].charAt(0) == '-')) {
// only explicitly requested aliases will be called out as missing (404)
continue;
}
// check if aliases[i] is subsequently excluded
int j = Math.max(i + 1, firstWildcardIndex);
for (; j < requestedAliases.length; j++) {
if (requestedAliases[j].charAt(0) == '-') {
// this is an exclude pattern
if (Regex.simpleMatch(requestedAliases[j].substring(1), requestedAliases[i])
|| Metadata.ALL.equals(requestedAliases[j].substring(1))) {
// aliases[i] is excluded by aliases[j]
break;
}
}
}
if (j == requestedAliases.length) {
// explicitly requested aliases[i] is not excluded by any subsequent "-" wildcard in expression
if (false == returnedAliasNames.contains(requestedAliases[i])) {
// aliases[i] is not in the result set
missingAliases.add(requestedAliases[i]);
}
}
}
final RestStatus status;
builder.startObject();
{
if (missingAliases.isEmpty()) {
status = RestStatus.OK;
} else {
status = RestStatus.NOT_FOUND;
final String message;
if (missingAliases.size() == 1) {
message = String.format(Locale.ROOT, "alias [%s] missing", Strings.collectionToCommaDelimitedString(missingAliases));
} else {
message = String.format(Locale.ROOT, "aliases [%s] missing", Strings.collectionToCommaDelimitedString(missingAliases));
}
builder.field("error", message);
builder.field("status", status.getStatus());
}
for (final var entry : responseAliasMap) {
if (aliasesExplicitlyRequested == false || (aliasesExplicitlyRequested && indicesToDisplay.contains(entry.key))) {
builder.startObject(entry.key);
{
builder.startObject("aliases");
{
for (final AliasMetadata alias : entry.value) {
AliasMetadata.Builder.toXContent(alias, builder, ToXContent.EMPTY_PARAMS);
}
}
builder.endObject();
}
builder.endObject();
}
}
for (var entry : dataStreamAliases.entrySet()) {
builder.startObject(entry.getKey());
{
builder.startObject("aliases");
{
for (DataStreamAlias alias : entry.getValue()) {
builder.startObject(alias.getName());
builder.endObject();
}
}
builder.endObject();
}
builder.endObject();
}
}
builder.endObject();
return new BytesRestResponse(status, builder);
}
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        // The TransportGetAliasesAction was improved to do the same post processing as is happening here.
        // We can't remove this logic yet to support mixed clusters. We should be able to remove this logic here
        // when 8.0 becomes the new version in the master branch.
final boolean namesProvided = request.hasParam("name");
final String[] aliases = request.paramAsStringArrayOrEmptyIfAll("name");
final GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases);
final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
getAliasesRequest.indices(indices);
getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions()));
getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local()));
//we may want to move this logic to TransportGetAliasesAction but it is based on the original provided aliases, which will
//not always be available there (they may get replaced so retrieving request.aliases is not quite the same).
return channel -> client.admin().indices().getAliases(getAliasesRequest, new RestBuilderListener<GetAliasesResponse>(channel) {
@Override
public RestResponse buildResponse(GetAliasesResponse response, XContentBuilder builder) throws Exception {
return buildRestResponse(namesProvided, aliases, response.getAliases(), response.getDataStreamAliases(), builder);
}
});
}
}
| robin13/elasticsearch | server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java | Java | apache-2.0 | 9,110 |
package com.ejlchina.searcher.implement;
import com.ejlchina.searcher.*;
import com.ejlchina.searcher.bean.InheritType;
import java.lang.reflect.Field;
import java.util.*;
import java.lang.reflect.Modifier;
import java.util.concurrent.ConcurrentHashMap;
/***
 * Default bean metadata resolver
 * @author Troy.Zhou @ 2021-10-30
 * @since v3.0.0
 */
public class DefaultMetaResolver implements MetaResolver {
private final Map<Class<?>, BeanMeta<?>> cache = new ConcurrentHashMap<>();
private SnippetResolver snippetResolver = new DefaultSnippetResolver();
private DbMapping dbMapping;
public DefaultMetaResolver() {
this(new DefaultDbMapping());
}
public DefaultMetaResolver(DbMapping dbMapping) {
this.dbMapping = dbMapping;
}
@Override
public <T> BeanMeta<T> resolve(Class<T> beanClass) {
@SuppressWarnings("unchecked")
BeanMeta<T> beanMeta = (BeanMeta<T>) cache.get(beanClass);
if (beanMeta != null) {
return beanMeta;
}
synchronized (cache) {
beanMeta = resolveMetadata(beanClass);
cache.put(beanClass, beanMeta);
return beanMeta;
}
}
protected <T> BeanMeta<T> resolveMetadata(Class<T> beanClass) {
DbMapping.Table table = dbMapping.table(beanClass);
if (table == null) {
throw new SearchException("The class [" + beanClass.getName() + "] can not be searched, because it can not be resolved by " + dbMapping.getClass());
}
BeanMeta<T> beanMeta = new BeanMeta<>(beanClass, table.getDataSource(),
snippetResolver.resolve(table.getTables()),
snippetResolver.resolve(table.getJoinCond()),
snippetResolver.resolve(table.getGroupBy()),
table.isDistinct());
        // Resolve the bean fields
Field[] fields = getBeanFields(beanClass);
for (int index = 0; index < fields.length; index++) {
Field field = fields[index];
if (Modifier.isStatic(field.getModifiers())) {
continue;
}
DbMapping.Column column = dbMapping.column(beanClass, fields[index]);
if (column == null) {
continue;
}
field.setAccessible(true);
SqlSnippet snippet = snippetResolver.resolve(column.getFieldSql());
            // Note: aliases in Oracle databases must not start with an underscore
FieldMeta fieldMeta = new FieldMeta(beanMeta, field, snippet, "c_" + index,
column.isConditional(), column.getOnlyOn());
beanMeta.addFieldMeta(field.getName(), fieldMeta);
}
if (beanMeta.getFieldCount() == 0) {
throw new SearchException("[" + beanClass.getName() + "] is not a valid SearchBean, because there is no field mapping to database.");
}
return beanMeta;
}
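    // Collects the bean's declared fields and, when the mapping's InheritType includes field
    // inheritance (FIELD or ALL), walks up the superclass chain as well; synthetic, static and
    // transient fields are skipped, and a field declared in a subclass shadows a same-named
    // superclass field.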
protected Field[] getBeanFields(Class<?> beanClass) {
InheritType iType = dbMapping.inheritType(beanClass);
List<Field> fieldList = new ArrayList<>();
Set<String> fieldNames = new HashSet<>();
while (beanClass != Object.class) {
for (Field field : beanClass.getDeclaredFields()) {
String name = field.getName();
int modifiers = field.getModifiers();
if (field.isSynthetic() || Modifier.isStatic(modifiers)
|| Modifier.isTransient(modifiers)
|| fieldNames.contains(name)) {
continue;
}
fieldList.add(field);
fieldNames.add(name);
}
if (iType != InheritType.FIELD && iType != InheritType.ALL) {
break;
}
beanClass = beanClass.getSuperclass();
}
return fieldList.toArray(new Field[0]);
}
public SnippetResolver getSnippetResolver() {
return snippetResolver;
}
public void setSnippetResolver(SnippetResolver snippetResolver) {
this.snippetResolver = Objects.requireNonNull(snippetResolver);
}
public DbMapping getDbMapping() {
return dbMapping;
}
public void setDbMapping(DbMapping dbMapping) {
this.dbMapping = Objects.requireNonNull(dbMapping);
}
}
| ejlchina/bean-searcher | bean-searcher/src/main/java/com/ejlchina/searcher/implement/DefaultMetaResolver.java | Java | apache-2.0 | 4,320 |
/*
* Copyright 2017-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.lettuce.core.cluster.api.async;
import java.util.List;
import java.util.Set;
import io.lettuce.core.GeoAddArgs;
import io.lettuce.core.GeoArgs;
import io.lettuce.core.GeoCoordinates;
import io.lettuce.core.GeoRadiusStoreArgs;
import io.lettuce.core.GeoSearch;
import io.lettuce.core.GeoValue;
import io.lettuce.core.GeoWithin;
import io.lettuce.core.Value;
/**
* Asynchronous executed commands on a node selection for the Geo-API.
*
* @author Mark Paluch
* @since 4.0
* @generated by io.lettuce.apigenerator.CreateAsyncNodeSelectionClusterApi
*/
public interface NodeSelectionGeoAsyncCommands<K, V> {
/**
* Single geo add.
*
* @param key the key of the geo set.
* @param longitude the longitude coordinate according to WGS84.
* @param latitude the latitude coordinate according to WGS84.
* @param member the member to add.
* @return Long integer-reply the number of elements that were added to the set.
*/
AsyncExecutions<Long> geoadd(K key, double longitude, double latitude, V member);
/**
* Single geo add.
*
* @param key the key of the geo set.
* @param longitude the longitude coordinate according to WGS84.
* @param latitude the latitude coordinate according to WGS84.
* @param member the member to add.
* @param args additional arguments.
* @return Long integer-reply the number of elements that were added to the set.
* @since 6.1
*/
AsyncExecutions<Long> geoadd(K key, double longitude, double latitude, V member, GeoAddArgs args);
/**
* Multi geo add.
*
* @param key the key of the geo set.
* @param lngLatMember triplets of double longitude, double latitude and V member.
* @return Long integer-reply the number of elements that were added to the set.
*/
AsyncExecutions<Long> geoadd(K key, Object... lngLatMember);
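    // A minimal usage sketch (the key and member values below are hypothetical, not part of this API):
    // the varargs form expects repeating (longitude, latitude, member) triplets, e.g.
    //   geoadd("Sicily", 13.361389, 38.115556, "Palermo", 15.087269, 37.502669, "Catania")
    // adds two members in a single call.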
/**
* Multi geo add.
*
* @param key the key of the geo set.
* @param values {@link io.lettuce.core.GeoValue} values to add.
* @return Long integer-reply the number of elements that were added to the set.
* @since 6.1
*/
AsyncExecutions<Long> geoadd(K key, GeoValue<V>... values);
/**
* Multi geo add.
*
* @param key the key of the geo set.
* @param args additional arguments.
* @param lngLatMember triplets of double longitude, double latitude and V member.
* @return Long integer-reply the number of elements that were added to the set.
* @since 6.1
*/
AsyncExecutions<Long> geoadd(K key, GeoAddArgs args, Object... lngLatMember);
/**
* Multi geo add.
*
* @param key the key of the geo set.
* @param args additional arguments.
* @param values {@link io.lettuce.core.GeoValue} values to add.
* @return Long integer-reply the number of elements that were added to the set.
* @since 6.1
*/
AsyncExecutions<Long> geoadd(K key, GeoAddArgs args, GeoValue<V>... values);
/**
* Retrieve distance between points {@code from} and {@code to}. If one or more elements are missing {@code null} is
     * returned. The distance defaults to meters, otherwise it is expressed according to {@code unit}.
*
* @param key the key of the geo set.
* @param from from member.
* @param to to member.
* @param unit distance unit.
* @return distance between points {@code from} and {@code to}. If one or more elements are missing {@code null} is
* returned.
*/
AsyncExecutions<Double> geodist(K key, V from, V to, GeoArgs.Unit unit);
/**
* Retrieve Geohash strings representing the position of one or more elements in a sorted set value representing a
* geospatial index.
*
* @param key the key of the geo set.
* @param members the members.
* @return bulk reply Geohash strings in the order of {@code members}. Returns {@code null} if a member is not found.
*/
AsyncExecutions<List<Value<String>>> geohash(K key, V... members);
/**
* Get geo coordinates for the {@code members}.
*
* @param key the key of the geo set.
* @param members the members.
* @return a list of {@link GeoCoordinates}s representing the x,y position of each element specified in the arguments. For
* missing elements {@code null} is returned.
*/
AsyncExecutions<List<GeoCoordinates>> geopos(K key, V... members);
/**
* Retrieve members selected by distance with the center of {@code longitude} and {@code latitude}.
*
* @param key the key of the geo set.
* @param longitude the longitude coordinate according to WGS84.
* @param latitude the latitude coordinate according to WGS84.
* @param distance radius distance.
* @param unit distance unit.
* @return bulk reply.
*/
AsyncExecutions<Set<V>> georadius(K key, double longitude, double latitude, double distance, GeoArgs.Unit unit);
/**
* Retrieve members selected by distance with the center of {@code longitude} and {@code latitude}.
*
* @param key the key of the geo set.
* @param longitude the longitude coordinate according to WGS84.
* @param latitude the latitude coordinate according to WGS84.
* @param distance radius distance.
* @param unit distance unit.
* @param geoArgs args to control the result.
* @return nested multi-bulk reply. The {@link GeoWithin} contains only fields which were requested by {@link GeoArgs}.
*/
AsyncExecutions<List<GeoWithin<V>>> georadius(K key, double longitude, double latitude, double distance, GeoArgs.Unit unit, GeoArgs geoArgs);
/**
* Perform a {@link #georadius(Object, double, double, double, GeoArgs.Unit, GeoArgs)} query and store the results in a
* sorted set.
*
* @param key the key of the geo set.
* @param longitude the longitude coordinate according to WGS84.
* @param latitude the latitude coordinate according to WGS84.
* @param distance radius distance.
* @param unit distance unit.
* @param geoRadiusStoreArgs args to store either the resulting elements with their distance or the resulting elements with
* their locations a sorted set.
* @return Long integer-reply the number of elements in the result.
*/
AsyncExecutions<Long> georadius(K key, double longitude, double latitude, double distance, GeoArgs.Unit unit, GeoRadiusStoreArgs<K> geoRadiusStoreArgs);
/**
* Retrieve members selected by distance with the center of {@code member}. The member itself is always contained in the
* results.
*
* @param key the key of the geo set.
* @param member reference member.
* @param distance radius distance.
* @param unit distance unit.
* @return set of members.
*/
AsyncExecutions<Set<V>> georadiusbymember(K key, V member, double distance, GeoArgs.Unit unit);
/**
* Retrieve members selected by distance with the center of {@code member}. The member itself is always contained in the
* results.
*
* @param key the key of the geo set.
* @param member reference member.
* @param distance radius distance.
* @param unit distance unit.
* @param geoArgs args to control the result.
* @return nested multi-bulk reply. The {@link GeoWithin} contains only fields which were requested by {@link GeoArgs}.
*/
AsyncExecutions<List<GeoWithin<V>>> georadiusbymember(K key, V member, double distance, GeoArgs.Unit unit, GeoArgs geoArgs);
/**
* Perform a {@link #georadiusbymember(Object, Object, double, GeoArgs.Unit, GeoArgs)} query and store the results in a
* sorted set.
*
* @param key the key of the geo set.
* @param member reference member.
* @param distance radius distance.
* @param unit distance unit.
* @param geoRadiusStoreArgs args to store either the resulting elements with their distance or the resulting elements with
* their locations a sorted set.
* @return Long integer-reply the number of elements in the result.
*/
AsyncExecutions<Long> georadiusbymember(K key, V member, double distance, GeoArgs.Unit unit, GeoRadiusStoreArgs<K> geoRadiusStoreArgs);
/**
     * Retrieve members selected by distance from the center given by {@code reference}, using the search {@code predicate}.
* Use {@link GeoSearch} to create reference and predicate objects.
*
* @param key the key of the geo set.
* @param reference the reference member or longitude/latitude coordinates.
* @param predicate the bounding box or radius to search in.
* @return bulk reply.
* @since 6.1
*/
AsyncExecutions<Set<V>> geosearch(K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate);
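    // Illustrative sketch, assuming the GeoSearch factory methods documented for Lettuce 6.1
    // (fromMember/fromCoordinates for the reference, byRadius/byBox for the predicate); key and
    // member values are hypothetical:
    //   geosearch("Sicily", GeoSearch.fromMember("Palermo"), GeoSearch.byRadius(200, GeoArgs.Unit.km))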
/**
     * Retrieve members selected by distance from the center given by {@code reference}, using the search {@code predicate}.
* Use {@link GeoSearch} to create reference and predicate objects.
*
* @param key the key of the geo set.
* @param reference the reference member or longitude/latitude coordinates.
* @param predicate the bounding box or radius to search in.
* @param geoArgs args to control the result.
* @return nested multi-bulk reply. The {@link GeoWithin} contains only fields which were requested by {@link GeoArgs}.
* @since 6.1
*/
AsyncExecutions<List<GeoWithin<V>>> geosearch(K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate, GeoArgs geoArgs);
/**
* Perform a {@link #geosearch(Object, GeoSearch.GeoRef, GeoSearch.GeoPredicate, GeoArgs)} query and store the results in a
* sorted set.
*
* @param destination the destination where to store results.
* @param key the key of the geo set.
* @param reference the reference member or longitude/latitude coordinates.
* @param predicate the bounding box or radius to search in.
* @param geoArgs args to control the result.
* @param storeDist stores the items in a sorted set populated with their distance from the center of the circle or box, as a floating-point number, in the same unit specified for that shape.
* @return Long integer-reply the number of elements in the result.
* @since 6.1
*/
AsyncExecutions<Long> geosearchstore(K destination, K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate, GeoArgs geoArgs, boolean storeDist);
}
| lettuce-io/lettuce-core | src/main/java/io/lettuce/core/cluster/api/async/NodeSelectionGeoAsyncCommands.java | Java | apache-2.0 | 11,078 |
/**
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gravitee.gateway.services.sync.cache;
import com.hazelcast.core.HazelcastInstance;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.Map;
/**
* @author David BRASSELY (david.brassely at graviteesource.com)
* @author GraviteeSource Team
*/
public final class CacheManager {
@Autowired
private HazelcastInstance hzInstance;
public <K, V> Map<K, V> getCache(String name) {
return hzInstance.getMap(name);
}
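    // Usage sketch (the cache name is hypothetical):
    //   Map<String, Object> cache = cacheManager.getCache("api-keys");
    // The returned Map is the Hazelcast distributed IMap registered under the given name.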
}
| gravitee-io/gateway | gravitee-gateway-services/gravitee-gateway-services-sync/src/main/java/io/gravitee/gateway/services/sync/cache/CacheManager.java | Java | apache-2.0 | 1,110 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.codecommit.model;
import javax.annotation.Generated;
/**
* <p>
* The number of approvals required for the approval rule exceeds the maximum number allowed.
* </p>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class MaximumNumberOfApprovalsExceededException extends com.amazonaws.services.codecommit.model.AWSCodeCommitException {
private static final long serialVersionUID = 1L;
/**
* Constructs a new MaximumNumberOfApprovalsExceededException with the specified error message.
*
* @param message
* Describes the error encountered.
*/
public MaximumNumberOfApprovalsExceededException(String message) {
super(message);
}
}
| aws/aws-sdk-java | aws-java-sdk-codecommit/src/main/java/com/amazonaws/services/codecommit/model/MaximumNumberOfApprovalsExceededException.java | Java | apache-2.0 | 1,318 |
package org.myrobotlab.framework;
import static org.myrobotlab.framework.StatusLevel.DEBUG;
import static org.myrobotlab.framework.StatusLevel.ERROR;
import static org.myrobotlab.framework.StatusLevel.INFO;
import static org.myrobotlab.framework.StatusLevel.SUCCESS;
import static org.myrobotlab.framework.StatusLevel.WARN;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.Serializable;
import java.io.StringWriter;
import java.util.Objects;
import org.myrobotlab.codec.CodecUtils;
import org.myrobotlab.logging.Level;
import org.myrobotlab.logging.LoggerFactory;
import org.myrobotlab.logging.LoggingFactory;
import org.slf4j.Logger;
/**
 * The goal is to have a very simple POJO with only a few native Java helper
 * methods. WARNING !!! - this class used to extend Exception or Throwable, but
 * the gson serializer would stack overflow with a self-reference issue.
*
* TODO - allow radix tree searches for "keys" ???
*
*/
public class Status implements Serializable {// extends Exception {
private static final long serialVersionUID = 1L;
public final static Logger log = LoggerFactory.getLogger(Status.class);
public String name; // service name ???
/**
   * FIXME - should probably be an enum now that serialization mostly works
   * with enums. [debug|info|warn|error|success] - yes, the last part is different
   * from "logging" but could still be a status...
*
*/
public String level;
/**
* The key is the non changing part and good identifier of what went on... For
* Exceptions I would recommend the Exception.class.getSimpleName() for the
* key, whilst the "detail" is for "changing" detail. This becomes important
* when Stati are aggregated - and humans are interested in "high" counts of
* specific Status while the details are not important unless diagnosing one.
*
* Violating Servo limits is a good example - "key" can be "Outside servo
* limits". The key can contain spaces and punctuation - the important part is
* that it is STATIC.
*
* "details" contain dynamic specifics - for example: "key":"Outside servo
* limits", "detail":"servo01 moveTo(75) limit is greater than 100"
*/
public String key;
/**
   * Dynamic or verbose explanation of the status, e.g. "detail":"servo01
* moveTo(75) limit is greater than 100" or complete stack trace from an
* exception
*/
public String detail;
/**
* optional source of status
*/
public Object source;
// --- static creation of typed Status objects ----
public static Status debug(String format, Object... args) {
Status status = new Status(String.format(format, args));
status.level = DEBUG;
return status;
}
public static Status error(Exception e) {
Status s = new Status(e);
s.level = ERROR;
return s;
}
public static Status error(String msg) {
Status s = new Status(msg);
s.level = ERROR;
return s;
}
public static Status error(String format, Object... args) {
Status status = new Status(String.format(format, args));
status.level = ERROR;
return status;
}
public static Status warn(String msg) {
Status s = new Status(msg);
    s.level = WARN;
return s;
}
public static Status warn(String format, Object... args) {
Status status = new Status(String.format(format, args));
status.level = WARN;
return status;
}
public static Status info(String msg) {
Status s = new Status(msg);
s.level = INFO;
return s;
}
public static Status info(String format, Object... args) {
String formattedInfo = String.format(format, args);
Status status = new Status(formattedInfo);
status.level = INFO;
return status;
}
public final static String stackToString(final Throwable e) {
StringWriter sw;
try {
sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
} catch (Exception e2) {
return "bad stackToString";
}
return "------\r\n" + sw.toString() + "------\r\n";
}
public Status(Exception e) {
this.level = ERROR;
StringWriter sw;
try {
sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
detail = sw.toString();
} catch (Exception e2) {
}
this.key = String.format("%s - %s", e.getClass().getSimpleName(), e.getMessage());
}
public Status(Status s) {
if (s == null) {
return;
}
this.name = s.name;
this.level = s.level;
this.key = s.key;
this.detail = s.detail;
}
/**
   * With only a detail string provided, the level is assumed to be ERROR and the
   * given text becomes the detail of that error.
   * 
   * @param detail
   *          the detail text
*/
public Status(String detail) {
this.level = ERROR;
this.detail = detail;
}
public Status(String name, String level, String key, String detail) {
this.name = name;
this.level = level;
this.key = key;
this.detail = detail;
}
public boolean isDebug() {
return DEBUG.equals(level);
}
public boolean isError() {
return ERROR.equals(level);
}
public boolean isInfo() {
return INFO.equals(level);
}
public boolean isWarn() {
return WARN.equals(level);
}
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
if (name != null) {
sb.append(name);
sb.append(" ");
}
if (level != null) {
sb.append(level);
sb.append(" ");
}
if (key != null) {
sb.append(key);
sb.append(" ");
}
if (detail != null) {
sb.append(detail);
}
return sb.toString();
}
static public final Status newInstance(String name, String level, String key, String detail) {
Status s = new Status(name, level, key, detail);
return s;
}
@Override
public boolean equals(Object o) {
if (o == this)
return true;
if (!(o instanceof Status)) {
return false;
}
Status status = (Status) o;
return Objects.equals(name, status.name) && Objects.equals(level, status.level) && Objects.equals(key, status.key) && Objects.equals(detail, status.detail);
}
@Override
public int hashCode() {
return Objects.hash(name, level, key, detail);
}
public static void main(String[] args) throws IOException, InterruptedException {
LoggingFactory.init(Level.INFO);
Status test = new Status("i am pessimistic");
// Status subTest = new Status("i am sub pessimistic");
// test.add(subTest);
String json = CodecUtils.toJson(test);
Status z = CodecUtils.fromJson(json, Status.class);
log.info(json);
log.info(z.toString());
}
public static Status success() {
Status s = new Status(SUCCESS);
s.level = SUCCESS;
return s;
}
public boolean isSuccess() {
return SUCCESS.equals(level);
}
public static Status success(String detail) {
Status s = new Status(SUCCESS);
s.level = SUCCESS;
s.detail = detail;
return s;
}
}
| MyRobotLab/myrobotlab | src/main/java/org/myrobotlab/framework/Status.java | Java | apache-2.0 | 7,254 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tez.dag.api;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.tez.common.TezCommonUtils;
import org.apache.tez.dag.api.records.DAGProtos.TezEntityDescriptorProto;
import org.junit.Assert;
import org.junit.Test;
public class TestDagTypeConverters {
@Test(timeout = 5000)
public void testTezEntityDescriptorSerialization() throws IOException {
UserPayload payload = UserPayload.create(ByteBuffer.wrap(new String("Foobar").getBytes()), 100);
String historytext = "Bar123";
EntityDescriptor entityDescriptor =
InputDescriptor.create("inputClazz").setUserPayload(payload)
.setHistoryText(historytext);
TezEntityDescriptorProto proto =
DagTypeConverters.convertToDAGPlan(entityDescriptor);
Assert.assertEquals(payload.getVersion(), proto.getTezUserPayload().getVersion());
Assert.assertArrayEquals(payload.deepCopyAsArray(), proto.getTezUserPayload().getUserPayload().toByteArray());
Assert.assertTrue(proto.hasHistoryText());
Assert.assertNotEquals(historytext, proto.getHistoryText());
Assert.assertEquals(historytext, new String(
TezCommonUtils.decompressByteStringToByteArray(proto.getHistoryText())));
// Ensure that the history text is not deserialized
InputDescriptor inputDescriptor =
DagTypeConverters.convertInputDescriptorFromDAGPlan(proto);
Assert.assertNull(inputDescriptor.getHistoryText());
}
}
| Altiscale/tez | tez-api/src/test/java/org/apache/tez/dag/api/TestDagTypeConverters.java | Java | apache-2.0 | 2,256 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.jetty;
import java.util.Map;
import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.impl.JndiRegistry;
import org.junit.Test;
/**
* @version
*/
public class HttpFilterCamelHeadersTest extends BaseJettyTest {
@Test
public void testFilterCamelHeaders() throws Exception {
Exchange out = template.send("http://localhost:{{port}}/test/filter", new Processor() {
public void process(Exchange exchange) throws Exception {
exchange.getIn().setBody("Claus");
exchange.getIn().setHeader("bar", 123);
}
});
assertNotNull(out);
assertEquals("Hi Claus", out.getOut().getBody(String.class));
// there should be no internal Camel headers
// except for the response code
Map<String, Object> headers = out.getOut().getHeaders();
for (String key : headers.keySet()) {
if (!key.equalsIgnoreCase(Exchange.HTTP_RESPONSE_CODE)) {
assertTrue("Should not contain any Camel internal headers", !key.toLowerCase().startsWith("camel"));
} else {
assertEquals(200, headers.get(Exchange.HTTP_RESPONSE_CODE));
}
}
}
@Override
protected JndiRegistry createRegistry() throws Exception {
JndiRegistry jndi = super.createRegistry();
jndi.bind("foo", new MyFooBean());
return jndi;
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from("jetty:http://localhost:{{port}}/test/filter").beanRef("foo");
}
};
}
public static class MyFooBean {
public String hello(String name) {
return "Hi " + name;
}
}
}
| everttigchelaar/camel-svn | components/camel-jetty/src/test/java/org/apache/camel/component/jetty/HttpFilterCamelHeadersTest.java | Java | apache-2.0 | 2,847 |
package org.lightadmin.core.view.preparer;
import org.apache.tiles.AttributeContext;
import org.apache.tiles.context.TilesRequestContext;
import org.lightadmin.core.config.domain.DomainTypeAdministrationConfiguration;
public class FormViewPreparer extends ConfigurationAwareViewPreparer {
@Override
protected void execute(final TilesRequestContext tilesContext, final AttributeContext attributeContext, final DomainTypeAdministrationConfiguration configuration) {
super.execute(tilesContext, attributeContext, configuration);
addAttribute(attributeContext, "fields", configuration.getFormViewFragment().getFields());
}
} | pramoth/light-admin | lightadmin-core/src/main/java/org/lightadmin/core/view/preparer/FormViewPreparer.java | Java | apache-2.0 | 652 |
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.unitime.timetable.solver.exam.ui;
import java.io.PrintWriter;
import java.io.Serializable;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.Vector;
import javax.servlet.jsp.JspWriter;
import org.cpsolver.exam.model.Exam;
import org.cpsolver.exam.model.ExamDistributionConstraint;
import org.cpsolver.exam.model.ExamInstructor;
import org.cpsolver.exam.model.ExamPlacement;
import org.cpsolver.exam.model.ExamRoom;
import org.cpsolver.exam.model.ExamRoomPlacement;
import org.cpsolver.exam.model.ExamStudent;
import org.cpsolver.ifs.extension.AssignedValue;
import org.cpsolver.ifs.extension.ConflictStatistics;
import org.cpsolver.ifs.model.Constraint;
import org.dom4j.Element;
import org.unitime.timetable.model.PreferenceLevel;
import org.unitime.timetable.solver.ui.TimetableInfo;
import org.unitime.timetable.webutil.timegrid.ExamGridTable;
/**
* @author Tomas Muller
*/
public class ExamConflictStatisticsInfo implements TimetableInfo, Serializable {
private static final long serialVersionUID = 7L;
public static int sVersion = 7; // to be able to do some changes in the future
public static final int sConstraintTypeRoom = 1;
public static final int sConstraintTypeInstructor = 2;
public static final int sConstraintTypeGroup = 3;
public static final int sConstraintTypeStudent = 4;
private Hashtable iVariables = new Hashtable();
public Collection getCBS() { return iVariables.values(); }
public CBSVariable getCBS(Long classId) { return (CBSVariable)iVariables.get(classId); }
public void load(ConflictStatistics cbs) {
load(cbs, null);
}
public ExamConflictStatisticsInfo getConflictStatisticsSubInfo(Vector variables) {
ExamConflictStatisticsInfo ret = new ExamConflictStatisticsInfo();
for (Enumeration e=variables.elements();e.hasMoreElements();) {
Exam exam = (Exam)e.nextElement();
CBSVariable var = (CBSVariable)iVariables.get(exam.getId());
if (var!=null)
ret.iVariables.put(exam.getId(),var);
}
return ret;
}
public void merge(ExamConflictStatisticsInfo info) {
if (info!=null) iVariables.putAll(info.iVariables);
}
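    // Rebuilds the conflict-based statistics tree from the solver data: each recorded no-good is
    // grouped as exam (variable) -> placement (period + rooms) -> violated constraint (room,
    // instructor, student or distribution) -> conflicting assignment, with counters propagated
    // bottom-up through incCounter().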
public void load(ConflictStatistics cbs, Long examId) {
iVariables.clear();
for (Iterator i1=cbs.getNoGoods().entrySet().iterator();i1.hasNext();) {
Map.Entry entry = (Map.Entry)i1.next();
AssignedValue assignment = (AssignedValue)entry.getKey();
ExamPlacement placement = (ExamPlacement)assignment.getValue();
Exam exam = (Exam)placement.variable();
if (examId!=null && !examId.equals(exam.getId())) continue;
CBSVariable var = (CBSVariable)iVariables.get(exam.getId());
if (var==null) {
String pref = PreferenceLevel.sNeutral;//SolverGridModel.hardConflicts2pref(exam,null);
var = new CBSVariable(exam.getId(),exam.getName(),pref);
iVariables.put(exam.getId(),var);
}
Vector roomIds = new Vector();
Vector roomNames = new Vector();
Vector roomPrefs = new Vector();
for (Iterator i=new TreeSet(placement.getRoomPlacements()).iterator();i.hasNext();) {
ExamRoomPlacement room = (ExamRoomPlacement)i.next();
roomIds.add(room.getId());
roomNames.add(room.getName());
roomPrefs.add(exam.getRoomPlacements().size()==placement.getRoomPlacements().size()?PreferenceLevel.sIntLevelRequired:room.getPenalty(placement.getPeriod()));
}
CBSValue val = new CBSValue(var,
placement.getPeriod().getId(),
placement.getPeriod().getDayStr()+" "+placement.getPeriod().getTimeStr(),
(exam.getPeriodPlacements().size()==1?PreferenceLevel.sIntLevelRequired:placement.getPeriodPlacement().getPenalty()),
roomIds, roomNames, roomPrefs);
var.values().add(val);
List noGoods = (List)entry.getValue();
Hashtable constr2assignments = new Hashtable();
for (Iterator e2=noGoods.iterator();e2.hasNext();) {
AssignedValue noGood = (AssignedValue)e2.next();
if (noGood.getConstraint()==null) continue;
Vector aaa = (Vector)constr2assignments.get(noGood.getConstraint());
if (aaa == null) {
aaa = new Vector();
constr2assignments.put(noGood.getConstraint(), aaa);
}
aaa.addElement(noGood);
}
for (Iterator i2=constr2assignments.entrySet().iterator();i2.hasNext();) {
Map.Entry entry2 = (Map.Entry)i2.next();
Constraint constraint = (Constraint)entry2.getKey();
Vector noGoodsThisConstraint = (Vector)entry2.getValue();
CBSConstraint con = null;
if (constraint instanceof ExamRoom) {
con = new CBSConstraint(val, sConstraintTypeRoom, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired);
} else if (constraint instanceof ExamInstructor) {
con = new CBSConstraint(val, sConstraintTypeInstructor, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired);
} else if (constraint instanceof ExamStudent) {
con = new CBSConstraint(val, sConstraintTypeStudent, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired);
} else if (constraint instanceof ExamDistributionConstraint) {
con = new CBSConstraint(val, sConstraintTypeGroup, constraint.getId(), ((ExamDistributionConstraint)constraint).getTypeString(), (constraint.isHard()?PreferenceLevel.sRequired:PreferenceLevel.int2prolog(((ExamDistributionConstraint)constraint).getWeight())));
} else {
con = new CBSConstraint(val, -1, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired);
}
val.constraints().add(con);
for (Enumeration e3=noGoodsThisConstraint.elements();e3.hasMoreElements();) {
AssignedValue ass = (AssignedValue)e3.nextElement();
ExamPlacement p = (ExamPlacement)ass.getValue();
Exam x = (Exam)p.variable();
String pr = PreferenceLevel.sNeutral;//SolverGridModel.hardConflicts2pref(x,p);
Vector aroomIds = new Vector();
Vector aroomNames = new Vector();
Vector aroomPrefs = new Vector();
for (Iterator i=new TreeSet(p.getRoomPlacements()).iterator();i.hasNext();) {
ExamRoomPlacement room = (ExamRoomPlacement)i.next();
aroomIds.add(room.getId());
aroomNames.add(room.getName());
aroomPrefs.add(x.getRoomPlacements().size()==p.getRoomPlacements().size()?PreferenceLevel.sIntLevelRequired:room.getPenalty(p.getPeriod()));
}
CBSAssignment a = new CBSAssignment(con,
x.getId(),
x.getName(),
pr,
p.getPeriod().getId(),
p.getPeriod().getDayStr()+" "+p.getPeriod().getTimeStr(),
(x.getPeriodPlacements().size()==1?PreferenceLevel.sIntLevelRequired:p.getPeriodPlacement().getPenalty()),
aroomIds,
aroomNames,
aroomPrefs);
con.assignments().add(a);
a.incCounter((int)ass.getCounter(0));
}
}
}
}
public void load(Element root) {
int version = Integer.parseInt(root.attributeValue("version"));
if (version==sVersion) {
iVariables.clear();
for (Iterator i1=root.elementIterator("var");i1.hasNext();) {
CBSVariable var = new CBSVariable((Element)i1.next());
iVariables.put(Long.valueOf(var.getId()),var);
}
}
}
public void save(Element root) {
root.addAttribute("version", String.valueOf(sVersion));
for (Iterator i1=iVariables.values().iterator();i1.hasNext();) {
((CBSVariable)i1.next()).save(root.addElement("var"));
}
}
public static interface Counter {
public int getCounter();
public void incCounter(int value);
}
public static class CBSVariable implements Counter, Comparable, Serializable {
private static final long serialVersionUID = 1L;
int iCounter = 0;
long iExamId;
String iName;
HashSet iValues = new HashSet();
CBSConstraint iConstraint = null;
String iPref = null;
CBSVariable(long examId, String name, String pref) {
iExamId = examId;
iName = name;
iPref = pref;
}
CBSVariable(CBSConstraint constraint, long classId, String examId, String pref) {
iConstraint = constraint;
iExamId = classId;
iName = examId;
iPref = pref;
}
CBSVariable(Element element) {
iExamId = Long.parseLong(element.attributeValue("exam"));
iName = element.attributeValue("name");
iPref = element.attributeValue("pref");
for (Iterator i=element.elementIterator("val");i.hasNext();)
iValues.add(new CBSValue(this,(Element)i.next()));
}
public long getId() { return iExamId; }
public int getCounter() { return iCounter; }
public String getName() { return iName; }
public String getPref() { return iPref; }
public void incCounter(int value) {
iCounter+=value;
if (iConstraint!=null) iConstraint.incCounter(value);
}
public Set values() { return iValues; }
public int hashCode() {
return (Long.valueOf(iExamId)).hashCode();
}
public boolean equals(Object o) {
if (o==null || !(o instanceof CBSVariable)) return false;
return ((CBSVariable)o).getId()==getId();
}
public int compareTo(Object o) {
if (o==null || !(o instanceof CBSVariable)) return -1;
int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSVariable)o).getCounter()));
if (ret!=0) return ret;
return toString().compareTo(o.toString());
}
public String toString() {
return iName;
}
public void save(Element element) {
element.addAttribute("exam",String.valueOf(iExamId));
element.addAttribute("name", iName);
if (iPref!=null)
element.addAttribute("pref", iPref);
for (Iterator i=iValues.iterator();i.hasNext();)
((CBSValue)i.next()).save(element.addElement("val"));
}
}
public static class CBSValue implements Counter, Comparable, Serializable {
private static final long serialVersionUID = 1L;
int iCounter = 0;
Long iPeriodId;
String iPeriodName;
int iPeriodPref;
Vector iRoomIds;
String iInstructorName = null;
Vector iRoomNames;
Vector iRoomPrefs;
CBSVariable iVariable = null;
HashSet iConstraints = new HashSet();
HashSet iAssignments = new HashSet();
int iLength;
CBSValue(CBSVariable var, Long periodId, String periodName, int periodPref, Vector roomIds, Vector roomNames, Vector roomPrefs) {
iVariable = var; iRoomIds = roomIds; iRoomNames = roomNames; iRoomPrefs = roomPrefs;
iPeriodId = periodId; iPeriodName = periodName; iPeriodPref = periodPref;
}
CBSValue(CBSVariable var, Element element) {
iVariable = var;
iPeriodId = Long.valueOf(element.attributeValue("period"));
iPeriodName = element.attributeValue("name");
iPeriodPref = Integer.parseInt(element.attributeValue("pref"));
iRoomIds = new Vector();
iRoomNames = new Vector();
iRoomPrefs = new Vector();
for (Iterator i=element.elementIterator("room");i.hasNext();) {
Element r = (Element)i.next();
iRoomIds.addElement(Integer.valueOf(r.attributeValue("id")));
iRoomNames.addElement(r.attributeValue("name"));
iRoomPrefs.addElement(Integer.valueOf(r.attributeValue("pref")));
}
for (Iterator i=element.elementIterator("cons");i.hasNext();)
iConstraints.add(new CBSConstraint(this,(Element)i.next()));
}
public CBSVariable variable() { return iVariable; }
public Long getPeriodId() { return iPeriodId; }
public String getPeriodName() { return iPeriodName; }
public int getPeriodPref() { return iPeriodPref; }
public Vector getRoomNames() { return iRoomNames; }
public Vector getRoomPrefs() { return iRoomPrefs; }
public String toString() {
return iPeriodName+" "+iRoomNames;
}
public int getCounter() { return iCounter; }
public void incCounter(int value) {
iCounter+=value;
if (iVariable!=null) iVariable.incCounter(value);
}
public Vector getRoomIds() {
return iRoomIds;
}
public Set constraints() { return iConstraints; }
public Set assignments() { return iAssignments; }
public int hashCode() {
return combine(iPeriodId.hashCode(), (iRoomIds==null?0:iRoomIds.hashCode()));
}
public boolean equals(Object o) {
if (o==null || !(o instanceof CBSValue)) return false;
CBSValue v = (CBSValue)o;
return v.getRoomIds().equals(getRoomIds()) && v.getPeriodId().equals(getPeriodId());
}
public int compareTo(Object o) {
if (o==null || !(o instanceof CBSValue)) return -1;
int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSValue)o).getCounter()));
if (ret!=0) return ret;
return toString().compareTo(o.toString());
}
public void save(Element element) {
element.addAttribute("period",String.valueOf(iPeriodId));
element.addAttribute("pref",String.valueOf(iPeriodPref));
element.addAttribute("name", iPeriodName);
for (int i=0;i<iRoomIds.size();i++) {
Element r = element.addElement("room");
r.addAttribute("id",iRoomIds.elementAt(i).toString());
r.addAttribute("name",iRoomNames.elementAt(i).toString());
r.addAttribute("pref",iRoomPrefs.elementAt(i).toString());
}
for (Iterator i=iConstraints.iterator();i.hasNext();)
((CBSConstraint)i.next()).save(element.addElement("cons"));
}
}
public static class CBSConstraint implements Counter, Comparable, Serializable {
private static final long serialVersionUID = 1L;
CBSValue iValue;
int iCounter = 0;
long iId;
String iName = null;
int iType;
HashSet iAssignments = new HashSet();
HashSet iVariables = new HashSet();
String iPref;
CBSConstraint(int type, long id, String name, String pref) {
iId = id;
iType = type;
iName = name;
iPref = pref;
}
CBSConstraint(CBSValue value, int type, long id, String name, String pref) {
iId = id;
iType = type;
iValue = value;
iName = name;
iPref = pref;
}
CBSConstraint(CBSValue value, Element element) {
iValue = value;
iId = Integer.parseInt(element.attributeValue("id"));
iType = Integer.parseInt(element.attributeValue("type"));
iName = element.attributeValue("name");
iPref = element.attributeValue("pref");
for (Iterator i=element.elementIterator("nogood");i.hasNext();)
iAssignments.add(new CBSAssignment(this,(Element)i.next()));
}
public long getId() { return iId; }
public int getType() { return iType; }
public String getName() { return iName; }
public CBSValue value() { return iValue; }
public Set variables() { return iVariables; }
public Set assignments() { return iAssignments; }
public String getPref() { return iPref; }
public int getCounter() { return iCounter; }
public void incCounter(int value) {
iCounter+=value;
if (iValue!=null) iValue.incCounter(value);
}
public int hashCode() {
return combine((int)iId,iType);
}
public boolean equals(Object o) {
if (o==null || !(o instanceof CBSConstraint)) return false;
CBSConstraint c = (CBSConstraint)o;
return c.getId()==getId() && c.getType()==getType();
}
public int compareTo(Object o) {
if (o==null || !(o instanceof CBSConstraint)) return -1;
int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSConstraint)o).getCounter()));
if (ret!=0) return ret;
return toString().compareTo(o.toString());
}
public void save(Element element) {
element.addAttribute("id",String.valueOf(iId));
element.addAttribute("type",String.valueOf(iType));
if (iName!=null)
element.addAttribute("name", iName);
if (iPref!=null)
element.addAttribute("pref", iPref);
for (Iterator i=iAssignments.iterator();i.hasNext();)
((CBSAssignment)i.next()).save(element.addElement("nogood"));
}
}
public static class CBSAssignment implements Counter, Comparable, Serializable {
private static final long serialVersionUID = 1L;
CBSConstraint iConstraint;
Long iExamId;
String iExamName;
String iExamPref;
Long iPeriodId;
String iPeriodName;
int iPeriodPref;
int iCounter = 0;
Vector iRoomIds;
Vector iRoomPrefs;
Vector iRoomNames;
CBSAssignment(CBSConstraint constraint, Long examId, String examName, String examPref, Long periodId, String periodName, int periodPref, Vector roomIds, Vector roomNames, Vector roomPrefs) {
iExamId = examId; iExamName = examName; iExamPref = examPref;
iPeriodId = periodId; iPeriodName = periodName; iPeriodPref = periodPref;
iRoomIds = roomIds; iRoomNames = roomNames; iRoomPrefs = roomPrefs;
iConstraint = constraint;
}
CBSAssignment(CBSConstraint constraint, Element element) {
iConstraint = constraint;
iExamId = Long.valueOf(element.attributeValue("exam"));
iExamName = element.attributeValue("name");
iExamPref = element.attributeValue("pref");
iRoomIds = new Vector();
iRoomNames = new Vector();
iRoomPrefs = new Vector();
for (Iterator i=element.elementIterator("room");i.hasNext();) {
Element r = (Element)i.next();
iRoomIds.addElement(Integer.valueOf(r.attributeValue("id")));
iRoomNames.addElement(r.attributeValue("name"));
iRoomPrefs.addElement(Integer.valueOf(r.attributeValue("pref")));
}
iPeriodId = Long.valueOf(element.attributeValue("period"));
iPeriodName = element.attributeValue("periodName");
iPeriodPref = Integer.parseInt(element.attributeValue("periodPref"));
incCounter(Integer.parseInt(element.attributeValue("cnt")));
}
public Long getId() { return iExamId; }
public CBSConstraint getConstraint() { return iConstraint; }
public String getName() { return iExamName; }
public String getPref() { return iExamPref; }
public Long getPeriodId() { return iPeriodId; }
public String getPeriodName() { return iPeriodName; }
public int getPeriodPref() { return iPeriodPref; }
public String toString() {
return iExamName+" "+iPeriodName+" "+iRoomNames;
}
public Vector getRoomNames() { return iRoomNames; }
public Vector getRoomIds() {
return iRoomIds;
}
public Vector getRoomPrefs() { return iRoomPrefs; }
public int hashCode() {
return combine(iExamId.hashCode(),combine(iRoomIds.hashCode(),iPeriodId.hashCode()));
}
public int getCounter() { return iCounter; }
public void incCounter(int value) {
iCounter+=value;
if (iConstraint!=null) iConstraint.incCounter(value);
}
public boolean equals(Object o) {
if (o==null || !(o instanceof CBSAssignment)) return false;
CBSAssignment a = (CBSAssignment)o;
return a.getId().equals(getId()) && a.getRoomIds().equals(getRoomIds()) && a.getPeriodId().equals(getPeriodId());
}
public int compareTo(Object o) {
if (o==null || !(o instanceof CBSAssignment)) return -1;
int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSAssignment)o).getCounter()));
if (ret!=0) return ret;
return toString().compareTo(o.toString());
}
public void save(Element element) {
element.addAttribute("exam",String.valueOf(iExamId));
element.addAttribute("name",iExamName);
element.addAttribute("pref",iExamPref);
for (int i=0;i<iRoomIds.size();i++) {
Element r = element.addElement("room");
r.addAttribute("id",iRoomIds.elementAt(i).toString());
r.addAttribute("name",iRoomNames.elementAt(i).toString());
r.addAttribute("pref",iRoomPrefs.elementAt(i).toString());
}
element.addAttribute("period", String.valueOf(iPeriodId));
element.addAttribute("periodName", iPeriodName);
element.addAttribute("periodPref", String.valueOf(iPeriodPref));
element.addAttribute("cnt", String.valueOf(iCounter));
}
}
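    // Interleaves the low 15 bits of the two arguments (bit i of a goes to position 2i, bit i of b
    // to position 2i+1) so that (a, b) pairs collide less often than with a plain sum or xor; used
    // by the hashCode() implementations above.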
private static int combine(int a, int b) {
int ret = 0;
for (int i=0;i<15;i++) ret = ret | ((a & (1<<i))<<i) | ((b & (1<<i))<<(i+1));
return ret;
}
//--------- toHtml -------------------------------------------------
private static String IMG_BASE = "images/";
private static String IMG_EXPAND = IMG_BASE+"expand_node_btn.gif";
private static String IMG_COLLAPSE = IMG_BASE+"collapse_node_btn.gif";
private static String IMG_LEAF = IMG_BASE+"end_node_btn.gif";
public static int TYPE_VARIABLE_BASED = 0;
public static int TYPE_CONSTRAINT_BASED = 1;
private void menu_item(PrintWriter out, String id, String name, String description, String page, boolean isCollapsed) {
out.println("<div style=\"margin-left:5px;\">");
out.println("<A style=\"border:0;background:0\" id=\"__idMenu"+id+"\" href=\"javascript:toggle('"+id+"')\" name=\""+name+"\">");
out.println("<img id=\"__idMenuImg"+id+"\" border=\"0\" src=\""+(isCollapsed ? IMG_EXPAND : IMG_COLLAPSE)+"\" align=\"absmiddle\"></A>");
out.println(" <A class='noFancyLinks' target=\"__idContentFrame\" "+(page == null ? "" : page+" onmouseover=\"this.style.cursor='hand';this.style.cursor='pointer';\" ")+"title=\""+(description == null ? "" : description)+"\" >"+ name+(description == null?"":" <font color='gray'>[" + description + "]</font>")+"</A><br>");
out.println("</div>");
out.println("<div ID=\"__idMenuDiv"+id+"\" style=\"display:"+(isCollapsed ? "none" : "block")+";position:relative;margin-left:18px;\">");
}
private void leaf_item(PrintWriter out, String name, String description, String page) {
out.println("<div style=\"margin-left:5px;\">");
out.println("<img border=\"0\" src=\""+IMG_LEAF+"\" align=\"absmiddle\">");
out.println(" <A class='noFancyLinks' target=\"__idContentFrame\" "+(page == null ? "" : page + " onmouseover=\"this.style.cursor='hand';this.style.cursor='pointer';\" ")+"title=\""+(description == null ? "" : description)+"\" >"+name+(description == null ? "" : " <font color='gray'>[" + description + "]</font>")+"</A><br>");
out.println("</div>");
}
private void end_item(PrintWriter out) {
out.println("</div>");
}
private void unassignedVariableMenuItem(PrintWriter out, String menuId, CBSVariable variable, boolean clickable) {
String name =
"<font color='"+PreferenceLevel.prolog2color(variable.getPref())+"'>"+
variable.getName()+
"</font>";
String description = null;
String onClick = null;
if (clickable)
onClick = "onclick=\"(parent ? parent : window).showGwtDialog('Examination Assignment', 'examInfo.do?examId="+variable.getId()+"&op=Reset','900','90%');\"";
menu_item(out, menuId, variable.getCounter() + "× " + name, description, onClick, true);
}
private void unassignmentMenuItem(PrintWriter out, String menuId, CBSValue value, boolean clickable) {
String name =
"<font color='"+PreferenceLevel.int2color(value.getPeriodPref())+"'>"+
value.getPeriodName()+
"</font> ";
String roomLink = "";
for (int i=0;i<value.getRoomIds().size();i++) {
name += (i>0?", ":"")+"<font color='"+PreferenceLevel.int2color(((Integer)value.getRoomPrefs().elementAt(i)).intValue())+"'>"+ value.getRoomNames().elementAt(i)+"</font>";
roomLink += (i>0?":":"")+value.getRoomIds().elementAt(i);
}
String description = null;
String onClick = null;
if (clickable)
onClick = "onclick=\"(parent ? parent : window).showGwtDialog('Examination Assignment', 'examInfo.do?examId="+value.variable().getId()+"&period="+value.getPeriodId()+"&room="+roomLink+"&op=Try&reset=1','900','90%');\"";
menu_item(out, menuId, value.getCounter() + "× " + name, description, onClick, true);
}
private void constraintMenuItem(PrintWriter out, String menuId, CBSConstraint constraint, boolean clickable) {
String name = "<font color='"+PreferenceLevel.prolog2color(constraint.getPref())+"'>";
String link = null;
switch (constraint.getType()) {
case sConstraintTypeGroup :
name += "Distribution "+constraint.getName();
break;
case sConstraintTypeInstructor :
name += "Instructor "+constraint.getName();
if (clickable) link = "examGrid.do?filter="+constraint.getName()+"&resource="+ExamGridTable.sResourceInstructor+"&op=Cbs";
break;
case sConstraintTypeRoom :
name += "Room "+constraint.getName();
if (clickable) link = "examGrid.do?filter="+constraint.getName()+"&resource="+ExamGridTable.sResourceRoom+"&op=Cbs";
break;
case sConstraintTypeStudent :
name += "Student "+constraint.getName();
break;
default :
name += (constraint.getName()==null?"Unknown":constraint.getName());
}
name += "</font>";
String description = null;
String onClick = null;
if (link!=null)
onClick = "href=\""+link+"\"";
menu_item(out, menuId, constraint.getCounter() + "× " + name, description, onClick, true);
}
private void assignmentLeafItem(PrintWriter out, CBSAssignment assignment, boolean clickable) {
String name =
"<font color='"+PreferenceLevel.prolog2color(assignment.getPref())+"'>"+
assignment.getName()+
"</font> ← "+
"<font color='"+PreferenceLevel.int2color(assignment.getPeriodPref())+"'>"+
assignment.getPeriodName()+
"</font> ";
String roomLink = "";
for (int i=0;i<assignment.getRoomIds().size();i++) {
name += (i>0?", ":"")+"<font color='"+PreferenceLevel.int2color(((Integer)assignment.getRoomPrefs().elementAt(i)).intValue())+"'>"+ assignment.getRoomNames().elementAt(i)+"</font>";
roomLink += (i>0?":":"")+assignment.getRoomIds().elementAt(i);
}
String onClick = null;
if (clickable)
onClick = "onclick=\"(parent ? parent : window).showGwtDialog('Examination Assignment', 'examInfo.do?examId="+assignment.getId()+"&period="+assignment.getPeriodId()+"&room="+roomLink+"&op=Try&reset=1','900','90%');\"";
leaf_item(out, assignment.getCounter()+"× "+name, null, onClick);
}
public static void printHtmlHeader(JspWriter jsp) {
PrintWriter out = new PrintWriter(jsp);
printHtmlHeader(out, false);
}
public static void printHtmlHeader(PrintWriter out, boolean style) {
if (style) {
out.println("<style type=\"text/css\">");
out.println("<!--");
out.println("A:link { color: blue; text-decoration: none; border:0; background:0; }");
out.println("A:visited { color: blue; text-decoration: none; border:0; background:0; }");
out.println("A:active { color: blue; text-decoration: none; border:0; background:0; }");
out.println("A:hover { color: blue; text-decoration: none; border:0; background:0; }");
out.println(".TextBody { background-color: white; color:black; font-size: 12px; }");
out.println(".WelcomeHead { color: black; margin-top: 0px; margin-left: 0px; font-weight: bold; text-align: right; font-size: 30px; font-family: Comic Sans MS}");
out.println("-->");
out.println("</style>");
out.println();
}
out.println("<script language=\"javascript\" type=\"text/javascript\">");
out.println("function toggle(item) {");
out.println(" obj=document.getElementById(\"__idMenuDiv\"+item);");
out.println(" visible=(obj.style.display!=\"none\");");
out.println(" img=document.getElementById(\"__idMenuImg\" + item);");
out.println(" menu=document.getElementById(\"__idMenu\" + item);");
out.println(" if (visible) {obj.style.display=\"none\";img.src=\""+IMG_EXPAND+"\";}");
out.println(" else {obj.style.display=\"block\";img.src=\""+IMG_COLLAPSE+"\";}");
out.println("}");
out.println("</script>");
out.flush();
}
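    // Keeps only the most significant counters: entries are sorted by descending counter value and
    // accumulated until the running total reaches ceil(limit * total), e.g. a limit of 0.8 keeps the
    // smallest prefix of entries that accounts for at least 80% of all recorded conflicts.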
private Vector filter(Collection counters, double limit) {
Vector cnt = new Vector(counters);
Collections.sort(cnt);
int total = 0;
for (Enumeration e=cnt.elements();e.hasMoreElements();)
total += ((Counter)e.nextElement()).getCounter();
int totalLimit = (int)Math.ceil(limit*total);
int current = 0;
Vector ret = new Vector();
for (Enumeration e=cnt.elements();e.hasMoreElements();) {
Counter c = (Counter)e.nextElement();
ret.addElement(c);
current += c.getCounter();
if (current>=totalLimit) break;
}
return ret;
}
/** Print conflict-based statistics in HTML format */
public void printHtml(JspWriter jsp, double limit, int type, boolean clickable) {
printHtml(jsp, null, new double[] {limit,limit,limit,limit}, type, clickable);
}
/** Print conflict-based statistics in HTML format */
public void printHtml(PrintWriter out, double limit, int type, boolean clickable) {
printHtml(out, null, new double[] {limit,limit,limit,limit}, type, clickable);
}
/** Print conflict-based statistics in HTML format */
public void printHtml(JspWriter jsp, double[] limit, int type, boolean clickable) {
printHtml(jsp, null, limit, type, clickable);
}
/** Print conflict-based statistics in HTML format */
public void printHtml(PrintWriter out, double[] limit, int type, boolean clickable) {
printHtml(out, null, limit, type, clickable);
}
/** Print conflict-based statistics in HTML format */
public void printHtml(JspWriter jsp, Long classId, double limit, int type, boolean clickable) {
printHtml(jsp, classId, new double[] {limit,limit,limit,limit}, type, clickable);
}
/** Print conflict-based statistics in HTML format */
public void printHtml(PrintWriter out, Long classId, double limit, int type, boolean clickable) {
printHtml(out, classId, new double[] {limit,limit,limit,limit}, type, clickable);
}
/** Print conflict-based statistics in HTML format */
public void printHtml(JspWriter jsp, Long classId, double[] limit, int type, boolean clickable) {
PrintWriter out = new PrintWriter(jsp);
printHtml(out, classId, limit, type, clickable);
}
/** Print conflict-based statistics in HTML format */
public void printHtml(PrintWriter out, Long classId, double[] limit, int type, boolean clickable) {
if (type == TYPE_VARIABLE_BASED) {
Vector vars = filter(iVariables.values(), limit[0]);
if (classId!=null) {
CBSVariable var = (CBSVariable)iVariables.get(classId);
vars.clear();
if (var!=null) vars.add(var);
}
for (Enumeration e1 = vars.elements(); e1.hasMoreElements();) {
CBSVariable variable = (CBSVariable)e1.nextElement();
String m1 = String.valueOf(variable.getId());
if (classId==null)
unassignedVariableMenuItem(out,m1,variable, clickable);
Vector vals = filter(variable.values(), limit[1]);
int id = 0;
for (Enumeration e2 = vals.elements();e2.hasMoreElements();) {
CBSValue value = (CBSValue)e2.nextElement();
String m2 = m1+"."+(id++);
unassignmentMenuItem(out,m2,value, clickable);
Vector constraints =filter(value.constraints(),limit[2]);
for (Enumeration e3 = constraints.elements(); e3.hasMoreElements();) {
CBSConstraint constraint = (CBSConstraint)e3.nextElement();
String m3 = m2 + constraint.getType()+"."+constraint.getId();
constraintMenuItem(out,m3,constraint, clickable);
Vector assignments = filter(constraint.assignments(),limit[3]);
for (Enumeration e4 = assignments.elements();e4.hasMoreElements();) {
CBSAssignment assignment = (CBSAssignment)e4.nextElement();
assignmentLeafItem(out, assignment, clickable);
}
end_item(out);
}
end_item(out);
}
end_item(out);
}
} else if (type == TYPE_CONSTRAINT_BASED) {
Hashtable constraints = new Hashtable();
for (Enumeration e1 = iVariables.elements(); e1.hasMoreElements();) {
CBSVariable variable = (CBSVariable)e1.nextElement();
if (classId!=null && classId.longValue()!=variable.getId())
continue;
for (Iterator e2=variable.values().iterator();e2.hasNext();) {
CBSValue value = (CBSValue)e2.next();
for (Iterator e3=value.constraints().iterator();e3.hasNext();) {
CBSConstraint constraint = (CBSConstraint)e3.next();
CBSConstraint xConstraint = (CBSConstraint)constraints.get(constraint.getType()+"."+constraint.getId());
if (xConstraint==null) {
xConstraint = new CBSConstraint(constraint.getType(),constraint.getId(),constraint.getName(),constraint.getPref());
constraints.put(constraint.getType()+"."+constraint.getId(),xConstraint);
}
CBSVariable xVariable = null;
for (Iterator i=xConstraint.variables().iterator();i.hasNext();) {
CBSVariable v = (CBSVariable)i.next();
if (v.getId()==variable.getId()) {
xVariable = v; break;
}
}
if (xVariable==null) {
xVariable = new CBSVariable(xConstraint,variable.getId(),variable.getName(),variable.getPref());
xConstraint.variables().add(xVariable);
}
CBSValue xValue = new CBSValue(xVariable,
value.getPeriodId(), value.getPeriodName(), value.getPeriodPref(),
value.getRoomIds(), value.getRoomNames(), value.getRoomPrefs());
xVariable.values().add(xValue);
for (Iterator e4=constraint.assignments().iterator();e4.hasNext();) {
CBSAssignment assignment = (CBSAssignment)e4.next();
xValue.assignments().add(assignment);
xValue.incCounter(assignment.getCounter());
}
}
}
}
Vector consts = filter(constraints.values(), limit[0]);
for (Enumeration e1 = consts.elements(); e1.hasMoreElements();) {
CBSConstraint constraint = (CBSConstraint)e1.nextElement();
String m1 = constraint.getType()+"."+constraint.getId();
constraintMenuItem(out,m1,constraint, clickable);
Vector variables = filter(constraint.variables(), limit[1]);
Collections.sort(variables);
for (Enumeration e2 = variables.elements(); e2.hasMoreElements();) {
CBSVariable variable = (CBSVariable)e2.nextElement();
String m2 = m1+"."+variable.getId();
if (classId==null)
unassignedVariableMenuItem(out,m2,variable, clickable);
Vector vals = filter(variable.values(), limit[2]);
int id = 0;
for (Enumeration e3 = vals.elements();e3.hasMoreElements();) {
CBSValue value = (CBSValue)e3.nextElement();
String m3 = m2+"."+(id++);
unassignmentMenuItem(out,m3,value, clickable);
Vector assignments = filter(value.assignments(), limit[3]);
for (Enumeration e4 = assignments.elements();e4.hasMoreElements();) {
CBSAssignment assignment = (CBSAssignment)e4.nextElement();
assignmentLeafItem(out, assignment, clickable);
}
end_item(out);
}
if (classId==null)
end_item(out);
}
end_item(out);
}
}
out.flush();
}
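    /*
     * Usage sketch (illustrative): assuming "info" is a populated instance and "out" a
     * JspWriter or PrintWriter, render the statistics grouped by constraint, keeping only
     * entries above a limit of 25, with clickable menu items:
     *
     *   info.printHtml(out, null, 25.0, TYPE_CONSTRAINT_BASED, true);
     *
     * A non-null classId restricts the output to a single variable; the double[] overloads
     * allow a different limit at each level of the tree.
     */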
public boolean saveToFile() {
return true;
}
}
| UniTime/unitime | JavaSource/org/unitime/timetable/solver/exam/ui/ExamConflictStatisticsInfo.java | Java | apache-2.0 | 36,624 |
package no.nb.nna.veidemann.chrome.client.ws;
import no.nb.nna.veidemann.chrome.client.ws.GetBrowserVersionCmd.Response;
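/**
 * Wrapper for the Chrome DevTools Protocol command {@code Browser.getVersion}; the nested
 * {@link Response} mirrors the fields returned by the protocol.
 */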
public class GetBrowserVersionCmd extends Command<Response> {
public GetBrowserVersionCmd(Cdp client) {
super(client, "Browser", "getVersion", Response.class);
}
public static class Response {
private String protocolVersion;
private String product;
private String revision;
private String userAgent;
private String jsVersion;
/**
* Protocol version.
*/
public String protocolVersion() {
return protocolVersion;
}
/**
* Product name.
*/
public String product() {
return product;
}
/**
* Product revision.
*/
public String revision() {
return revision;
}
/**
* User-Agent.
*/
public String userAgent() {
return userAgent;
}
/**
* V8 version.
*/
public String jsVersion() {
return jsVersion;
}
public String toString() {
return "Version{protocolVersion=" + protocolVersion + ", product=" + product + ", revision=" + revision + ", userAgent=" + userAgent + ", jsVersion=" + jsVersion + "}";
}
}
}
| nlnwa/broprox | veidemann-chrome-client/src/main/java/no/nb/nna/veidemann/chrome/client/ws/GetBrowserVersionCmd.java | Java | apache-2.0 | 1,385 |
package fr.javatronic.blog.massive.annotation1;
import fr.javatronic.blog.processor.Annotation_001;
@Annotation_001
public class Class_914 {
}
| lesaint/experimenting-annotation-processing | experimenting-rounds/massive-count-of-annotated-classes/src/main/java/fr/javatronic/blog/massive/annotation1/Class_914.java | Java | apache-2.0 | 145 |
package org.wikipedia.concurrency;
// Copied from Android 4.4.2_r2 source
// so we can use executeOnExecutor :P
//
// https://android.googlesource.com/platform/frameworks/base/+/android-4.4.2_r2/core/java/android/os/AsyncTask.java
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import android.os.Handler;
import android.os.Message;
import android.os.Process;
import android.support.annotation.NonNull;
import java.util.ArrayDeque;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.FutureTask;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * <p>AsyncTask enables proper and easy use of the UI thread. This class allows you to
* perform background operations and publish results on the UI thread without
* having to manipulate threads and/or handlers.</p>
*
* <p>AsyncTask is designed to be a helper class around {@link Thread} and {@link Handler}
* and does not constitute a generic threading framework. AsyncTasks should ideally be
* used for short operations (a few seconds at the most.) If you need to keep threads
* running for long periods of time, it is highly recommended you use the various APIs
 * provided by the <code>java.util.concurrent</code> package such as {@link Executor},
* {@link ThreadPoolExecutor} and {@link FutureTask}.</p>
*
* <p>An asynchronous task is defined by a computation that runs on a background thread and
* whose result is published on the UI thread. An asynchronous task is defined by 3 generic
* types, called <code>Params</code>, <code>Progress</code> and <code>Result</code>,
* and 4 steps, called <code>onPreExecute</code>, <code>doInBackground</code>,
* <code>onProgressUpdate</code> and <code>onPostExecute</code>.</p>
*
* <div class="special reference">
* <h3>Developer Guides</h3>
* <p>For more information about using tasks and threads, read the
* <a href="{@docRoot}guide/topics/fundamentals/processes-and-threads.html">Processes and
* Threads</a> developer guide.</p>
* </div>
*
* <h2>Usage</h2>
* <p>AsyncTask must be subclassed to be used. The subclass will override at least
* one method ({@link #doInBackground}), and most often will override a
* second one ({@link #onPostExecute}.)</p>
*
* <p>Here is an example of subclassing:</p>
* <pre class="prettyprint">
* private class DownloadFilesTask extends AsyncTask<URL, Integer, Long> {
* protected Long doInBackground(URL... urls) {
* int count = urls.length;
* long totalSize = 0;
* for (int i = 0; i < count; i++) {
* totalSize += Downloader.downloadFile(urls[i]);
* publishProgress((int) ((i / (float) count) * 100));
* // Escape early if cancel() is called
* if (isCancelled()) break;
* }
* return totalSize;
* }
*
* protected void onProgressUpdate(Integer... progress) {
* setProgressPercent(progress[0]);
* }
*
* protected void onPostExecute(Long result) {
* showDialog("Downloaded " + result + " bytes");
* }
* }
* </pre>
*
* <p>Once created, a task is executed very simply:</p>
* <pre class="prettyprint">
* new DownloadFilesTask().execute(url1, url2, url3);
* </pre>
*
* <h2>AsyncTask's generic types</h2>
* <p>The three types used by an asynchronous task are the following:</p>
* <ol>
* <li><code>Params</code>, the type of the parameters sent to the task upon
* execution.</li>
* <li><code>Progress</code>, the type of the progress units published during
* the background computation.</li>
* <li><code>Result</code>, the type of the result of the background
* computation.</li>
* </ol>
* <p>Not all types are always used by an asynchronous task. To mark a type as unused,
* simply use the type {@link Void}:</p>
* <pre>
* private class MyTask extends AsyncTask<Void, Void, Void> { ... }
* </pre>
*
* <h2>The 4 steps</h2>
* <p>When an asynchronous task is executed, the task goes through 4 steps:</p>
* <ol>
* <li>{@link #onPreExecute()}, invoked on the UI thread before the task
* is executed. This step is normally used to setup the task, for instance by
* showing a progress bar in the user interface.</li>
* <li>{@link #doInBackground}, invoked on the background thread
* immediately after {@link #onPreExecute()} finishes executing. This step is used
* to perform background computation that can take a long time. The parameters
* of the asynchronous task are passed to this step. The result of the computation must
* be returned by this step and will be passed back to the last step. This step
* can also use {@link #publishProgress} to publish one or more units
* of progress. These values are published on the UI thread, in the
* {@link #onProgressUpdate} step.</li>
* <li>{@link #onProgressUpdate}, invoked on the UI thread after a
* call to {@link #publishProgress}. The timing of the execution is
* undefined. This method is used to display any form of progress in the user
* interface while the background computation is still executing. For instance,
* it can be used to animate a progress bar or show logs in a text field.</li>
* <li>{@link #onPostExecute}, invoked on the UI thread after the background
* computation finishes. The result of the background computation is passed to
* this step as a parameter.</li>
* </ol>
*
* <h2>Cancelling a task</h2>
* <p>A task can be cancelled at any time by invoking {@link #cancel(boolean)}. Invoking
* this method will cause subsequent calls to {@link #isCancelled()} to return true.
* After invoking this method, {@link #onCancelled(Object)}, instead of
* {@link #onPostExecute(Object)} will be invoked after {@link #doInBackground(Object[])}
* returns. To ensure that a task is cancelled as quickly as possible, you should always
* check the return value of {@link #isCancelled()} periodically from
* {@link #doInBackground(Object[])}, if possible (inside a loop for instance.)</p>
*
* <h2>Threading rules</h2>
* <p>There are a few threading rules that must be followed for this class to
* work properly:</p>
* <ul>
* <li>The AsyncTask class must be loaded on the UI thread. This is done
* automatically as of {@link android.os.Build.VERSION_CODES#JELLY_BEAN}.</li>
* <li>The task instance must be created on the UI thread.</li>
* <li>{@link #execute} must be invoked on the UI thread.</li>
* <li>Do not call {@link #onPreExecute()}, {@link #onPostExecute},
* {@link #doInBackground}, {@link #onProgressUpdate} manually.</li>
* <li>The task can be executed only once (an exception will be thrown if
* a second execution is attempted.)</li>
* </ul>
*
* <h2>Memory observability</h2>
* <p>AsyncTask guarantees that all callback calls are synchronized in such a way that the following
* operations are safe without explicit synchronizations.</p>
* <ul>
* <li>Set member fields in the constructor or {@link #onPreExecute}, and refer to them
* in {@link #doInBackground}.
* <li>Set member fields in {@link #doInBackground}, and refer to them in
* {@link #onProgressUpdate} and {@link #onPostExecute}.
* </ul>
*
* <h2>Order of execution</h2>
* <p>When first introduced, AsyncTasks were executed serially on a single background
* thread. Starting with {@link android.os.Build.VERSION_CODES#DONUT}, this was changed
* to a pool of threads allowing multiple tasks to operate in parallel. Starting with
* {@link android.os.Build.VERSION_CODES#HONEYCOMB}, tasks are executed on a single
* thread to avoid common application errors caused by parallel execution.</p>
* <p>If you truly want parallel execution, you can invoke
* {@link #executeOnExecutor(java.util.concurrent.Executor, Object[])} with
* {@link #THREAD_POOL_EXECUTOR}.</p>
*/
public abstract class AsyncTask<Params, Progress, Result> {
private static final String LOG_TAG = "AsyncTask";
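    // Pool sizing, unchanged from the copied Android source: CPU_COUNT + 1 core threads,
    // 2 * CPU_COUNT + 1 threads at most, a 1 second keep-alive and a 128-entry work queue.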
private static final int CPU_COUNT = Runtime.getRuntime().availableProcessors();
private static final int CORE_POOL_SIZE = CPU_COUNT + 1;
private static final int MAXIMUM_POOL_SIZE = CPU_COUNT * 2 + 1;
private static final int KEEP_ALIVE = 1;
private static final ThreadFactory sThreadFactory = new ThreadFactory() {
private final AtomicInteger mCount = new AtomicInteger(1);
public Thread newThread(@NonNull Runnable r) {
return new Thread(r, "AsyncTask #" + mCount.getAndIncrement());
}
};
private static final BlockingQueue<Runnable> sPoolWorkQueue = new LinkedBlockingQueue<>(128);
/**
* An {@link Executor} that can be used to execute tasks in parallel.
*/
public static final Executor THREAD_POOL_EXECUTOR
= new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE,
TimeUnit.SECONDS, sPoolWorkQueue, sThreadFactory);
/**
* An {@link Executor} that executes tasks one at a time in serial
* order. This serialization is global to a particular process.
*/
public static final Executor SERIAL_EXECUTOR = new SerialExecutor();
private static final int MESSAGE_POST_RESULT = 0x1;
private static final int MESSAGE_POST_PROGRESS = 0x2;
private static final InternalHandler sHandler = new InternalHandler();
private static volatile Executor sDefaultExecutor = SERIAL_EXECUTOR;
private final WorkerRunnable<Params, Result> mWorker;
private final FutureTask<Result> mFuture;
private volatile Status mStatus = Status.PENDING;
private final AtomicBoolean mCancelled = new AtomicBoolean();
private final AtomicBoolean mTaskInvoked = new AtomicBoolean();
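    /**
     * Runs submitted tasks one at a time: each Runnable is wrapped so that the next queued
     * task is only handed to {@link #THREAD_POOL_EXECUTOR} once the current one finishes,
     * which is what gives {@link #SERIAL_EXECUTOR} its ordering guarantee.
     */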
private static class SerialExecutor implements Executor {
final ArrayDeque<Runnable> mTasks = new ArrayDeque<>();
Runnable mActive;
public synchronized void execute(@NonNull final Runnable r) {
mTasks.offer(new Runnable() {
public void run() {
try {
r.run();
} finally {
scheduleNext();
}
}
});
if (mActive == null) {
scheduleNext();
}
}
protected synchronized void scheduleNext() {
if ((mActive = mTasks.poll()) != null) {
THREAD_POOL_EXECUTOR.execute(mActive);
}
}
}
/**
* Indicates the current status of the task. Each status will be set only once
* during the lifetime of a task.
*/
public enum Status {
/**
* Indicates that the task has not been executed yet.
*/
PENDING,
/**
* Indicates that the task is running.
*/
RUNNING,
/**
* Indicates that {@link AsyncTask#onPostExecute} has finished.
*/
FINISHED,
}
/** @hide Used to force static handler to be created. */
public static void init() {
sHandler.getLooper();
}
/** @hide */
public static void setDefaultExecutor(Executor exec) {
sDefaultExecutor = exec;
}
/**
* Creates a new asynchronous task. This constructor must be invoked on the UI thread.
*/
public AsyncTask() {
mWorker = new WorkerRunnable<Params, Result>() {
public Result call() throws Exception {
mTaskInvoked.set(true);
Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);
//noinspection unchecked
return postResult(doInBackground(mParams));
}
};
mFuture = new FutureTask<Result>(mWorker) {
@Override
protected void done() {
try {
postResultIfNotInvoked(get());
} catch (InterruptedException e) {
android.util.Log.w(LOG_TAG, e);
} catch (ExecutionException e) {
throw new RuntimeException("An error occured while executing doInBackground()",
e.getCause());
} catch (CancellationException e) {
postResultIfNotInvoked(null);
}
}
};
}
private void postResultIfNotInvoked(Result result) {
final boolean wasTaskInvoked = mTaskInvoked.get();
if (!wasTaskInvoked) {
postResult(result);
}
}
private Result postResult(Result result) {
@SuppressWarnings("unchecked")
Message message = sHandler.obtainMessage(MESSAGE_POST_RESULT,
new AsyncTaskResult<>(this, result));
message.sendToTarget();
return result;
}
/**
* Returns the current status of this task.
*
* @return The current status.
*/
public final Status getStatus() {
return mStatus;
}
/**
* Override this method to perform a computation on a background thread. The
* specified parameters are the parameters passed to {@link #execute}
* by the caller of this task.
*
* This method can call {@link #publishProgress} to publish updates
* on the UI thread.
*
* @param params The parameters of the task.
*
* @return A result, defined by the subclass of this task.
*
* @see #onPreExecute()
* @see #onPostExecute
* @see #publishProgress
*/
protected abstract Result doInBackground(Params... params);
/**
* Runs on the UI thread before {@link #doInBackground}.
*
* @see #onPostExecute
* @see #doInBackground
*/
protected void onPreExecute() {
}
/**
* <p>Runs on the UI thread after {@link #doInBackground}. The
* specified result is the value returned by {@link #doInBackground}.</p>
*
* <p>This method won't be invoked if the task was cancelled.</p>
*
* @param result The result of the operation computed by {@link #doInBackground}.
*
* @see #onPreExecute
* @see #doInBackground
* @see #onCancelled(Object)
*/
@SuppressWarnings({"UnusedDeclaration"})
protected void onPostExecute(Result result) {
}
/**
* Runs on the UI thread after {@link #publishProgress} is invoked.
* The specified values are the values passed to {@link #publishProgress}.
*
* @param values The values indicating progress.
*
* @see #publishProgress
* @see #doInBackground
*/
@SuppressWarnings({"UnusedDeclaration"})
protected void onProgressUpdate(Progress... values) {
}
/**
* <p>Runs on the UI thread after {@link #cancel(boolean)} is invoked and
* {@link #doInBackground(Object[])} has finished.</p>
*
* <p>The default implementation simply invokes {@link #onCancelled()} and
* ignores the result. If you write your own implementation, do not call
* <code>super.onCancelled(result)</code>.</p>
*
* @param result The result, if any, computed in
* {@link #doInBackground(Object[])}, can be null
*
* @see #cancel(boolean)
* @see #isCancelled()
*/
@SuppressWarnings({"UnusedParameters"})
protected void onCancelled(Result result) {
onCancelled();
}
/**
* <p>Applications should preferably override {@link #onCancelled(Object)}.
* This method is invoked by the default implementation of
* {@link #onCancelled(Object)}.</p>
*
* <p>Runs on the UI thread after {@link #cancel(boolean)} is invoked and
* {@link #doInBackground(Object[])} has finished.</p>
*
* @see #onCancelled(Object)
* @see #cancel(boolean)
* @see #isCancelled()
*/
protected void onCancelled() {
}
/**
* Returns <tt>true</tt> if this task was cancelled before it completed
* normally. If you are calling {@link #cancel(boolean)} on the task,
* the value returned by this method should be checked periodically from
* {@link #doInBackground(Object[])} to end the task as soon as possible.
*
* @return <tt>true</tt> if task was cancelled before it completed
*
* @see #cancel(boolean)
*/
public final boolean isCancelled() {
return mCancelled.get();
}
/**
* <p>Attempts to cancel execution of this task. This attempt will
* fail if the task has already completed, already been cancelled,
* or could not be cancelled for some other reason. If successful,
* and this task has not started when <tt>cancel</tt> is called,
* this task should never run. If the task has already started,
* then the <tt>mayInterruptIfRunning</tt> parameter determines
* whether the thread executing this task should be interrupted in
* an attempt to stop the task.</p>
*
* <p>Calling this method will result in {@link #onCancelled(Object)} being
* invoked on the UI thread after {@link #doInBackground(Object[])}
* returns. Calling this method guarantees that {@link #onPostExecute(Object)}
* is never invoked. After invoking this method, you should check the
* value returned by {@link #isCancelled()} periodically from
* {@link #doInBackground(Object[])} to finish the task as early as
* possible.</p>
*
* @param mayInterruptIfRunning <tt>true</tt> if the thread executing this
* task should be interrupted; otherwise, in-progress tasks are allowed
* to complete.
*
* @return <tt>false</tt> if the task could not be cancelled,
* typically because it has already completed normally;
* <tt>true</tt> otherwise
*
* @see #isCancelled()
* @see #onCancelled(Object)
*/
public final boolean cancel(boolean mayInterruptIfRunning) {
mCancelled.set(true);
return mFuture.cancel(mayInterruptIfRunning);
}
/**
* Waits if necessary for the computation to complete, and then
* retrieves its result.
*
* @return The computed result.
*
* @throws CancellationException If the computation was cancelled.
* @throws ExecutionException If the computation threw an exception.
* @throws InterruptedException If the current thread was interrupted
* while waiting.
*/
public final Result get() throws InterruptedException, ExecutionException {
return mFuture.get();
}
/**
* Waits if necessary for at most the given time for the computation
* to complete, and then retrieves its result.
*
* @param timeout Time to wait before cancelling the operation.
* @param unit The time unit for the timeout.
*
* @return The computed result.
*
* @throws CancellationException If the computation was cancelled.
* @throws ExecutionException If the computation threw an exception.
* @throws InterruptedException If the current thread was interrupted
* while waiting.
* @throws TimeoutException If the wait timed out.
*/
public final Result get(long timeout, TimeUnit unit) throws InterruptedException,
ExecutionException, TimeoutException {
return mFuture.get(timeout, unit);
}
/**
* Executes the task with the specified parameters. The task returns
* itself (this) so that the caller can keep a reference to it.
*
* <p>Note: this function schedules the task on a queue for a single background
* thread or pool of threads depending on the platform version. When first
* introduced, AsyncTasks were executed serially on a single background thread.
* Starting with {@link android.os.Build.VERSION_CODES#DONUT}, this was changed
     * to a pool of threads allowing multiple tasks to operate in parallel. Starting with
* {@link android.os.Build.VERSION_CODES#HONEYCOMB}, tasks are back to being
* executed on a single thread to avoid common application errors caused
* by parallel execution. If you truly want parallel execution, you can use
* the {@link #executeOnExecutor} version of this method
* with {@link #THREAD_POOL_EXECUTOR}; however, see commentary there for warnings
* on its use.
*
* <p>This method must be invoked on the UI thread.
*
* @param params The parameters of the task.
*
* @return This instance of AsyncTask.
*
* @throws IllegalStateException If {@link #getStatus()} returns either
* {@link AsyncTask.Status#RUNNING} or {@link AsyncTask.Status#FINISHED}.
*
* @see #executeOnExecutor(java.util.concurrent.Executor, Object[])
* @see #execute(Runnable)
*/
public final AsyncTask<Params, Progress, Result> execute(Params... params) {
return executeOnExecutor(sDefaultExecutor, params);
}
/**
* Executes the task with the specified parameters. The task returns
* itself (this) so that the caller can keep a reference to it.
*
* <p>This method is typically used with {@link #THREAD_POOL_EXECUTOR} to
* allow multiple tasks to run in parallel on a pool of threads managed by
* AsyncTask, however you can also use your own {@link Executor} for custom
* behavior.
*
* <p><em>Warning:</em> Allowing multiple tasks to run in parallel from
* a thread pool is generally <em>not</em> what one wants, because the order
* of their operation is not defined. For example, if these tasks are used
* to modify any state in common (such as writing a file due to a button click),
* there are no guarantees on the order of the modifications.
* Without careful work it is possible in rare cases for the newer version
* of the data to be over-written by an older one, leading to obscure data
* loss and stability issues. Such changes are best
* executed in serial; to guarantee such work is serialized regardless of
* platform version you can use this function with {@link #SERIAL_EXECUTOR}.
*
* <p>This method must be invoked on the UI thread.
*
* @param exec The executor to use. {@link #THREAD_POOL_EXECUTOR} is available as a
* convenient process-wide thread pool for tasks that are loosely coupled.
* @param params The parameters of the task.
*
* @return This instance of AsyncTask.
*
* @throws IllegalStateException If {@link #getStatus()} returns either
* {@link AsyncTask.Status#RUNNING} or {@link AsyncTask.Status#FINISHED}.
*
* @see #execute(Object[])
*/
public final AsyncTask<Params, Progress, Result> executeOnExecutor(Executor exec,
Params... params) {
if (mStatus != Status.PENDING) {
switch (mStatus) {
case RUNNING:
throw new IllegalStateException("Cannot execute task:"
+ " the task is already running.");
case FINISHED:
throw new IllegalStateException("Cannot execute task:"
+ " the task has already been executed "
+ "(a task can be executed only once)");
}
}
mStatus = Status.RUNNING;
onPreExecute();
mWorker.mParams = params;
exec.execute(mFuture);
return this;
}
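    // Usage sketch (illustrative, reusing the DownloadFilesTask example from the class javadoc):
    // run on the shared pool instead of the default serial queue:
    //
    //   new DownloadFilesTask().executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR, url1, url2, url3);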
/**
* Convenience version of {@link #execute(Object...)} for use with
* a simple Runnable object. See {@link #execute(Object[])} for more
* information on the order of execution.
*
* @see #execute(Object[])
* @see #executeOnExecutor(java.util.concurrent.Executor, Object[])
*/
public static void execute(Runnable runnable) {
sDefaultExecutor.execute(runnable);
}
/**
* This method can be invoked from {@link #doInBackground} to
* publish updates on the UI thread while the background computation is
* still running. Each call to this method will trigger the execution of
* {@link #onProgressUpdate} on the UI thread.
*
     * {@link #onProgressUpdate} will not be called if the task has been
     * cancelled.
*
* @param values The progress values to update the UI with.
*
* @see #onProgressUpdate
* @see #doInBackground
*/
protected final void publishProgress(Progress... values) {
if (!isCancelled()) {
sHandler.obtainMessage(MESSAGE_POST_PROGRESS,
new AsyncTaskResult<>(this, values)).sendToTarget();
}
}
private void finish(Result result) {
if (isCancelled()) {
onCancelled(result);
} else {
onPostExecute(result);
}
mStatus = Status.FINISHED;
}
private static class InternalHandler extends Handler {
@SuppressWarnings({"unchecked", "RawUseOfParameterizedType"})
@Override
public void handleMessage(Message msg) {
AsyncTaskResult result = (AsyncTaskResult) msg.obj;
switch (msg.what) {
case MESSAGE_POST_RESULT:
// There is only one result
result.mTask.finish(result.mData[0]);
break;
case MESSAGE_POST_PROGRESS:
result.mTask.onProgressUpdate(result.mData);
break;
}
}
}
private static abstract class WorkerRunnable<Params, Result> implements Callable<Result> {
Params[] mParams;
}
@SuppressWarnings({"RawUseOfParameterizedType"})
private static class AsyncTaskResult<Data> {
final AsyncTask mTask;
final Data[] mData;
AsyncTaskResult(AsyncTask task, Data... data) {
mTask = task;
mData = data;
}
}
}
| reproio/apps-android-wikipedia | wikipedia/src/main/java/org/wikipedia/concurrency/AsyncTask.java | Java | apache-2.0 | 26978
package cn.edu.hhu.reg.vo;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
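/**
 * Login credentials for a doctor account, mapped to the {@code doctor_login} table and
 * linked to the doctor record through {@code doctorId}.
 */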
@Entity
@Table(name="doctor_login")
public class DoctorLogin {
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(length = 16)
private Integer id;
/**
     * Doctor id
*/
@Column(name="doctor_id",length=16)
private Integer doctorId;
/**
     * Doctor login name
*/
@Column(name="login_name",length=50)
private String loginName;
/**
     * Doctor login password
*/
@Column(name="password",length=50)
private String password;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public Integer getDoctorId() {
return doctorId;
}
public void setDoctorId(Integer doctorId) {
this.doctorId = doctorId;
}
public String getLoginName() {
return loginName;
}
public void setLoginName(String loginName) {
this.loginName = loginName;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public DoctorLogin() {
}
}
| pqpo/registration_api | src/cn/edu/hhu/reg/vo/DoctorLogin.java | Java | apache-2.0 | 1,216 |
/*
* Copyright 2018 Aleksander Jagiełło
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pl.themolka.arcade.team;
import org.bukkit.ChatColor;
import pl.themolka.arcade.command.CommandException;
import pl.themolka.arcade.command.CommandUtils;
import pl.themolka.arcade.command.Sender;
import pl.themolka.arcade.game.GamePlayer;
import pl.themolka.arcade.match.Observers;
import pl.themolka.arcade.parser.Context;
import pl.themolka.arcade.util.Color;
import java.util.ArrayList;
import java.util.Collection;
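/**
 * Command handlers for managing teams of a running match: clearing, forcing and kicking
 * players, toggling friendly fire, renaming, repainting and resizing teams. Each handler
 * reports problems by throwing {@link CommandException}, which the surrounding command
 * framework is presumably responsible for presenting to the sender.
 */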
public class TeamCommands {
private final TeamsGame game;
public TeamCommands(TeamsGame game) {
this.game = game;
}
//
// Commands
//
public void clearCommand(Sender sender, String teamId) {
Team team = this.fetchTeam(teamId);
if (team.isObservers()) {
throw new CommandException("Cannot clear observers.");
}
Observers observers = this.game.getMatch().getObservers();
int result = 0;
for (GamePlayer player : new ArrayList<>(team.getOnlineMembers())) {
observers.joinForce(player);
result++;
}
if (result > 0) {
sender.sendSuccess(team.getName() + " has been cleared (" + result + " players) and moved to " +
observers.getName() + ".");
} else {
sender.sendError("No players to clear.");
}
}
public void forceCommand(Sender sender, String username, String teamId) {
GamePlayer player = this.fetchPlayer(username);
Team team = this.fetchTeam(teamId);
if (team.contains(player)) {
throw new CommandException(player.getUsername() + " is already member of " + team.getName() + ".");
}
team.joinForce(player);
sender.sendSuccess(player.getUsername() + " has been moved to " + team.getName() + ".");
}
public void friendlyCommand(Sender sender, String teamId, boolean friendly) {
Team team = this.fetchTeam(teamId);
if (team.isObservers()) {
throw new CommandException("Cannot edit observers.");
}
if (friendly == team.isFriendlyFire()) {
if (friendly) {
throw new CommandException(team.getName() + " is already in friendly-fire.");
} else {
throw new CommandException(team.getName() + " is already not in friendly-fire");
}
}
Team oldState = new Team(team);
team.setFriendlyFire(friendly);
this.callEditEvent(team, oldState, TeamEditEvent.Reason.FRIENDLY_FIRE);
if (friendly) {
sender.sendSuccess(oldState.getName() + " is now in friendly-fire.");
} else {
sender.sendSuccess(oldState.getName() + " is now not in friendly-fire.");
}
}
public void infoCommand(Sender sender) {
Collection<Team> teams = this.game.getTeams();
CommandUtils.sendTitleMessage(sender, "Teams", Integer.toString(teams.size()));
for (Team team : teams) {
sender.send(String.format("%s - %s/%s - %s minimal to play and %s overfill",
team.getPrettyName() + ChatColor.GRAY,
ChatColor.GOLD.toString() + team.getOnlineMembers().size() + ChatColor.GRAY,
Integer.toString(team.getSlots()),
ChatColor.GREEN.toString() + team.getMinPlayers() + ChatColor.GRAY,
ChatColor.RED.toString() + team.getMaxPlayers() + ChatColor.GRAY));
}
}
public void kickCommand(Sender sender, String username) {
GamePlayer player = this.fetchPlayer(username);
Team team = this.game.getTeam(player);
if (team.isObservers()) {
throw new CommandException("Cannot kick from observers.");
}
team.leaveForce(player);
team.getMatch().getObservers().joinForce(player);
sender.sendSuccess(player.getUsername() + " has been kicked from " + team.getName() + ".");
}
public void minCommand(Sender sender, String teamId, int min) {
Team team = this.fetchTeam(teamId);
if (team.isObservers()) {
throw new CommandException("Cannot edit observers.");
} else if (min < 0) {
throw new CommandException("Number cannot be negative.");
}
Team oldState = new Team(team);
team.setMinPlayers(min);
this.callEditEvent(team, oldState, TeamEditEvent.Reason.MIN_PLAYERS);
sender.sendSuccess(oldState.getName() + " has been edited.");
}
public void overfillCommand(Sender sender, String teamId, int overfill) {
Team team = this.fetchTeam(teamId);
if (team.isObservers()) {
throw new CommandException("Cannot edit observers.");
}
// set to unlimited if zero or negative
int max = Integer.MAX_VALUE;
if (overfill > 0) {
max = overfill;
}
Team oldState = new Team(team);
team.setMaxPlayers(max);
if (max > team.getSlots()) {
team.setSlots(max); // slots
}
this.callEditEvent(team, oldState, TeamEditEvent.Reason.MAX_PLAYERS);
sender.sendSuccess(oldState.getName() + " has been edited.");
}
public void paintCommand(Sender sender, String teamId, String paint) {
Team team = this.fetchTeam(teamId);
ChatColor color = Color.parseChat(new Context(this.game.getPlugin()), paint);
if (color == null) {
StringBuilder colors = new StringBuilder();
for (int i = 0; i < ChatColor.values().length; i++) {
ChatColor value = ChatColor.values()[i];
if (i != 0) {
colors.append(", ");
}
ChatColor result = ChatColor.RED;
if (!value.equals(ChatColor.MAGIC)) {
result = value;
}
colors.append(result).append(value.name().toLowerCase().replace("_", "-"))
.append(ChatColor.RESET).append(ChatColor.RED);
}
throw new CommandException("Available colors: " + colors.toString() + ".");
}
Team oldState = new Team(team);
team.setChatColor(color);
this.callEditEvent(team, oldState, TeamEditEvent.Reason.PAINT);
sender.sendSuccess(oldState.getName() + " has been painted from " +
oldState.getChatColor().name().toLowerCase().replace("_", "-") + " to " +
team.getChatColor().name().toLowerCase().replace("_", "-") + ".");
}
public void renameCommand(Sender sender, String teamId, String name) {
Team team = this.fetchTeam(teamId);
if (name == null) {
throw new CommandException("New name not given.");
} else if (name.length() > Team.NAME_MAX_LENGTH) {
throw new CommandException("Name too long (greater than " + Team.NAME_MAX_LENGTH + " characters).");
} else if (team.getName().equals(name)) {
throw new CommandException("Already named '" + team.getName() + "'.");
}
Team oldState = new Team(team);
team.setName(name);
this.callEditEvent(team, oldState, TeamEditEvent.Reason.RENAME);
sender.sendSuccess(oldState.getName() + " has been renamed to " + team.getName() + ".");
}
public void slotsCommand(Sender sender, String teamId, int slots) {
Team team = this.fetchTeam(teamId);
if (team.isObservers()) {
throw new CommandException("Cannot edit observers.");
}
// set to unlimited if zero or negative
int max = Integer.MAX_VALUE;
if (slots > 0) {
max = slots;
}
Team oldState = new Team(team);
team.setSlots(max);
if (max > team.getMaxPlayers()) {
team.setMaxPlayers(max); // overfill
}
this.callEditEvent(team, oldState, TeamEditEvent.Reason.SLOTS);
sender.sendSuccess(oldState.getName() + " has been edited.");
}
//
// Command Utilities
//
private void callEditEvent(Team newState, Team oldState, TeamEditEvent.Reason reason) {
this.game.getPlugin().getEventBus().publish(new TeamEditEvent(
this.game.getPlugin(), newState, oldState, reason));
}
private GamePlayer fetchPlayer(String player) {
if (player != null && !player.isEmpty()) {
GamePlayer result = this.game.getGame().findPlayer(player);
if (result != null) {
return result;
}
}
throw new CommandException("Player not found.");
}
private Team fetchTeam(String team) {
if (team != null && !team.isEmpty()) {
Team result = this.game.findTeamById(team);
if (result != null) {
return result;
}
}
throw new CommandException("Team not found.");
}
}
| ShootGame/Arcade2 | src/main/java/pl/themolka/arcade/team/TeamCommands.java | Java | apache-2.0 | 9,514 |
package problems;
import java.util.Arrays;
import java.util.PriorityQueue;
/**
* Leetcode: Super Ugly Number
* Created by alan on 2/24/2016.
*/
public class SuperUglyNumber {
class Node implements Comparable<Node> {
int val;
final int prime_index;
public Node(int value, int prime_idx) {
this.val = value;
this.prime_index = prime_idx;
}
public int compareTo(Node a) {
return this.val - a.val;
}
}
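    /*
     * Min-heap approach: index[i] is the position in nums of the ugly number that primes[i]
     * was last multiplied with, and the heap always holds the next candidate for each prime
     * (nums[index[i]] * primes[i]). Each round the smallest candidate becomes the next ugly
     * number; candidates equal to the previous value are advanced first to skip duplicates.
     * Despite the name, this variant returns the whole array of the first n super ugly numbers.
     */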
public int[] nthSuperUglyNumber(int n, int[] primes) {
int[] nums = new int[n];
nums[0] = 1;
int[] index = new int[primes.length];
PriorityQueue<Node> pq = new PriorityQueue<>();
for (int i = 0; i < primes.length; i++)
pq.add(new Node(primes[i], i));
for (int i = 1; i < n; i++) {
Node node = pq.poll();
while (node.val == nums[i - 1]) {
node.val = nums[++index[node.prime_index]] * primes[node.prime_index];
pq.add(node);
node = pq.poll();
}
nums[i] = node.val;
node.val = nums[++index[node.prime_index]] * primes[node.prime_index];
pq.add(node);
}
return nums;
}
public static void main(String[] args) {
SuperUglyNumber sn = new SuperUglyNumber();
int[] primes = {2, 7, 13, 19};
System.out.println(Arrays.toString(primes));
System.out.println(Arrays.toString(sn.nthSuperUglyNumber(12, primes)));
}
}
| alyiwang/LeetCode | src/problems/SuperUglyNumber.java | Java | apache-2.0 | 1,548 |
package yuku.alkitab.base.util;
import android.app.Activity;
import android.app.Dialog;
import android.content.Intent;
import android.database.Cursor;
import android.database.DatabaseUtils;
import android.database.sqlite.SQLiteDatabase;
import android.os.AsyncTask;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.util.Xml;
import com.afollestad.materialdialogs.MaterialDialog;
import gnu.trove.list.TIntList;
import gnu.trove.list.array.TIntArrayList;
import gnu.trove.map.hash.TIntLongHashMap;
import gnu.trove.map.hash.TIntObjectHashMap;
import gnu.trove.map.hash.TObjectIntHashMap;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.ext.DefaultHandler2;
import yuku.alkitab.base.App;
import yuku.alkitab.base.IsiActivity;
import yuku.alkitab.base.S;
import yuku.alkitab.base.storage.Db;
import yuku.alkitab.base.storage.InternalDb;
import yuku.alkitab.debug.R;
import yuku.alkitab.model.Label;
import yuku.alkitab.model.Marker;
import yuku.alkitab.model.Marker_Label;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static yuku.alkitab.base.util.Literals.ToStringArray;
// Imported from v3. Used for once-only migration from v3 to v4.
public class BookmarkImporter {
static final String TAG = BookmarkImporter.class.getSimpleName();
// constants
static class Bookmark2_Label { // DO NOT CHANGE CONSTANT VALUES!
public static final String XMLTAG_Bookmark2_Label = "Bukmak2_Label";
public static final String XMLATTR_bookmark2_relId = "bukmak2_relId";
public static final String XMLATTR_label_relId = "label_relId";
}
// constants
static class BackupManager {
public static final String XMLTAG_Bukmak2 = "Bukmak2";
private static final String XMLATTR_ari = "ari";
private static final String XMLATTR_kind = "jenis";
private static final String XMLATTR_caption = "tulisan";
private static final String XMLATTR_addTime = "waktuTambah";
private static final String XMLATTR_modifyTime = "waktuUbah";
private static final String XMLATTR_relId = "relId";
private static final String XMLVAL_bookmark = "bukmak";
private static final String XMLVAL_note = "catatan";
private static final String XMLVAL_highlight = "stabilo";
public static final String XMLTAG_Label = "Label";
private static final String XMLATTR_title = "judul";
private static final String XMLATTR_bgColor = "warnaLatar";
@Nullable
public static Marker markerFromAttributes(Attributes attributes) {
int ari = Integer.parseInt(attributes.getValue("", XMLATTR_ari));
String kind_s = attributes.getValue("", XMLATTR_kind);
Marker.Kind kind = kind_s.equals(XMLVAL_bookmark) ? Marker.Kind.bookmark : kind_s.equals(XMLVAL_note) ? Marker.Kind.note : kind_s.equals(XMLVAL_highlight) ? Marker.Kind.highlight : null;
String caption = unescapeHighUnicode(attributes.getValue("", XMLATTR_caption));
Date addTime = Sqlitil.toDate(Integer.parseInt(attributes.getValue("", XMLATTR_addTime)));
Date modifyTime = Sqlitil.toDate(Integer.parseInt(attributes.getValue("", XMLATTR_modifyTime)));
if (kind == null) { // invalid
return null;
}
return Marker.createNewMarker(ari, kind, caption, 1, addTime, modifyTime);
}
public static int getRelId(Attributes attributes) {
String s = attributes.getValue("", XMLATTR_relId);
return s == null ? 0 : Integer.parseInt(s);
}
public static Label labelFromAttributes(Attributes attributes) {
String title = unescapeHighUnicode(attributes.getValue("", XMLATTR_title));
String bgColor = attributes.getValue("", XMLATTR_bgColor);
return Label.createNewLabel(title, 0, bgColor);
}
static ThreadLocal<Matcher> highUnicodeMatcher = new ThreadLocal<Matcher>() {
@Override
protected Matcher initialValue() {
return Pattern.compile("\\[\\[~U([0-9A-Fa-f]{6})~\\]\\]").matcher("");
}
};
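        // Decodes escape markers of the form [[~Uxxxxxx~]] (six hex digits) back into the
        // corresponding code point, e.g. "[[~U01F600~]]" becomes the single character U+1F600.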
public static String unescapeHighUnicode(String input) {
if (input == null) return null;
final Matcher m = highUnicodeMatcher.get();
m.reset(input);
StringBuffer res = new StringBuffer();
while (m.find()) {
String s = m.group(1);
final int cp = Integer.parseInt(s, 16);
m.appendReplacement(res, new String(new int[]{cp}, 0, 1));
}
m.appendTail(res);
return res.toString();
}
}
public static void importBookmarks(final Activity activity, @NonNull final InputStream fis, final boolean finishActivityAfterwards, final Runnable runWhenDone) {
final MaterialDialog pd = new MaterialDialog.Builder(activity)
.content(R.string.mengimpor_titiktiga)
.cancelable(false)
.progress(true, 0)
.show();
new AsyncTask<Boolean, Integer, Object>() {
int count_bookmark = 0;
int count_label = 0;
@Override
protected Object doInBackground(Boolean... params) {
final List<Marker> markers = new ArrayList<>();
final TObjectIntHashMap<Marker> markerToRelIdMap = new TObjectIntHashMap<>();
final List<Label> labels = new ArrayList<>();
final TObjectIntHashMap<Label> labelToRelIdMap = new TObjectIntHashMap<>();
final TIntLongHashMap labelRelIdToAbsIdMap = new TIntLongHashMap();
final TIntObjectHashMap<TIntList> markerRelIdToLabelRelIdsMap = new TIntObjectHashMap<>();
try {
Xml.parse(fis, Xml.Encoding.UTF_8, new DefaultHandler2() {
@Override
public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException {
switch (localName) {
case BackupManager.XMLTAG_Bukmak2:
final Marker marker = BackupManager.markerFromAttributes(attributes);
if (marker != null) {
markers.add(marker);
final int bookmark2_relId = BackupManager.getRelId(attributes);
markerToRelIdMap.put(marker, bookmark2_relId);
count_bookmark++;
}
break;
case BackupManager.XMLTAG_Label: {
final Label label = BackupManager.labelFromAttributes(attributes);
int label_relId = BackupManager.getRelId(attributes);
labels.add(label);
labelToRelIdMap.put(label, label_relId);
count_label++;
break;
}
case Bookmark2_Label.XMLTAG_Bookmark2_Label: {
final int bookmark2_relId = Integer.parseInt(attributes.getValue("", Bookmark2_Label.XMLATTR_bookmark2_relId));
final int label_relId = Integer.parseInt(attributes.getValue("", Bookmark2_Label.XMLATTR_label_relId));
TIntList labelRelIds = markerRelIdToLabelRelIdsMap.get(bookmark2_relId);
if (labelRelIds == null) {
labelRelIds = new TIntArrayList();
markerRelIdToLabelRelIdsMap.put(bookmark2_relId, labelRelIds);
}
labelRelIds.add(label_relId);
break;
}
}
}
});
fis.close();
} catch (Exception e) {
return e;
}
                { // create the labels that are needed, and map each label relId to its absolute id.
final HashMap<String, Label> judulMap = new HashMap<>();
final List<Label> xlabelLama = S.getDb().listAllLabels();
for (Label labelLama : xlabelLama) {
judulMap.put(labelLama.title, labelLama);
}
for (Label label : labels) {
                        // check whether a label with exactly the same title already exists
Label labelLama = judulMap.get(label.title);
final int labelRelId = labelToRelIdMap.get(label);
if (labelLama != null) {
                            // removed from v3: update the old label's color
labelRelIdToAbsIdMap.put(labelRelId, labelLama._id);
AppLog.d(TAG, "label (lama) r->a : " + labelRelId + "->" + labelLama._id);
                        } else { // no such label yet, create a new one
Label labelBaru = S.getDb().insertLabel(label.title, label.backgroundColor);
labelRelIdToAbsIdMap.put(labelRelId, labelBaru._id);
AppLog.d(TAG, "label (baru) r->a : " + labelRelId + "->" + labelBaru._id);
}
}
}
importBookmarks(markers, markerToRelIdMap, labelRelIdToAbsIdMap, markerRelIdToLabelRelIdsMap);
return null;
}
@Override
protected void onPostExecute(@NonNull Object result) {
pd.dismiss();
if (result instanceof Exception) {
AppLog.e(TAG, "Error when importing markers", (Throwable) result);
new MaterialDialog.Builder(activity)
.content(activity.getString(R.string.terjadi_kesalahan_ketika_mengimpor_pesan, ((Exception) result).getMessage()))
.positiveText(R.string.ok)
.show();
} else {
final Dialog dialog = new MaterialDialog.Builder(activity)
.content(activity.getString(R.string.impor_berhasil_angka_diproses, count_bookmark, count_label))
.positiveText(R.string.ok)
.show();
if (finishActivityAfterwards) {
dialog.setOnDismissListener(dialog1 -> activity.finish());
}
}
if (runWhenDone != null) runWhenDone.run();
}
}.execute();
}
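    /**
     * Writes the imported markers into the local database in a single transaction: a marker
     * with the same kind, verse (ari) and caption is reused instead of duplicated, and label
     * assignments are only inserted for markers that have no labels yet. Finally broadcasts
     * ACTION_ATTRIBUTE_MAP_CHANGED.
     */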
public static void importBookmarks(List<Marker> markers, TObjectIntHashMap<Marker> markerToRelIdMap, TIntLongHashMap labelRelIdToAbsIdMap, TIntObjectHashMap<TIntList> markerRelIdToLabelRelIdsMap) {
SQLiteDatabase db = S.getDb().getWritableDatabase();
db.beginTransaction();
try {
final TIntObjectHashMap<Marker> markerRelIdToMarker = new TIntObjectHashMap<>();
{ // write new markers (if not available yet)
for (int i = 0; i < markers.size(); i++) {
Marker marker = markers.get(i);
final int marker_relId = markerToRelIdMap.get(marker);
// migrate: look for existing marker with same kind, ari, and content
try (Cursor cursor = db.query(
Db.TABLE_Marker,
null,
Db.Marker.ari + "=? and " + Db.Marker.kind + "=? and " + Db.Marker.caption + "=?",
ToStringArray(marker.ari, marker.kind.code, marker.caption),
null, null, null
)) {
if (cursor.moveToNext()) {
marker = InternalDb.markerFromCursor(cursor);
markers.set(i, marker);
} else {
InternalDb.insertMarker(db, marker);
}
// map it
markerRelIdToMarker.put(marker_relId, marker);
}
}
}
{ // now is marker-label assignments
for (final int marker_relId : markerRelIdToLabelRelIdsMap.keys()) {
final TIntList label_relIds = markerRelIdToLabelRelIdsMap.get(marker_relId);
final Marker marker = markerRelIdToMarker.get(marker_relId);
if (marker != null) {
// existing labels > 0: ignore
// existing labels == 0: insert
final int existing_label_count = (int) DatabaseUtils.queryNumEntries(db, Db.TABLE_Marker_Label, Db.Marker_Label.marker_gid + "=?", ToStringArray(marker.gid));
if (existing_label_count == 0) {
for (int label_relId : label_relIds.toArray()) {
final long label_id = labelRelIdToAbsIdMap.get(label_relId);
if (label_id > 0) {
final Label label = S.getDb().getLabelById(label_id);
final Marker_Label marker_label = Marker_Label.createNewMarker_Label(marker.gid, label.gid);
InternalDb.insertMarker_LabelIfNotExists(db, marker_label);
} else {
AppLog.w(TAG, "label_id is invalid!: " + label_id);
}
}
}
} else {
AppLog.w(TAG, "wrong marker_relId: " + marker_relId);
}
}
}
db.setTransactionSuccessful();
} finally {
db.endTransaction();
}
App.getLbm().sendBroadcast(new Intent(IsiActivity.ACTION_ATTRIBUTE_MAP_CHANGED));
}
}
| infojulio/androidbible | Alkitab/src/main/java/yuku/alkitab/base/util/BookmarkImporter.java | Java | apache-2.0 | 11,461 |
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.core.region;
import java.util.Set;
import org.joda.beans.impl.flexi.FlexiBean;
import org.threeten.bp.ZoneId;
import com.opengamma.id.ExternalBundleIdentifiable;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.id.UniqueId;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.util.PublicAPI;
import com.opengamma.util.i18n.Country;
import com.opengamma.util.money.Currency;
/**
* A region of the world.
* <p>
* Many aspects of business, algorithms and contracts are specific to a region. The region may be of any size, from a municipality to a super-national group.
* <p>
* This interface is read-only. Implementations may be mutable.
*/
@PublicAPI
public interface Region extends UniqueIdentifiable, ExternalBundleIdentifiable {
/**
* Gets the unique identifier of the region.
* <p>
* This specifies a single version-correction of the region.
*
* @return the unique identifier for this region, not null within the engine
*/
@Override
UniqueId getUniqueId();
/**
* Gets the external identifier bundle that defines the region.
* <p>
* Each external system has one or more identifiers by which they refer to the region.
* Some of these may be unique within that system, while others may be more descriptive.
* This bundle stores the set of these external identifiers.
* <p>
* This will include the country, currency and time-zone.
*
* @return the bundle defining the region, not null
*/
@Override // override for Javadoc
ExternalIdBundle getExternalIdBundle();
/**
* Gets the classification of the region.
*
* @return the classification of region, such as SUPER_NATIONAL or INDEPENDENT_STATE, not null
*/
RegionClassification getClassification();
/**
* Gets the unique identifiers of the regions that this region is a member of. For example, a country might be a member
* of the World, UN, European Union and NATO.
*
* @return the parent unique identifiers, null if this is the root entry
*/
Set<UniqueId> getParentRegionIds();
/**
* Gets the country.
*
* @return the country, null if not applicable
*/
Country getCountry();
/**
* Gets the currency.
*
* @return the currency, null if not applicable
*/
Currency getCurrency();
/**
* Gets the time-zone. For larger regions, there can be multiple time-zones, so this is only reliable for municipalities.
*
* @return the time-zone, null if not applicable
*/
ZoneId getTimeZone();
/**
* Gets the short descriptive name of the region.
*
* @return the name of the region, not null
*/
String getName();
/**
* Gets the full descriptive name of the region.
*
* @return the full name of the region, not null
*/
String getFullName();
/**
* Gets the extensible data store for additional information. Applications may store additional region based information here.
*
* @return the additional data, not null
*/
FlexiBean getData();
}
| McLeodMoores/starling | projects/core/src/main/java/com/opengamma/core/region/Region.java | Java | apache-2.0 | 3,168 |
/* $Id$
* $URL: https://dev.almende.com/svn/abms/coala-common/src/main/java/com/almende/coala/time/NanoInstant.java $
*
* Part of the EU project Adapt4EE, see http://www.adapt4ee.eu/
*
* @license
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
* Copyright (c) 2010-2013 Almende B.V.
*/
package io.coala.time;
/**
 * {@link NanoInstant} uses the nanosecond as its base time unit
*
* @date $Date: 2014-06-03 14:26:09 +0200 (Tue, 03 Jun 2014) $
* @version $Revision: 296 $
* @author <a href="mailto:Rick@almende.org">Rick</a>
*/
public class NanoInstant extends AbstractInstant<NanoInstant>
{
/** */
private static final long serialVersionUID = 1L;
/** */
// private static final Logger LOG = LogUtil.getLogger(NanoInstant.class);
/** */
// private static final TimeUnit BASE_UNIT = TimeUnit.NANOS;
/** */
public static final NanoInstant ZERO = new NanoInstant(null, 0);
/**
* {@link NanoInstant} constructor
*
* @param value
*/
public NanoInstant(final ClockID clockID, final Number value)
{
super(clockID, value, TimeUnit.NANOS);
}
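	// Usage sketch (illustrative; clockID may also be null, as for the ZERO constant):
	//
	//   NanoInstant t = new NanoInstant(clockID, 1500); // value 1500, in nanoseconds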
// /**
// * {@link NanoInstant} constructor
// *
// * @param value
// */
// public NanoInstant(final ClockID clockID, final Number value,
// final TimeUnit unit)
// {
// super(clockID, value, unit);
// }
//
// /** @see Instant#getBaseUnit() */
// @Override
// public TimeUnit getBaseUnit()
// {
// return BASE_UNIT;
// }
/** @see Instant#toUnit(TimeUnit) */
@Override
public NanoInstant toUnit(final TimeUnit unit)
{
throw new RuntimeException(
"Can't convert NanoInstant to another TimeUnit");
}
/** @see Instant#plus(Number) */
@Override
public NanoInstant plus(final Number value)
{
return new NanoInstant(getClockID(), getValue().doubleValue()
+ value.doubleValue());
}
}
| krevelen/coala | coala-core/src/main/java/io/coala/time/NanoInstant.java | Java | apache-2.0 | 2,299 |
package cat.ereza.customactivityoncrash.activity;
import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;
import cat.ereza.customactivityoncrash.CustomActivityOnCrash;
/**
* Created by zhy on 15/8/4.
*/
public class ClearStack extends Activity
{
@Override
protected void onCreate(Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
        // Relaunch the intent provided by CustomActivityOnCrash.
        Intent intent = getIntent().getParcelableExtra(CustomActivityOnCrash.KEY_CURRENT_INTENT);
        startActivity(intent);
        finish();
        // Terminate this process so the relaunched activity runs in a fresh one.
        Runtime.getRuntime().exit(0);
}
}
| hongyangAndroid/CustomActivityOnCrash | library/src/main/java/cat/ereza/customactivityoncrash/activity/ClearStack.java | Java | apache-2.0 | 596 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.tools.common;
import javax.xml.namespace.QName;
public final class ToolConstants {
//public static final String TOOLSPECS_BASE = "/org/apache/cxf/tools/common/toolspec/toolspecs/";
public static final String TOOLSPECS_BASE = "/org/apache/cxf/tools/";
public static final String SCHEMA_URI = "http://www.w3.org/2001/XMLSchema";
public static final String XML_NAMESPACE_URI = "http://www.w3.org/XML/1998/namespace";
public static final String WSDL_NAMESPACE_URI = "http://schemas.xmlsoap.org/wsdl/";
public static final String WSA_NAMESPACE_URI = "http://www.w3.org/2005/08/addressing";
/**
* Tools permit caller to pass in additional bean definitions.
*/
public static final String CFG_BEAN_CONFIG = "beans";
public static final String DEFAULT_TEMP_DIR = "gen_tmp";
public static final String CFG_OUTPUTDIR = "outputdir";
public static final String CFG_OUTPUTFILE = "outputfile";
public static final String CFG_WSDLURL = "wsdlurl";
public static final String CFG_WSDLLOCATION = "wsdlLocation";
public static final String CFG_WSDLLIST = "wsdlList";
public static final String CFG_NAMESPACE = "namespace";
public static final String CFG_VERBOSE = "verbose";
public static final String CFG_PORT = "port";
public static final String CFG_BINDING = "binding";
public static final String CFG_AUTORESOLVE = "autoNameResolution";
public static final String CFG_WEBSERVICE = "webservice";
public static final String CFG_SERVER = "server";
public static final String CFG_CLIENT = "client";
public static final String CFG_ALL = "all";
public static final String CFG_IMPL = "impl";
public static final String CFG_PACKAGENAME = "packagename";
public static final String CFG_JSPACKAGEPREFIX = "jspackageprefix";
public static final String CFG_NINCLUDE = "ninclude";
public static final String CFG_NEXCLUDE = "nexclude";
public static final String CFG_CMD_ARG = "args";
public static final String CFG_INSTALL_DIR = "install.dir";
public static final String CFG_PLATFORM_VERSION = "platform.version";
public static final String CFG_COMPILE = "compile";
public static final String CFG_CLASSDIR = "classdir";
public static final String CFG_EXTRA_SOAPHEADER = "exsoapheader";
public static final String CFG_DEFAULT_NS = "defaultns";
public static final String CFG_DEFAULT_EX = "defaultex";
public static final String CFG_NO_TYPES = "notypes";
public static final String CFG_XJC_ARGS = "xjc";
public static final String CFG_CATALOG = "catalog";
public static final String CFG_BAREMETHODS = "bareMethods";
public static final String CFG_ASYNCMETHODS = "asyncMethods";
public static final String CFG_MIMEMETHODS = "mimeMethods";
public static final String CFG_DEFAULT_VALUES = "defaultValues";
public static final String CFG_JAVASCRIPT_UTILS = "javascriptUtils";
public static final String CFG_VALIDATE_WSDL = "validate";
public static final String CFG_CREATE_XSD_IMPORTS = "createxsdimports";
/**
* Front-end selection command-line option to java2ws.
*/
public static final String CFG_FRONTEND = "frontend";
public static final String CFG_DATABINDING = "databinding";
public static final String DEFAULT_ADDRESS = "http://localhost:9090";
// WSDL2Java Constants
public static final String CFG_TYPES = "types";
public static final String CFG_INTERFACE = "interface";
public static final String CFG_NIGNOREEXCLUDE = "nignoreexclude";
public static final String CFG_ANT = "ant";
public static final String CFG_LIB_REF = "library.references";
public static final String CFG_ANT_PROP = "ant.prop";
public static final String CFG_NO_ADDRESS_BINDING = "noAddressBinding";
public static final String CFG_ALLOW_ELEMENT_REFS = "allowElementReferences";
public static final String CFG_RESERVE_NAME = "reserveClass";
public static final String CFG_FAULT_SERIAL_VERSION_UID = "faultSerialVersionUID";
public static final String CFG_EXCEPTION_SUPER = "exceptionSuper";
public static final String CFG_MARK_GENERATED = "mark-generated";
//Internal Flag to generate
public static final String CFG_IMPL_CLASS = "implClass";
public static final String CFG_GEN_CLIENT = "genClient";
public static final String CFG_GEN_SERVER = "genServer";
public static final String CFG_GEN_IMPL = "genImpl";
public static final String CFG_GEN_TYPES = "genTypes";
public static final String CFG_GEN_SEI = "genSEI";
public static final String CFG_GEN_ANT = "genAnt";
public static final String CFG_GEN_SERVICE = "genService";
public static final String CFG_GEN_OVERWRITE = "overwrite";
public static final String CFG_GEN_FAULT = "genFault";
public static final String CFG_GEN_NEW_ONLY = "newonly";
// Java2WSDL Constants
public static final String CFG_CLASSPATH = "classpath";
public static final String CFG_TNS = "tns";
public static final String CFG_SERVICENAME = "servicename";
public static final String CFG_SCHEMANS = "schemans";
public static final String CFG_USETYPES = "usetypes";
public static final String CFG_CLASSNAME = "classname";
public static final String CFG_PORTTYPE = "porttype";
public static final String CFG_SOURCEDIR = "sourcedir";
public static final String CFG_WSDL = "wsdl";
public static final String CFG_WRAPPERBEAN = "wrapperbean";
// WSDL2Service Constants
public static final String CFG_ADDRESS = "address";
public static final String CFG_TRANSPORT = "transport";
public static final String CFG_SERVICE = "service";
public static final String CFG_BINDING_ATTR = "attrbinding";
public static final String CFG_SOAP12 = "soap12";
// WSDL2Soap Constants
public static final String CFG_STYLE = "style";
public static final String CFG_USE = "use";
// XSD2WSDL Constants
public static final String CFG_XSDURL = "xsdurl";
public static final String CFG_NAME = "name";
// WsdlValidator
public static final String CFG_DEEP = "deep";
public static final String CFG_SCHEMA_DIR = "schemaDir";
public static final String CFG_SCHEMA_URL = "schemaURL";
public static final String CXF_SCHEMA_DIR = "cxf_schema_dir";
public static final String CXF_SCHEMAS_DIR_INJAR = "schemas/wsdl/";
public static final String CFG_SUPPRESS_WARNINGS = "suppressWarnings";
// WSDL2Java Processor Constants
public static final String SEI_GENERATOR = "sei.generator";
public static final String FAULT_GENERATOR = "fault.generator";
public static final String TYPE_GENERATOR = "type.generator";
public static final String IMPL_GENERATOR = "impl.generator";
public static final String SVR_GENERATOR = "svr.generator";
public static final String CLT_GENERATOR = "clt.generator";
public static final String SERVICE_GENERATOR = "service.generator";
public static final String ANT_GENERATOR = "ant.generator";
public static final String HANDLER_GENERATOR = "handler.generator";
// Binding namespace
public static final String NS_JAXWS_BINDINGS = "http://java.sun.com/xml/ns/jaxws";
public static final String NS_JAXB_BINDINGS = "http://java.sun.com/xml/ns/jaxb";
public static final QName JAXWS_BINDINGS = new QName(NS_JAXWS_BINDINGS, "bindings");
public static final QName JAXB_BINDINGS = new QName(NS_JAXB_BINDINGS, "bindings");
public static final String JAXWS_BINDINGS_WSDL_LOCATION = "wsdlLocation";
public static final String JAXWS_BINDING_NODE = "node";
public static final String JAXWS_BINDING_VERSION = "version";
public static final String ASYNC_METHOD_SUFFIX = "Async";
public static final String HANDLER_CHAINS_URI = "http://java.sun.com/xml/ns/javaee";
public static final String HANDLER_CHAIN = "handler-chain";
public static final String HANDLER_CHAINS = "handler-chains";
//public static final String RAW_JAXB_MODEL = "rawjaxbmodel";
// JMS address
public static final String NS_JMS_ADDRESS = "http://cxf.apache.org/transports/jms";
public static final QName JMS_ADDRESS = new QName(NS_JMS_ADDRESS, "address");
public static final String JMS_ADDR_DEST_STYLE = "destinationStyle";
public static final String JMS_ADDR_JNDI_URL = "jndiProviderURL";
public static final String JMS_ADDR_JNDI_FAC = "jndiConnectionFactoryName";
public static final String JMS_ADDR_JNDI_DEST = "jndiDestinationName";
public static final String JMS_ADDR_MSG_TYPE = "messageType";
public static final String JMS_ADDR_INIT_CTX = "initialContextFactory";
public static final String JMS_ADDR_SUBSCRIBER_NAME = "durableSubscriberName";
public static final String JMS_ADDR_MSGID_TO_CORRID = "useMessageIDAsCorrelationID";
// XML Binding
public static final String XMLBINDING_ROOTNODE = "rootNode";
public static final String XMLBINDING_HTTP_LOCATION = "location";
public static final String NS_XML_FORMAT = "http://cxf.apache.org/bindings/xformat";
public static final String XML_FORMAT_PREFIX = "xformat";
public static final String NS_XML_HTTP = "http://schemas.xmlsoap.org/wsdl/http/";
public static final String XML_HTTP_PREFIX = "http";
public static final QName XML_HTTP_ADDRESS = new QName(NS_XML_HTTP, "address");
public static final QName XML_FORMAT = new QName(NS_XML_FORMAT, "body");
public static final QName XML_BINDING_FORMAT = new QName(NS_XML_FORMAT, "binding");
public static final String XML_SCHEMA_COLLECTION = "xmlSchemaCollection";
public static final String PORTTYPE_MAP = "portTypeMap";
public static final String SCHEMA_TARGET_NAMESPACES = "schemaTargetNameSpaces";
public static final String WSDL_DEFINITION = "wsdlDefinition";
public static final String IMPORTED_DEFINITION = "importedDefinition";
public static final String IMPORTED_PORTTYPE = "importedPortType";
public static final String IMPORTED_SERVICE = "importedService";
public static final String BINDING_GENERATOR = "BindingGenerator";
// Tools framework
public static final String FRONTEND_PLUGIN = "frontend";
public static final String DATABINDING_PLUGIN = "databinding";
public static final String RUNTIME_DATABINDING_CLASS = "databinding-class";
public static final String CFG_WSDL_VERSION = "wsdlversion";
// Suppress the code generation, in this case you can just get the generated code model
public static final String CFG_SUPPRESS_GEN = "suppress";
public static final String DEFAULT_PACKAGE_NAME = "defaultnamespace";
//For java2ws tool
public static final String SERVICE_LIST = "serviceList";
public static final String GEN_FROM_SEI = "genFromSEI";
public static final String JAXWS_FRONTEND = "jaxws";
public static final String SIMPLE_FRONTEND = "simple";
public static final String JAXB_DATABINDING = "jaxb";
public static final String AEGIS_DATABINDING = "aegis";
//For Simple FrontEnd
public static final String SEI_CLASS = "seiClass";
public static final String IMPL_CLASS = "implClass";
public static final String SERVICE_NAME = "serviceName";
public static final String PORT_NAME = "portName";
public static final String DEFAULT_DATA_BINDING_NAME = "jaxb";
public static final String DATABIND_BEAN_NAME_SUFFIX = "DatabindingBean";
public static final String CLIENT_CLASS = "clientClass";
public static final String SERVER_CLASS = "serverClass";
public static final String CFG_JSPREFIXMAP = "javascriptPrefixMap";
private ToolConstants() {
//utility class
}
}
| zzsoszz/webservice_gzdx | opensource_cxf/org/apache/cxf/tools/common/ToolConstants.java | Java | apache-2.0 | 12,534 |
/**
* Copyright Pravega Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.pravega.client.connection.impl;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.SslHandler;
import io.pravega.client.ClientConfig;
import io.pravega.shared.protocol.netty.CommandDecoder;
import io.pravega.shared.protocol.netty.CommandEncoder;
import io.pravega.shared.protocol.netty.ConnectionFailedException;
import io.pravega.shared.protocol.netty.FailingReplyProcessor;
import io.pravega.shared.protocol.netty.PravegaNodeUri;
import io.pravega.shared.protocol.netty.WireCommands;
import io.pravega.test.common.AssertExtensions;
import io.pravega.test.common.SecurityConfigDefaults;
import io.pravega.test.common.TestUtils;
import java.io.File;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLException;
import javax.net.ssl.SSLParameters;
import lombok.Cleanup;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import static io.pravega.shared.metrics.MetricNotifier.NO_OP_METRIC_NOTIFIER;
import static io.pravega.shared.protocol.netty.WireCommands.MAX_WIRECOMMAND_SIZE;
import static io.pravega.test.common.AssertExtensions.assertThrows;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
public class ConnectionPoolingTest {
@Rule
public Timeout globalTimeout = Timeout.seconds(1000);
boolean ssl = false;
private Channel serverChannel;
private int port;
private final String seg = "Segment-0";
private final long offset = 1234L;
private final int length = 1024;
private final String data = "data";
private final Function<Long, WireCommands.ReadSegment> readRequestGenerator = id ->
new WireCommands.ReadSegment(seg, offset, length, "", id);
private final Function<Long, WireCommands.SegmentRead> readResponseGenerator = id ->
new WireCommands.SegmentRead(seg, offset, true, false, Unpooled.wrappedBuffer(data.getBytes(StandardCharsets.UTF_8)), id);
private class EchoServerHandler extends ChannelInboundHandlerAdapter {
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
cause.printStackTrace();
ctx.close();
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object message) {
if (message instanceof WireCommands.Hello) {
ctx.write(message);
ctx.flush();
} else if (message instanceof WireCommands.ReadSegment) {
WireCommands.ReadSegment msg = (WireCommands.ReadSegment) message;
ctx.write(readResponseGenerator.apply(msg.getRequestId()));
ctx.flush();
}
}
}
@Before
public void setUp() throws Exception {
// Configure SSL.
port = TestUtils.getAvailableListenPort();
final SslContext sslCtx;
if (ssl) {
try {
sslCtx = SslContextBuilder.forServer(
new File(SecurityConfigDefaults.TLS_SERVER_CERT_PATH),
new File(SecurityConfigDefaults.TLS_SERVER_PRIVATE_KEY_PATH))
.build();
} catch (SSLException e) {
throw new RuntimeException(e);
}
} else {
sslCtx = null;
}
boolean nio = false;
EventLoopGroup bossGroup;
EventLoopGroup workerGroup;
try {
bossGroup = new EpollEventLoopGroup(1);
workerGroup = new EpollEventLoopGroup();
} catch (ExceptionInInitializerError | UnsatisfiedLinkError | NoClassDefFoundError e) {
nio = true;
bossGroup = new NioEventLoopGroup(1);
workerGroup = new NioEventLoopGroup();
}
ServerBootstrap b = new ServerBootstrap();
b.group(bossGroup, workerGroup)
.channel(nio ? NioServerSocketChannel.class : EpollServerSocketChannel.class)
.option(ChannelOption.SO_BACKLOG, 100)
.handler(new LoggingHandler(LogLevel.INFO))
.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
if (sslCtx != null) {
SslHandler handler = sslCtx.newHandler(ch.alloc());
SSLEngine sslEngine = handler.engine();
SSLParameters sslParameters = sslEngine.getSSLParameters();
sslParameters.setEndpointIdentificationAlgorithm("LDAPS");
sslEngine.setSSLParameters(sslParameters);
p.addLast(handler);
}
p.addLast(new CommandEncoder(null, NO_OP_METRIC_NOTIFIER),
new LengthFieldBasedFrameDecoder(MAX_WIRECOMMAND_SIZE, 4, 4),
new CommandDecoder(),
new EchoServerHandler());
}
});
// Start the server.
serverChannel = b.bind("localhost", port).awaitUninterruptibly().channel();
}
@After
public void tearDown() throws Exception {
serverChannel.close();
serverChannel.closeFuture();
}
@Test
public void testNonPooling() throws Exception {
ClientConfig clientConfig = ClientConfig.builder()
.controllerURI(URI.create((this.ssl ? "tls://" : "tcp://")
+ "localhost"))
.trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH)
.maxConnectionsPerSegmentStore(1)
.build();
@Cleanup
SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1);
@Cleanup
ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory);
ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10);
FailingReplyProcessor rp = new FailingReplyProcessor() {
@Override
public void connectionDropped() {
}
@Override
public void segmentRead(WireCommands.SegmentRead data) {
msgRead.add(data);
}
@Override
public void processingFailure(Exception error) {
}
@Override
public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) {
}
};
Flow flow1 = new Flow(1, 0);
@Cleanup
ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join();
connection1.send(readRequestGenerator.apply(flow1.asLong()));
WireCommands.SegmentRead msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
assertEquals(1, connectionPool.getActiveChannels().size());
        // create a second connection, this time without using a flow, so it gets its own channel.
@Cleanup
ClientConnection connection2 = connectionPool.getClientConnection(new PravegaNodeUri("localhost", port), rp).join();
Flow flow2 = new Flow(2, 0);
// send data over connection2 and verify.
connection2.send(readRequestGenerator.apply(flow2.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow2.asLong()), msg);
assertEquals(1, connectionPool.getActiveChannels().size());
assertEquals(2, factory.getOpenSocketCount());
// send data over connection1 and verify.
connection1.send(readRequestGenerator.apply(flow1.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
// send data over connection2 and verify.
connection2.send(readRequestGenerator.apply(flow2.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow2.asLong()), msg);
// close a client connection, this should not close the channel.
connection2.close();
assertThrows(ConnectionFailedException.class, () -> connection2.send(readRequestGenerator.apply(flow2.asLong())));
// verify we are able to send data over connection1.
connection1.send(readRequestGenerator.apply(flow1.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
// close connection1
connection1.close();
assertThrows(ConnectionFailedException.class, () -> connection1.send(readRequestGenerator.apply(flow2.asLong())));
AssertExtensions.assertEventuallyEquals(0, () -> {
connectionPool.pruneUnusedConnections();
return factory.getOpenSocketCount();
}, 10000);
assertEquals(0, connectionPool.getActiveChannels().size());
}
@Test
public void testConnectionPooling() throws Exception {
ClientConfig clientConfig = ClientConfig.builder()
.controllerURI(URI.create((this.ssl ? "tls://" : "tcp://")
+ "localhost"))
.trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH)
.maxConnectionsPerSegmentStore(1)
.build();
@Cleanup
SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1);
@Cleanup
ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory);
ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10);
FailingReplyProcessor rp = new FailingReplyProcessor() {
@Override
public void connectionDropped() {
}
@Override
public void segmentRead(WireCommands.SegmentRead data) {
msgRead.add(data);
}
@Override
public void processingFailure(Exception error) {
}
@Override
public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) {
}
};
Flow flow1 = new Flow(1, 0);
@Cleanup
ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join();
connection1.send(readRequestGenerator.apply(flow1.asLong()));
WireCommands.SegmentRead msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
assertEquals(1, connectionPool.getActiveChannels().size());
// create a second connection, since the max number of connections is 1 this should reuse the same connection.
Flow flow2 = new Flow(2, 0);
CompletableFuture<ClientConnection> cf = new CompletableFuture<>();
connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp, cf);
@Cleanup
ClientConnection connection2 = cf.join();
// send data over connection2 and verify.
connection2.send(readRequestGenerator.apply(flow2.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow2.asLong()), msg);
assertEquals(1, connectionPool.getActiveChannels().size());
assertEquals(1, factory.getOpenSocketCount());
// send data over connection1 and verify.
connection1.send(readRequestGenerator.apply(flow1.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
// send data over connection2 and verify.
connection2.send(readRequestGenerator.apply(flow2.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow2.asLong()), msg);
// close a client connection, this should not close the channel.
connection2.close();
assertThrows(ConnectionFailedException.class, () -> connection2.send(readRequestGenerator.apply(flow2.asLong())));
// verify we are able to send data over connection1.
connection1.send(readRequestGenerator.apply(flow1.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
// close connection1
connection1.close();
assertThrows(ConnectionFailedException.class, () -> connection1.send(readRequestGenerator.apply(flow2.asLong())));
AssertExtensions.assertEventuallyEquals(0, () -> {
connectionPool.pruneUnusedConnections();
return factory.getOpenSocketCount();
}, 10000);
assertEquals(0, connectionPool.getActiveChannels().size());
}
@Test
public void testPoolBalancing() throws Exception {
ClientConfig clientConfig = ClientConfig.builder()
.controllerURI(URI.create((this.ssl ? "tls://" : "tcp://")
+ "localhost"))
.trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH)
.maxConnectionsPerSegmentStore(2)
.build();
@Cleanup
SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1);
@Cleanup
ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory);
ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10);
FailingReplyProcessor rp = new FailingReplyProcessor() {
@Override
public void connectionDropped() {
}
@Override
public void segmentRead(WireCommands.SegmentRead data) {
msgRead.add(data);
}
@Override
public void processingFailure(Exception error) {
}
@Override
public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) {
}
};
Flow flow1 = new Flow(1, 0);
@Cleanup
ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join();
connection1.send(readRequestGenerator.apply(flow1.asLong()));
WireCommands.SegmentRead msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow1.asLong()), msg);
assertEquals(1, factory.getOpenSocketCount());
// create a second connection, since the max number of connections is 2 this should not reuse the same connection.
Flow flow2 = new Flow(2, 0);
@Cleanup
ClientConnection connection2 = connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp).join();
// send data over connection2 and verify.
connection2.send(readRequestGenerator.apply(flow2.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow2.asLong()), msg);
assertEquals(2, factory.getOpenSocketCount());
assertNotEquals(((FlowClientConnection) connection1).getChannel(),
((FlowClientConnection) connection2).getChannel());
// create a second connection, since the max number of connections is 2 this should reuse the same connection.
Flow flow3 = new Flow(3, 0);
@Cleanup
ClientConnection connection3 = connectionPool.getClientConnection(flow3, new PravegaNodeUri("localhost", port), rp).join();
// send data over connection3 and verify.
connection3.send(readRequestGenerator.apply(flow3.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow3.asLong()), msg);
assertEquals(2, factory.getOpenSocketCount());
assertEquals(((FlowClientConnection) connection1).getChannel(),
((FlowClientConnection) connection3).getChannel());
Flow flow4 = new Flow(3, 0);
@Cleanup
ClientConnection connection4 = connectionPool.getClientConnection(flow4, new PravegaNodeUri("localhost", port), rp).join();
// send data over connection3 and verify.
connection3.send(readRequestGenerator.apply(flow4.asLong()));
msg = msgRead.take();
assertEquals(readResponseGenerator.apply(flow4.asLong()), msg);
assertEquals(2, factory.getOpenSocketCount());
assertEquals(2, connectionPool.getActiveChannels().size());
assertNotEquals(((FlowClientConnection) connection3).getChannel(),
((FlowClientConnection) connection4).getChannel());
assertEquals(((FlowClientConnection) connection2).getChannel(),
((FlowClientConnection) connection4).getChannel());
}
@Test
public void testConcurrentRequests() throws Exception {
ClientConfig clientConfig = ClientConfig.builder()
.controllerURI(URI.create((this.ssl ? "tls://" : "tcp://")
+ "localhost"))
.trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH)
.maxConnectionsPerSegmentStore(1)
.build();
@Cleanup
SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1);
@Cleanup
ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory);
ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10);
FailingReplyProcessor rp = new FailingReplyProcessor() {
@Override
public void connectionDropped() {
}
@Override
public void segmentRead(WireCommands.SegmentRead data) {
msgRead.add(data);
}
@Override
public void processingFailure(Exception error) {
}
@Override
public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) {
}
};
Flow flow1 = new Flow(1, 0);
ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join();
// create a second connection, since the max number of connections is 1 this should reuse the same connection.
Flow flow2 = new Flow(2, 0);
ClientConnection connection2 = connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp).join();
assertEquals(1, factory.getOpenSocketCount());
assertEquals(1, connectionPool.getActiveChannels().size());
connection1.send(readRequestGenerator.apply(flow1.asLong()));
connection2.send(readRequestGenerator.apply(flow2.asLong()));
List<WireCommands.SegmentRead> msgs = new ArrayList<WireCommands.SegmentRead>();
msgs.add(msgRead.take());
msgs.add(msgRead.take());
assertTrue(msgs.contains(readResponseGenerator.apply(flow1.asLong())));
        assertTrue(msgs.contains(readResponseGenerator.apply(flow2.asLong())));
assertEquals(1, factory.getOpenSocketCount());
connection1.close();
connection2.close();
AssertExtensions.assertEventuallyEquals(0, () -> {
connectionPool.pruneUnusedConnections();
return factory.getOpenSocketCount();
}, 10000);
assertEquals(0, connectionPool.getActiveChannels().size());
}
}
| pravega/pravega | client/src/test/java/io/pravega/client/connection/impl/ConnectionPoolingTest.java | Java | apache-2.0 | 21,376 |
package jp.co.omana.action;
import org.seasar.struts.annotation.Execute;
public class ServiceAction {
@Execute(validator = false)
public String index() {
return "board.jsp";
}
@Execute(validator = false)
public String confirm() {
return "index.jsp";
}
@Execute(validator = false)
public String finish() {
return "index.jsp";
}
}
| ikraikra/bunsekiya | src/main/java/jp/co/omana/action/ServiceAction.java | Java | apache-2.0 | 387 |
/**
*
*/
package com.sivalabs.demo.orders.repositories;
import org.springframework.data.jpa.repository.JpaRepository;
import com.sivalabs.demo.orders.entities.Order;
/**
* @author Siva
*
*/
public interface OrderRepository extends JpaRepository<Order, Integer>{
}
| sivaprasadreddy/springboot-learn-by-example | chapter-09/springboot-multiple-datasources-demo/src/main/java/com/sivalabs/demo/orders/repositories/OrderRepository.java | Java | apache-2.0 | 274 |
/************************************************************
 * EaseMob CONFIDENTIAL
* __________________
* Copyright (C) 2013-2014 EaseMob Technologies. All rights reserved.
*
* NOTICE: All information contained herein is, and remains
* the property of EaseMob Technologies.
* Dissemination of this information or reproduction of this material
* is strictly forbidden unless prior written permission is obtained
* from EaseMob Technologies.
*/
package com.easemob.chatuidemo.activity;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import android.annotation.SuppressLint;
import android.app.AlertDialog;
import android.app.ProgressDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.graphics.Bitmap;
import android.graphics.PixelFormat;
import android.hardware.Camera;
import android.hardware.Camera.CameraInfo;
import android.hardware.Camera.Parameters;
import android.hardware.Camera.Size;
import android.media.MediaRecorder;
import android.media.MediaRecorder.OnErrorListener;
import android.media.MediaRecorder.OnInfoListener;
import android.media.MediaScannerConnection;
import android.media.MediaScannerConnection.MediaScannerConnectionClient;
import android.net.Uri;
import android.os.Bundle;
import android.os.Environment;
import android.os.PowerManager;
import android.os.SystemClock;
import android.text.TextUtils;
import android.view.SurfaceHolder;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.Window;
import android.view.WindowManager;
import android.widget.Button;
import android.widget.Chronometer;
import android.widget.ImageView;
import android.widget.Toast;
import android.widget.VideoView;
import com.easemob.chatuidemo.utils.CommonUtils;
import com.easemob.chatuidemo.video.util.Utils;
import com.easemob.qixin.R;
import com.easemob.util.EMLog;
import com.easemob.util.PathUtil;
public class RecorderVideoActivity extends BaseActivity implements
OnClickListener, SurfaceHolder.Callback, OnErrorListener,
OnInfoListener {
private static final String TAG = "RecorderVideoActivity";
private final static String CLASS_LABEL = "RecordActivity";
private PowerManager.WakeLock mWakeLock;
    private ImageView btnStart;// start recording button
    private ImageView btnStop;// stop recording button
    private MediaRecorder mediaRecorder;// MediaRecorder used to capture the video
    private VideoView mVideoView;// view showing the camera preview
    String localPath = "";// path of the recorded video file
private Camera mCamera;
    // preview width and height
private int previewWidth = 480;
private int previewHeight = 480;
private Chronometer chronometer;
    private int frontCamera = 0;// 0 = rear camera, 1 = front camera
private Button btn_switch;
Parameters cameraParameters = null;
private SurfaceHolder mSurfaceHolder;
int defaultVideoFrameRate = -1;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
        requestWindowFeature(Window.FEATURE_NO_TITLE);// hide the title bar
getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,
                WindowManager.LayoutParams.FLAG_FULLSCREEN);// full screen
        // use a translucent pixel format; needed in activities that host a SurfaceView
getWindow().setFormat(PixelFormat.TRANSLUCENT);
setContentView(R.layout.recorder_activity);
PowerManager pm = (PowerManager) getSystemService(Context.POWER_SERVICE);
mWakeLock = pm.newWakeLock(PowerManager.SCREEN_BRIGHT_WAKE_LOCK,
CLASS_LABEL);
mWakeLock.acquire();
initViews();
}
private void initViews() {
btn_switch = (Button) findViewById(R.id.switch_btn);
btn_switch.setOnClickListener(this);
btn_switch.setVisibility(View.VISIBLE);
mVideoView = (VideoView) findViewById(R.id.mVideoView);
btnStart = (ImageView) findViewById(R.id.recorder_start);
btnStop = (ImageView) findViewById(R.id.recorder_stop);
btnStart.setOnClickListener(this);
btnStop.setOnClickListener(this);
mSurfaceHolder = mVideoView.getHolder();
mSurfaceHolder.addCallback(this);
mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
chronometer = (Chronometer) findViewById(R.id.chronometer);
}
public void back(View view) {
releaseRecorder();
releaseCamera();
finish();
}
@Override
protected void onResume() {
super.onResume();
if (mWakeLock == null) {
            // acquire a wake lock to keep the screen on
PowerManager pm = (PowerManager) getSystemService(Context.POWER_SERVICE);
mWakeLock = pm.newWakeLock(PowerManager.SCREEN_BRIGHT_WAKE_LOCK,
CLASS_LABEL);
mWakeLock.acquire();
}
// if (!initCamera()) {
// showFailDialog();
// }
}
@SuppressLint("NewApi")
private boolean initCamera() {
try {
if (frontCamera == 0) {
mCamera = Camera.open(CameraInfo.CAMERA_FACING_BACK);
} else {
mCamera = Camera.open(CameraInfo.CAMERA_FACING_FRONT);
}
Camera.Parameters camParams = mCamera.getParameters();
mCamera.lock();
mSurfaceHolder = mVideoView.getHolder();
mSurfaceHolder.addCallback(this);
mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
mCamera.setDisplayOrientation(90);
} catch (RuntimeException ex) {
EMLog.e("video", "init Camera fail " + ex.getMessage());
return false;
}
return true;
}
private void handleSurfaceChanged() {
if (mCamera == null) {
finish();
return;
}
boolean hasSupportRate = false;
List<Integer> supportedPreviewFrameRates = mCamera.getParameters()
.getSupportedPreviewFrameRates();
if (supportedPreviewFrameRates != null
&& supportedPreviewFrameRates.size() > 0) {
Collections.sort(supportedPreviewFrameRates);
for (int i = 0; i < supportedPreviewFrameRates.size(); i++) {
int supportRate = supportedPreviewFrameRates.get(i);
if (supportRate == 15) {
hasSupportRate = true;
}
}
if (hasSupportRate) {
defaultVideoFrameRate = 15;
} else {
defaultVideoFrameRate = supportedPreviewFrameRates.get(0);
}
}
        // get all preview resolutions supported by the camera
List<Camera.Size> resolutionList = Utils.getResolutionList(mCamera);
if (resolutionList != null && resolutionList.size() > 0) {
Collections.sort(resolutionList, new Utils.ResolutionComparator());
Camera.Size previewSize = null;
boolean hasSize = false;
            // if the camera supports 640*480, force the preview size to 640*480
for (int i = 0; i < resolutionList.size(); i++) {
Size size = resolutionList.get(i);
if (size != null && size.width == 640 && size.height == 480) {
previewSize = size;
previewWidth = previewSize.width;
previewHeight = previewSize.height;
hasSize = true;
break;
}
}
            // otherwise use the middle entry of the supported resolutions
if (!hasSize) {
int mediumResolution = resolutionList.size() / 2;
if (mediumResolution >= resolutionList.size())
mediumResolution = resolutionList.size() - 1;
previewSize = resolutionList.get(mediumResolution);
previewWidth = previewSize.width;
previewHeight = previewSize.height;
}
}
}
@Override
protected void onPause() {
super.onPause();
if (mWakeLock != null) {
mWakeLock.release();
mWakeLock = null;
}
}
@Override
public void onClick(View view) {
switch (view.getId()) {
case R.id.switch_btn:
switchCamera();
break;
case R.id.recorder_start:
// start recording
if(!startRecording())
return;
Toast.makeText(this, R.string.The_video_to_start, Toast.LENGTH_SHORT).show();
btn_switch.setVisibility(View.INVISIBLE);
btnStart.setVisibility(View.INVISIBLE);
btnStart.setEnabled(false);
btnStop.setVisibility(View.VISIBLE);
            // reset the recording timer
chronometer.setBase(SystemClock.elapsedRealtime());
chronometer.start();
break;
case R.id.recorder_stop:
btnStop.setEnabled(false);
            // stop recording
stopRecording();
btn_switch.setVisibility(View.VISIBLE);
chronometer.stop();
btnStart.setVisibility(View.VISIBLE);
btnStop.setVisibility(View.INVISIBLE);
new AlertDialog.Builder(this)
.setMessage(R.string.Whether_to_send)
.setPositiveButton(R.string.ok,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog,
int which) {
dialog.dismiss();
sendVideo(null);
}
})
.setNegativeButton(R.string.cancel,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog,
int which) {
if(localPath != null){
File file = new File(localPath);
if(file.exists())
file.delete();
}
finish();
}
}).setCancelable(false).show();
break;
default:
break;
}
}
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width,
int height) {
        // assign this holder (the one obtained in onCreate) to surfaceHolder
mSurfaceHolder = holder;
}
@Override
public void surfaceCreated(SurfaceHolder holder) {
if (mCamera == null){
if(!initCamera()){
showFailDialog();
return;
}
}
try {
mCamera.setPreviewDisplay(mSurfaceHolder);
mCamera.startPreview();
handleSurfaceChanged();
} catch (Exception e1) {
EMLog.e("video", "start preview fail " + e1.getMessage());
showFailDialog();
}
}
@Override
public void surfaceDestroyed(SurfaceHolder arg0) {
EMLog.v("video", "surfaceDestroyed");
}
public boolean startRecording(){
if (mediaRecorder == null){
if(!initRecorder())
return false;
}
mediaRecorder.setOnInfoListener(this);
mediaRecorder.setOnErrorListener(this);
mediaRecorder.start();
return true;
}
@SuppressLint("NewApi")
private boolean initRecorder(){
if(!CommonUtils.isExitsSdcard()){
showNoSDCardDialog();
return false;
}
if (mCamera == null) {
if(!initCamera()){
showFailDialog();
return false;
}
}
mVideoView.setVisibility(View.VISIBLE);
// TODO init button
mCamera.stopPreview();
mediaRecorder = new MediaRecorder();
mCamera.unlock();
mediaRecorder.setCamera(mCamera);
mediaRecorder.setAudioSource(MediaRecorder.AudioSource.DEFAULT);
        // use the camera as the video source
mediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
if (frontCamera == 1) {
mediaRecorder.setOrientationHint(270);
} else {
mediaRecorder.setOrientationHint(90);
}
        // set the output container format: THREE_GPP = 3gp, MPEG_4 = mp4
mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
mediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
        // set the video encoder (H.263 or H.264)
mediaRecorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
        // set the recording resolution; must come after setting encoder and format, otherwise an error is thrown
mediaRecorder.setVideoSize(previewWidth, previewHeight);
        // set the video bit rate
mediaRecorder.setVideoEncodingBitRate(384 * 1024);
        // set the video frame rate; must come after setting encoder and format, otherwise an error is thrown
if (defaultVideoFrameRate != -1) {
mediaRecorder.setVideoFrameRate(defaultVideoFrameRate);
}
        // set the output file path of the recorded video
localPath = PathUtil.getInstance().getVideoPath() + "/"
+ System.currentTimeMillis() + ".mp4";
mediaRecorder.setOutputFile(localPath);
mediaRecorder.setMaxDuration(30000);
mediaRecorder.setPreviewDisplay(mSurfaceHolder.getSurface());
try {
mediaRecorder.prepare();
} catch (IllegalStateException e) {
e.printStackTrace();
return false;
} catch (IOException e) {
e.printStackTrace();
return false;
}
return true;
}
public void stopRecording() {
if (mediaRecorder != null) {
mediaRecorder.setOnErrorListener(null);
mediaRecorder.setOnInfoListener(null);
try {
mediaRecorder.stop();
} catch (IllegalStateException e) {
EMLog.e("video", "stopRecording error:" + e.getMessage());
}
}
releaseRecorder();
if (mCamera != null) {
mCamera.stopPreview();
releaseCamera();
}
}
private void releaseRecorder() {
if (mediaRecorder != null) {
mediaRecorder.release();
mediaRecorder = null;
}
}
protected void releaseCamera() {
try {
if (mCamera != null) {
mCamera.stopPreview();
mCamera.release();
mCamera = null;
}
} catch (Exception e) {
}
}
@SuppressLint("NewApi")
public void switchCamera() {
if (mCamera == null) {
return;
}
if (Camera.getNumberOfCameras() >= 2) {
btn_switch.setEnabled(false);
if (mCamera != null) {
mCamera.stopPreview();
mCamera.release();
mCamera = null;
}
switch (frontCamera) {
case 0:
mCamera = Camera.open(CameraInfo.CAMERA_FACING_FRONT);
frontCamera = 1;
break;
case 1:
mCamera = Camera.open(CameraInfo.CAMERA_FACING_BACK);
frontCamera = 0;
break;
}
try {
mCamera.lock();
mCamera.setDisplayOrientation(90);
mCamera.setPreviewDisplay(mVideoView.getHolder());
mCamera.startPreview();
} catch (IOException e) {
mCamera.release();
mCamera = null;
}
btn_switch.setEnabled(true);
}
}
MediaScannerConnection msc = null;
ProgressDialog progressDialog = null;
public void sendVideo(View view) {
if (TextUtils.isEmpty(localPath)) {
EMLog.e("Recorder", "recorder fail please try again!");
return;
}
if(msc == null)
msc = new MediaScannerConnection(this,
new MediaScannerConnectionClient() {
@Override
public void onScanCompleted(String path, Uri uri) {
EMLog.d(TAG, "scanner completed");
msc.disconnect();
progressDialog.dismiss();
setResult(RESULT_OK, getIntent().putExtra("uri", uri));
finish();
}
@Override
public void onMediaScannerConnected() {
msc.scanFile(localPath, "video/*");
}
});
if(progressDialog == null){
progressDialog = new ProgressDialog(this);
progressDialog.setMessage("processing...");
progressDialog.setCancelable(false);
}
progressDialog.show();
msc.connect();
}
@Override
public void onInfo(MediaRecorder mr, int what, int extra) {
EMLog.v("video", "onInfo");
if (what == MediaRecorder.MEDIA_RECORDER_INFO_MAX_DURATION_REACHED) {
EMLog.v("video", "max duration reached");
stopRecording();
btn_switch.setVisibility(View.VISIBLE);
chronometer.stop();
btnStart.setVisibility(View.VISIBLE);
btnStop.setVisibility(View.INVISIBLE);
chronometer.stop();
if (localPath == null) {
return;
}
String st3 = getResources().getString(R.string.Whether_to_send);
new AlertDialog.Builder(this)
.setMessage(st3)
.setPositiveButton(R.string.ok,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface arg0,
int arg1) {
arg0.dismiss();
sendVideo(null);
}
}).setNegativeButton(R.string.cancel, null)
.setCancelable(false).show();
}
}
@Override
public void onError(MediaRecorder mr, int what, int extra) {
EMLog.e("video", "recording onError:");
stopRecording();
Toast.makeText(this,
"Recording error has occurred. Stopping the recording",
Toast.LENGTH_SHORT).show();
}
public void saveBitmapFile(Bitmap bitmap) {
File file = new File(Environment.getExternalStorageDirectory(), "a.jpg");
try {
BufferedOutputStream bos = new BufferedOutputStream(
new FileOutputStream(file));
bitmap.compress(Bitmap.CompressFormat.JPEG, 100, bos);
bos.flush();
bos.close();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
protected void onDestroy() {
super.onDestroy();
releaseCamera();
if (mWakeLock != null) {
mWakeLock.release();
mWakeLock = null;
}
}
@Override
public void onBackPressed() {
back(null);
}
private void showFailDialog() {
new AlertDialog.Builder(this)
.setTitle(R.string.prompt)
.setMessage(R.string.Open_the_equipment_failure)
.setPositiveButton(R.string.ok,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog,
int which) {
finish();
}
}).setCancelable(false).show();
}
private void showNoSDCardDialog() {
new AlertDialog.Builder(this)
.setTitle(R.string.prompt)
.setMessage("No sd card!")
.setPositiveButton(R.string.ok,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog,
int which) {
finish();
}
}).setCancelable(false).show();
}
}
| liyuzhao/enterpriseChat-android | src/com/easemob/chatuidemo/activity/RecorderVideoActivity.java | Java | apache-2.0 | 16,993 |
package com.github.sergejsamsonow.codegenerator.producer.pojo.renderer;
import com.github.sergejsamsonow.codegenerator.api.producer.sc.SCMethodCodeConcatenator;
import com.github.sergejsamsonow.codegenerator.api.producer.sc.SCNewLineAndIndentationFormat;
import com.github.sergejsamsonow.codegenerator.producer.pojo.model.PojoProperty;
import com.github.sergejsamsonow.codegenerator.producer.pojo.renderer.javalang.BeanModifier;
public class JavaLangToString extends BeanModifier {
public JavaLangToString(SCNewLineAndIndentationFormat format) {
super(format);
}
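    // Illustrative note (added, not part of the original source): for a hypothetical bean
    // "Person" with properties "firstName" and "lastName", this renderer emits a method
    // shaped roughly like:
    //
    //   @Override
    //   public String toString() {
    //       StringBuilder builder = new StringBuilder();
    //       builder.append("Person (");
    //       builder.append("firstName: " + Objects.toString(getFirstName()) + ", ");
    //       builder.append("lastName: " + Objects.toString(getLastName()));
    //       builder.append(")");
    //       return builder.toString();
    //   }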
@Override
protected void writeBeforePropertiesIteration() {
SCMethodCodeConcatenator writer = getMethodCodeWriter();
writer.annotation("@Override");
writer.start("public String toString() {");
writer.code("StringBuilder builder = new StringBuilder();");
writer.code("builder.append(\"%s (\");", getData().getClassName());
}
@Override
protected void writePropertyCode(PojoProperty property) {
SCMethodCodeConcatenator writer = getMethodCodeWriter();
String end = isLast() ? ");" : " + \", \");";
writer.code("builder.append(\"%s: \" + Objects.toString(%s())%s",
property.getFieldName(), property.getGetterName(), end);
}
@Override
protected void writeAfterPropertiesIteration() {
SCMethodCodeConcatenator writer = getMethodCodeWriter();
writer.code("builder.append(\")\");");
writer.code("return builder.toString();");
writer.end();
writer.emptyNewLine();
}
}
| sergej-samsonow/code-generator | producer/pojo/src/main/java/com/github/sergejsamsonow/codegenerator/producer/pojo/renderer/JavaLangToString.java | Java | apache-2.0 | 1,597 |
/*
* Copyright 2010-2011 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.autoscaling.model;
/**
* <p>
* The output for the TerminateInstanceInAutoScalingGroup action.
* </p>
*/
public class TerminateInstanceInAutoScalingGroupResult {
/**
* A Scaling Activity.
*/
private Activity activity;
/**
* A Scaling Activity.
*
* @return A Scaling Activity.
*/
public Activity getActivity() {
return activity;
}
/**
* A Scaling Activity.
*
* @param activity A Scaling Activity.
*/
public void setActivity(Activity activity) {
this.activity = activity;
}
/**
* A Scaling Activity.
* <p>
* Returns a reference to this object so that method calls can be chained together.
*
* @param activity A Scaling Activity.
*
* @return A reference to this updated object so that method calls can be chained
* together.
*/
public TerminateInstanceInAutoScalingGroupResult withActivity(Activity activity) {
this.activity = activity;
return this;
}
/**
* Returns a string representation of this object; useful for testing and
* debugging.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
sb.append("Activity: " + activity + ", ");
sb.append("}");
return sb.toString();
}
}
| apetresc/aws-sdk-for-java-on-gae | src/main/java/com/amazonaws/services/autoscaling/model/TerminateInstanceInAutoScalingGroupResult.java | Java | apache-2.0 | 2,135 |
/*
* Copyright 2014 Alexey Andreev.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.teavm.classlib.java.util;
import org.teavm.classlib.java.io.TSerializable;
import org.teavm.classlib.java.lang.TMath;
import org.teavm.classlib.java.lang.TObject;
import org.teavm.javascript.spi.GeneratedBy;
/**
*
* @author Alexey Andreev
*/
public class TRandom extends TObject implements TSerializable {
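    // Descriptive note (added, hedged): this TeaVM implementation is backed by the generated
    // native random() method below (see RandomNativeGenerator), which typically maps to the
    // JavaScript Math.random(). As a consequence the seed arguments are ignored and
    // setSeed(long) is a no-op, so sequences are not reproducible the way java.util.Random is.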
public TRandom() {
}
public TRandom(@SuppressWarnings("unused") long seed) {
}
public void setSeed(@SuppressWarnings("unused") long seed) {
}
protected int next(int bits) {
return (int)(random() * (1L << TMath.min(32, bits)));
}
public void nextBytes(byte[] bytes) {
for (int i = 0; i < bytes.length; ++i) {
bytes[i] = (byte)next(8);
}
}
public int nextInt() {
return next(32);
}
public int nextInt(int n) {
return (int)(random() * n);
}
public long nextLong() {
return ((long)nextInt() << 32) | nextInt();
}
public boolean nextBoolean() {
return nextInt() % 2 == 0;
}
public float nextFloat() {
return (float)random();
}
public double nextDouble() {
return random();
}
@GeneratedBy(RandomNativeGenerator.class)
private static native double random();
}
| mpoindexter/teavm | teavm-classlib/src/main/java/org/teavm/classlib/java/util/TRandom.java | Java | apache-2.0 | 1,877 |
/* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0 */
package net.sf.mmm.util.io.base;
import net.sf.mmm.util.exception.api.NlsNullPointerException;
/**
* This class is similar to {@link java.nio.ByteBuffer} but a lot simpler.
*
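 * <p>
 * Usage sketch (illustrative addition, not from the original documentation):
 *
 * <pre>
 * byte[] data = "hello".getBytes();
 * ByteArrayImpl array = new ByteArrayImpl(data); // wraps the whole array
 * ByteArrayImpl sub = array.createSubArray(1, 3); // view on bytes 1..3 ("ell")
 * </pre>
 *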
* @see java.nio.ByteBuffer#wrap(byte[], int, int)
*
* @author Joerg Hohwiller (hohwille at users.sourceforge.net)
* @since 1.1.0
*/
public class ByteArrayImpl extends AbstractByteArray {
private final byte[] buffer;
private int minimumIndex;
private int maximumIndex;
/**
* The constructor.
*
* @param capacity is the {@code length} of the internal {@link #getBytes() buffer}.
*/
public ByteArrayImpl(int capacity) {
this(new byte[capacity], 0, -1);
}
/**
* The constructor.
*
* @param buffer is the internal {@link #getBytes() buffer}.
*/
public ByteArrayImpl(byte[] buffer) {
this(buffer, 0, buffer.length - 1);
}
/**
* The constructor.
*
* @param buffer is the internal {@link #getBytes() buffer}.
* @param startIndex is the {@link #getCurrentIndex() current index} as well as the {@link #getMinimumIndex() minimum
* index}.
* @param maximumIndex is the {@link #getMaximumIndex() maximum index}.
*/
public ByteArrayImpl(byte[] buffer, int startIndex, int maximumIndex) {
super();
if (buffer == null) {
throw new NlsNullPointerException("buffer");
}
this.buffer = buffer;
this.minimumIndex = startIndex;
this.maximumIndex = maximumIndex;
}
@Override
public byte[] getBytes() {
return this.buffer;
}
@Override
public int getCurrentIndex() {
return this.minimumIndex;
}
@Override
public int getMinimumIndex() {
return this.minimumIndex;
}
@Override
public int getMaximumIndex() {
return this.maximumIndex;
}
/**
* This method sets the {@link #getMaximumIndex() maximumIndex}. This may be useful if the buffer should be reused.
* <br>
* <b>ATTENTION:</b><br>
* Be very careful and only use this method if you know what you are doing!
*
* @param maximumIndex is the {@link #getMaximumIndex() maximumIndex} to set. It has to be in the range from {@code 0}
* ( <code>{@link #getCurrentIndex() currentIndex} - 1</code>) to <code>{@link #getBytes()}.length</code>.
*/
protected void setMaximumIndex(int maximumIndex) {
this.maximumIndex = maximumIndex;
}
@Override
public ByteArrayImpl createSubArray(int minimum, int maximum) {
checkSubArray(minimum, maximum);
return new ByteArrayImpl(this.buffer, minimum, maximum);
}
@Override
public String toString() {
return new String(this.buffer, this.minimumIndex, getBytesAvailable());
}
}
| m-m-m/util | io/src/main/java/net/sf/mmm/util/io/base/ByteArrayImpl.java | Java | apache-2.0 | 2,897 |
/*
* Licensed to Crate under one or more contributor license agreements.
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership. Crate licenses this file
* to you under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* However, if you have executed another commercial license agreement
* with Crate these terms will supersede the license and you may use the
* software solely pursuant to the terms of the relevant commercial
* agreement.
*/
package io.crate.execution.engine.collect;
import io.crate.breaker.RamAccounting;
import io.crate.data.BatchIterator;
import io.crate.data.Row;
import io.crate.execution.engine.aggregation.impl.SumAggregation;
import io.crate.expression.reference.doc.lucene.BytesRefColumnReference;
import io.crate.expression.reference.doc.lucene.CollectorContext;
import io.crate.expression.reference.doc.lucene.LongColumnReference;
import io.crate.expression.reference.doc.lucene.LuceneCollectorExpression;
import io.crate.metadata.Functions;
import io.crate.metadata.Reference;
import io.crate.metadata.ReferenceIdent;
import io.crate.metadata.RelationName;
import io.crate.metadata.RowGranularity;
import io.crate.metadata.functions.Signature;
import io.crate.test.integration.CrateDummyClusterServiceUnitTest;
import io.crate.testing.TestingRowConsumer;
import io.crate.types.DataTypes;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import static io.crate.testing.TestingHelpers.createNodeContext;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.instanceOf;
public class DocValuesGroupByOptimizedIteratorTest extends CrateDummyClusterServiceUnitTest {
private Functions functions;
private IndexSearcher indexSearcher;
private List<Object[]> rows = List.of(
new Object[]{"1", 1L, 1L},
new Object[]{"0", 0L, 2L},
new Object[]{"1", 1L, 3L},
new Object[]{"0", 0L, 4L}
);
@Before
public void setup() throws IOException {
var nodeContext = createNodeContext();
functions = nodeContext.functions();
var indexWriter = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig());
for (var row : rows) {
Document doc = new Document();
doc.add(new SortedSetDocValuesField("x", BytesRefs.toBytesRef(row[0])));
doc.add(new NumericDocValuesField("y", (Long) row[1]));
doc.add(new NumericDocValuesField("z", (Long) row[2]));
indexWriter.addDocument(doc);
}
indexWriter.commit();
indexSearcher = new IndexSearcher(DirectoryReader.open(indexWriter));
}
@Test
public void test_group_by_doc_values_optimized_iterator_for_single_numeric_key() throws Exception {
SumAggregation<?> sumAggregation = (SumAggregation<?>) functions.getQualified(
Signature.aggregate(
SumAggregation.NAME,
DataTypes.LONG.getTypeSignature(),
DataTypes.LONG.getTypeSignature()
),
List.of(DataTypes.LONG),
DataTypes.LONG
);
var aggregationField = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
aggregationField.setName("z");
var sumDocValuesAggregator = sumAggregation.getDocValueAggregator(
List.of(DataTypes.LONG),
List.of(aggregationField)
);
var keyExpressions = List.of(new LongColumnReference("y"));
var it = DocValuesGroupByOptimizedIterator.GroupByIterator.forSingleKey(
List.of(sumDocValuesAggregator),
indexSearcher,
new Reference(
new ReferenceIdent(RelationName.fromIndexName("test"), "y"),
RowGranularity.DOC,
DataTypes.LONG,
null,
null
),
keyExpressions,
RamAccounting.NO_ACCOUNTING,
new MatchAllDocsQuery(),
new CollectorContext()
);
var rowConsumer = new TestingRowConsumer();
rowConsumer.accept(it, null);
assertThat(
rowConsumer.getResult(),
containsInAnyOrder(new Object[]{0L, 6L}, new Object[]{1L, 4L}));
}
@Test
public void test_group_by_doc_values_optimized_iterator_for_many_keys() throws Exception {
SumAggregation<?> sumAggregation = (SumAggregation<?>) functions.getQualified(
Signature.aggregate(
SumAggregation.NAME,
DataTypes.LONG.getTypeSignature(),
DataTypes.LONG.getTypeSignature()
),
List.of(DataTypes.LONG),
DataTypes.LONG
);
var aggregationField = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
aggregationField.setName("z");
var sumDocValuesAggregator = sumAggregation.getDocValueAggregator(
List.of(DataTypes.LONG),
List.of(aggregationField)
);
var keyExpressions = List.of(new BytesRefColumnReference("x"), new LongColumnReference("y"));
var keyRefs = List.of(
new Reference(
new ReferenceIdent(RelationName.fromIndexName("test"), "x"),
RowGranularity.DOC,
DataTypes.STRING,
null,
null
),
new Reference(
new ReferenceIdent(RelationName.fromIndexName("test"), "y"),
RowGranularity.DOC,
DataTypes.LONG,
null,
null
)
);
var it = DocValuesGroupByOptimizedIterator.GroupByIterator.forManyKeys(
List.of(sumDocValuesAggregator),
indexSearcher,
keyRefs,
keyExpressions,
RamAccounting.NO_ACCOUNTING,
new MatchAllDocsQuery(),
new CollectorContext()
);
var rowConsumer = new TestingRowConsumer();
rowConsumer.accept(it, null);
assertThat(
rowConsumer.getResult(),
containsInAnyOrder(new Object[]{"0", 0L, 6L}, new Object[]{"1", 1L, 4L})
);
}
@Test
public void test_optimized_iterator_stop_processing_on_kill() throws Exception {
Throwable expectedException = stopOnInterrupting(it -> it.kill(new InterruptedException("killed")));
assertThat(expectedException, instanceOf(InterruptedException.class));
}
@Test
public void test_optimized_iterator_stop_processing_on_close() throws Exception {
Throwable expectedException = stopOnInterrupting(BatchIterator::close);
assertThat(expectedException, instanceOf(IllegalStateException.class));
}
private Throwable stopOnInterrupting(Consumer<BatchIterator<Row>> interrupt) throws Exception {
CountDownLatch waitForLoadNextBatch = new CountDownLatch(1);
CountDownLatch pauseOnDocumentCollecting = new CountDownLatch(1);
CountDownLatch batchLoadingCompleted = new CountDownLatch(1);
BatchIterator<Row> it = createBatchIterator(() -> {
waitForLoadNextBatch.countDown();
try {
pauseOnDocumentCollecting.await(5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
});
AtomicReference<Throwable> exception = new AtomicReference<>();
Thread t = new Thread(() -> {
try {
it.loadNextBatch().whenComplete((r, e) -> {
if (e != null) {
exception.set(e.getCause());
}
batchLoadingCompleted.countDown();
});
} catch (Exception e) {
exception.set(e);
}
});
t.start();
waitForLoadNextBatch.await(5, TimeUnit.SECONDS);
interrupt.accept(it);
pauseOnDocumentCollecting.countDown();
batchLoadingCompleted.await(5, TimeUnit.SECONDS);
return exception.get();
}
private BatchIterator<Row> createBatchIterator(Runnable onNextReader) {
return DocValuesGroupByOptimizedIterator.GroupByIterator.getIterator(
List.of(),
indexSearcher,
List.of(new LuceneCollectorExpression<>() {
@Override
public void setNextReader(LeafReaderContext context) {
onNextReader.run();
}
@Override
public Object value() {
return null;
}
}),
RamAccounting.NO_ACCOUNTING,
(states, key) -> {
},
(expressions) -> expressions.get(0).value(),
(key, cells) -> cells[0] = key,
new MatchAllDocsQuery(),
new CollectorContext()
);
}
}
| EvilMcJerkface/crate | server/src/test/java/io/crate/execution/engine/collect/DocValuesGroupByOptimizedIteratorTest.java | Java | apache-2.0 | 10,272 |
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespaclient;
public class ClusterDef {
private final String name;
public ClusterDef(String name) { this.name = name; }
public String getName() { return name; }
public String getRoute() { return "[Content:cluster=" + name + "]"; }
}
| vespa-engine/vespa | vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterDef.java | Java | apache-2.0 | 372 |
/*
* Copyright (C) 2018 the original author or authors.
*
* This file is part of jBB Application Project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*/
package org.jbb.security.rest.oauth.client;
import io.swagger.annotations.ApiModel;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
@Getter
@Setter
@Builder
@ApiModel("OAuthClientSecret")
@NoArgsConstructor(access = AccessLevel.PUBLIC)
@AllArgsConstructor(access = AccessLevel.PRIVATE)
public class ClientSecretDto {
private String clientSecret;
}
| jbb-project/jbb | domain-rest/jbb-security-rest/src/main/java/org/jbb/security/rest/oauth/client/ClientSecretDto.java | Java | apache-2.0 | 751 |
package weixin.popular.bean.scan.crud;
import weixin.popular.bean.scan.base.ProductGet;
import weixin.popular.bean.scan.info.BrandInfo;
public class ProductCreate extends ProductGet {
private BrandInfo brand_info;
public BrandInfo getBrand_info() {
return brand_info;
}
public void setBrand_info(BrandInfo brand_info) {
this.brand_info = brand_info;
}
}
| liyiorg/weixin-popular | src/main/java/weixin/popular/bean/scan/crud/ProductCreate.java | Java | apache-2.0 | 395 |
package com.mattinsler.guiceymongo.data.query;
import org.bson.BSON;
/**
* Created by IntelliJ IDEA.
* User: mattinsler
* Date: 12/29/10
* Time: 3:28 AM
* To change this template use File | Settings | File Templates.
*/
public enum BSONType {
Double(BSON.NUMBER),
String(BSON.STRING),
Object(BSON.OBJECT),
Array(BSON.ARRAY),
BinaryData(BSON.BINARY),
ObjectId(BSON.OID),
Boolean(BSON.BOOLEAN),
Date(BSON.DATE),
Null(BSON.NULL),
RegularExpression(BSON.REGEX),
Code(BSON.CODE),
Symbol(BSON.SYMBOL),
CodeWithScope(BSON.CODE_W_SCOPE),
Integer(BSON.NUMBER_INT),
Timestamp(BSON.TIMESTAMP),
Long(BSON.NUMBER_LONG),
MinKey(BSON.MINKEY),
MaxKey(BSON.MAXKEY);
private final byte _typeCode;
BSONType(byte typeCode) {
_typeCode = typeCode;
}
byte getTypeCode() {
return _typeCode;
}
}
| mattinsler/guiceymongo | src/main/java/com/mattinsler/guiceymongo/data/query/BSONType.java | Java | apache-2.0 | 891 |
/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2017 Serge Rider (serge@jkiss.org)
* Copyright (C) 2011-2012 Eugene Fradkin (eugene.fradkin@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ext.oracle.views;
import org.eclipse.swt.SWT;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.widgets.*;
import org.jkiss.dbeaver.core.DBeaverCore;
import org.jkiss.dbeaver.ext.oracle.model.OracleConstants;
import org.jkiss.dbeaver.model.preferences.DBPPreferenceStore;
import org.jkiss.dbeaver.model.DBPDataSourceContainer;
import org.jkiss.dbeaver.ui.UIUtils;
import org.jkiss.dbeaver.ui.preferences.PreferenceStoreDelegate;
import org.jkiss.dbeaver.ui.preferences.TargetPrefPage;
import org.jkiss.dbeaver.utils.PrefUtils;
/**
* PrefPageOracle
*/
public class PrefPageOracle extends TargetPrefPage
{
public static final String PAGE_ID = "org.jkiss.dbeaver.preferences.oracle.general"; //$NON-NLS-1$
private Text explainTableText;
private Button rowidSupportCheck;
    private Button enableDbmsOutputCheck;
public PrefPageOracle()
{
super();
setPreferenceStore(new PreferenceStoreDelegate(DBeaverCore.getGlobalPreferenceStore()));
}
@Override
protected boolean hasDataSourceSpecificOptions(DBPDataSourceContainer dataSourceDescriptor)
{
DBPPreferenceStore store = dataSourceDescriptor.getPreferenceStore();
return
store.contains(OracleConstants.PREF_EXPLAIN_TABLE_NAME) ||
store.contains(OracleConstants.PREF_SUPPORT_ROWID) ||
store.contains(OracleConstants.PREF_DBMS_OUTPUT)
;
}
@Override
protected boolean supportsDataSourceSpecificOptions()
{
return true;
}
@Override
protected Control createPreferenceContent(Composite parent)
{
Composite composite = UIUtils.createPlaceholder(parent, 1);
{
Group planGroup = UIUtils.createControlGroup(composite, "Execution plan", 2, GridData.FILL_HORIZONTAL, 0);
Label descLabel = new Label(planGroup, SWT.WRAP);
descLabel.setText("By default plan table in current or SYS schema will be used.\nYou may set some particular fully qualified plan table name here.");
GridData gd = new GridData(GridData.HORIZONTAL_ALIGN_BEGINNING);
gd.horizontalSpan = 2;
descLabel.setLayoutData(gd);
explainTableText = UIUtils.createLabelText(planGroup, "Plan table", "", SWT.BORDER, new GridData(GridData.FILL_HORIZONTAL));
}
{
Group planGroup = UIUtils.createControlGroup(composite, "Misc", 2, GridData.FILL_HORIZONTAL, 0);
rowidSupportCheck = UIUtils.createLabelCheckbox(planGroup, "Use ROWID to identify rows", true);
            enableDbmsOutputCheck = UIUtils.createLabelCheckbox(planGroup, "Enable DBMS Output", true);
}
return composite;
}
@Override
protected void loadPreferences(DBPPreferenceStore store)
{
explainTableText.setText(store.getString(OracleConstants.PREF_EXPLAIN_TABLE_NAME));
rowidSupportCheck.setSelection(store.getBoolean(OracleConstants.PREF_SUPPORT_ROWID));
        enableDbmsOutputCheck.setSelection(store.getBoolean(OracleConstants.PREF_DBMS_OUTPUT));
}
@Override
protected void savePreferences(DBPPreferenceStore store)
{
store.setValue(OracleConstants.PREF_EXPLAIN_TABLE_NAME, explainTableText.getText());
store.setValue(OracleConstants.PREF_SUPPORT_ROWID, rowidSupportCheck.getSelection());
        store.setValue(OracleConstants.PREF_DBMS_OUTPUT, enableDbmsOutputCheck.getSelection());
PrefUtils.savePreferenceStore(store);
}
@Override
protected void clearPreferences(DBPPreferenceStore store)
{
store.setToDefault(OracleConstants.PREF_EXPLAIN_TABLE_NAME);
store.setToDefault(OracleConstants.PREF_SUPPORT_ROWID);
store.setToDefault(OracleConstants.PREF_DBMS_OUTPUT);
}
@Override
protected String getPropertyPageID()
{
return PAGE_ID;
}
} | ruspl-afed/dbeaver | plugins/org.jkiss.dbeaver.ext.oracle/src/org/jkiss/dbeaver/ext/oracle/views/PrefPageOracle.java | Java | apache-2.0 | 4,753 |
package com.winsun.fruitmix.model;
/**
* Created by Administrator on 2016/7/6.
*/
public class Equipment {
private String serviceName;
private String host;
private int port;
public Equipment(String serviceName, String host, int port) {
this.serviceName = serviceName;
this.host = host;
this.port = port;
}
public Equipment() {
}
public String getServiceName() {
return serviceName;
}
public void setServiceName(String serviceName) {
this.serviceName = serviceName;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
}
| andywu91/fruitMix-android | app/src/main/java/com/winsun/fruitmix/model/Equipment.java | Java | apache-2.0 | 815 |
package com.wangshan.service.impl;
import com.wangshan.dao.UserDao;
import com.wangshan.models.User;
import com.wangshan.service.ValidateService;
import com.wangshan.utils.gabriel.EncryptUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
/**
* Created by Administrator on 2015/11/15.
*/
@Service
public class ValidateServiceImpl implements ValidateService{
@Autowired
private UserDao userDao;
@Override
public Boolean validatePassword(String email, String password){
User user = userDao.getUserByEmail(email);
        return user != null
                && new EncryptUtil().encrypt(password + "-" + user.getSalt(), "SHA-1").equals(user.getPassword());
}
@Override
public Boolean validateMobileRepeat(String mobile){
return false;
}
@Override
public Boolean validateEmailRepeat(String email){
return false;
}
}
| sanyiwangshan/my_space | backend/src/main/java/com/wangshan/service/impl/ValidateServiceImpl.java | Java | apache-2.0 | 1,047 |
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.util.prop;
import ghidra.util.*;
import java.io.*;
/**
* Handles general storage and retrieval of saveable objects indexed by long
* keys.
*
*/
public class SaveableObjectPropertySet extends PropertySet {
private final static long serialVersionUID = 1;
/**
* Constructor for SaveableObjectPropertySet.
	 * @param name the name associated with this property set.
	 * @param objectClass the class of the stored objects; must implement Saveable
	 * and provide a public no-args constructor.
	 */
public SaveableObjectPropertySet(String name, Class<?> objectClass) {
super(name, objectClass);
if (!Saveable.class.isAssignableFrom(objectClass)) {
throw new IllegalArgumentException("Class "+objectClass+
"does not implement the Saveable interface");
}
try {
objectClass.newInstance();
} catch(Exception e) {
throw new IllegalArgumentException("Class "+objectClass+
"must be public and have a public, no args, constructor");
}
}
/**
* @see PropertySet#getDataSize()
*/
@Override
public int getDataSize() {
return 20;
}
/**
* Stores a saveable object at the given index. Any object currently at
* that index will be replaced by the new object.
* @param index the index at which to store the saveable object.
* @param value the saveable object to store.
*/
public void putObject(long index, Saveable value) {
PropertyPage page = getOrCreatePage(getPageID(index));
int n = page.getSize();
page.addSaveableObject(getPageOffset(index), value);
numProperties += page.getSize() - n;
}
/**
* Retrieves the saveable object stored at the given index.
* @param index the index at which to retrieve the saveable object.
* @return the saveable object stored at the given index or null if no
* object is stored at the index.
*/
public Saveable getObject(long index) {
PropertyPage page = getPage(getPageID(index));
if (page != null) {
return page.getSaveableObject(getPageOffset(index));
}
return null;
}
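	// Illustrative usage sketch (the Saveable subclass below is hypothetical, not part of
	// this framework): values are stored and looked up by an arbitrary long index.
	//
	//   SaveableObjectPropertySet set = new SaveableObjectPropertySet("comments", MyComment.class);
	//   set.putObject(0x401000L, new MyComment("entry point"));
	//   MyComment c = (MyComment) set.getObject(0x401000L);  // null if nothing stored at that index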
/* (non-Javadoc)
* @see ghidra.util.prop.PropertySet#moveIndex(long, long)
*/
@Override
protected void moveIndex(long from, long to) {
Saveable value = getObject(from);
remove(from);
putObject(to, value);
}
/**
* saves the property at the given index to the given output stream.
*/
@Override
protected void saveProperty(ObjectOutputStream oos, long index) throws IOException {
Saveable obj = getObject(index);
oos.writeObject(obj.getClass().getName());
obj.save(new ObjectStorageStreamAdapter(oos));
}
/**
* restores the property from the input stream to the given index.
*/
@Override
protected void restoreProperty(ObjectInputStream ois, long index)
throws IOException, ClassNotFoundException {
try {
String className = (String)ois.readObject();
Class<?> c = Class.forName(className);
Saveable obj = (Saveable)c.newInstance();
obj.restore(new ObjectStorageStreamAdapter(ois));
putObject(index, obj);
} catch (Exception e) {
Msg.showError(this, null, null, null, e);
}
}
/**
*
* @see ghidra.util.prop.PropertySet#applyValue(PropertyVisitor, long)
*/
@Override
public void applyValue(PropertyVisitor visitor, long addr) {
Saveable obj = getObject(addr);
if (obj != null) {
visitor.visit(obj);
}
}
}
| NationalSecurityAgency/ghidra | Ghidra/Framework/Generic/src/main/java/ghidra/util/prop/SaveableObjectPropertySet.java | Java | apache-2.0 | 3,858 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.seda;
import org.apache.camel.CamelExecutionException;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.builder.RouteBuilder;
/**
* @version
*/
public class SedaInOutWithErrorDeadLetterChannelTest extends ContextTestSupport {
public void testInOutWithErrorUsingDLC() throws Exception {
getMockEndpoint("mock:result").expectedMessageCount(0);
getMockEndpoint("mock:dead").expectedMessageCount(1);
try {
template.requestBody("direct:start", "Hello World", String.class);
fail("Should have thrown an exception");
} catch (CamelExecutionException e) {
assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertEquals("Damn I cannot do this", e.getCause().getMessage());
}
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
errorHandler(deadLetterChannel("mock:dead").maximumRedeliveries(2).redeliveryDelay(0).handled(false));
from("direct:start").to("seda:foo");
from("seda:foo").transform(constant("Bye World"))
.throwException(new IllegalArgumentException("Damn I cannot do this"))
.to("mock:result");
}
};
}
} | everttigchelaar/camel-svn | camel-core/src/test/java/org/apache/camel/component/seda/SedaInOutWithErrorDeadLetterChannelTest.java | Java | apache-2.0 | 2,341 |
package com.asura.monitor.platform.dao;
import com.asura.framework.base.paging.PagingResult;
import com.asura.framework.base.paging.SearchMap;
import com.asura.framework.dao.mybatis.base.MybatisDaoContext;
import com.asura.framework.dao.mybatis.paginator.domain.PageBounds;
import com.asura.common.dao.BaseDao;
import com.asura.monitor.platform.entity.MonitorPlatformServerEntity;
import org.springframework.stereotype.Repository;
import javax.annotation.Resource;
/**
 * Data access object for {@link MonitorPlatformServerEntity}, backed by MyBatis paginated queries.
* @author zhaozq14
* @version 1.0
* @date 2016-11-07 11:35:05
* @since 1.0
*/
@Repository("com.asura.monitor.configure.dao.MonitorPlatformServerDao")
public class MonitorPlatformServerDao extends BaseDao<MonitorPlatformServerEntity>{
@Resource(name="monitor.MybatisDaoContext")
private MybatisDaoContext mybatisDaoContext;
    /**
     * Executes a paginated query using the MyBatis statement identified by {@code sqlId}.
     *
     * @param searchMap  query conditions
     * @param pageBounds paging and sorting information
     * @param sqlId      suffix of the MyBatis statement id to execute
     * @return a page of {@link MonitorPlatformServerEntity} results
     */
public PagingResult<MonitorPlatformServerEntity> findAll(SearchMap searchMap, PageBounds pageBounds, String sqlId){
return mybatisDaoContext.findForPage(this.getClass().getName()+"."+sqlId,MonitorPlatformServerEntity.class,searchMap,pageBounds);
}
} | AsuraTeam/monitor | server/src/main/java/com/asura/monitor/platform/dao/MonitorPlatformServerDao.java | Java | apache-2.0 | 1,279 |
/**
* Jakarta Bean Validation TCK
*
* License: Apache License, Version 2.0
* See the license.txt file in the root directory or <http://www.apache.org/licenses/LICENSE-2.0>.
*/
package org.hibernate.beanvalidation.tck.tests.constraints.constraintdefinition;
import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.assertNoViolations;
import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.assertThat;
import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.violationOf;
import static org.testng.Assert.assertEquals;
import java.util.Set;
import jakarta.validation.ConstraintViolation;
import jakarta.validation.Validator;
import jakarta.validation.constraints.Size;
import jakarta.validation.groups.Default;
import jakarta.validation.metadata.ConstraintDescriptor;
import org.hibernate.beanvalidation.tck.beanvalidation.Sections;
import org.hibernate.beanvalidation.tck.tests.AbstractTCKTest;
import org.hibernate.beanvalidation.tck.util.TestUtil;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.jboss.test.audit.annotations.SpecAssertion;
import org.jboss.test.audit.annotations.SpecVersion;
import org.testng.annotations.Test;
/**
* @author Hardy Ferentschik
* @author Guillaume Smet
*/
@SpecVersion(spec = "beanvalidation", version = "3.0.0")
public class ConstraintDefinitionsTest extends AbstractTCKTest {
@Deployment
public static WebArchive createTestArchive() {
return webArchiveBuilder()
.withTestClassPackage( ConstraintDefinitionsTest.class )
.build();
}
@Test
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_CONSTRAINTDEFINITION_PROPERTIES, id = "a")
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "a")
public void testConstraintWithCustomAttributes() {
Validator validator = TestUtil.getValidatorUnderTest();
Set<ConstraintDescriptor<?>> descriptors = validator.getConstraintsForClass( Person.class )
.getConstraintsForProperty( "lastName" )
.getConstraintDescriptors();
assertEquals( descriptors.size(), 2, "There should be two constraints on the lastName property." );
for ( ConstraintDescriptor<?> descriptor : descriptors ) {
assertEquals(
descriptor.getAnnotation().annotationType().getName(),
AlwaysValid.class.getName(),
"Wrong annotation type."
);
}
Set<ConstraintViolation<Person>> constraintViolations = validator.validate( new Person( "John", "Doe" ) );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( AlwaysValid.class )
);
}
@Test
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "a")
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "b")
public void testRepeatableConstraint() {
Validator validator = TestUtil.getValidatorUnderTest();
Set<ConstraintDescriptor<?>> descriptors = validator.getConstraintsForClass( Movie.class )
.getConstraintsForProperty( "title" )
.getConstraintDescriptors();
assertEquals( descriptors.size(), 2, "There should be two constraints on the title property." );
for ( ConstraintDescriptor<?> descriptor : descriptors ) {
assertEquals(
descriptor.getAnnotation().annotationType().getName(),
Size.class.getName(),
"Wrong annotation type."
);
}
Set<ConstraintViolation<Movie>> constraintViolations = validator.validate( new Movie( "Title" ) );
assertNoViolations( constraintViolations );
constraintViolations = validator.validate( new Movie( "A" ) );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Size.class )
);
constraintViolations = validator.validate( new Movie( "A movie title far too long that does not respect the constraint" ) );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Size.class )
);
}
@Test
@SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_CONSTRAINTDEFINITION_PROPERTIES_GROUPS, id = "d")
public void testDefaultGroupAssumedWhenNoGroupsSpecified() {
Validator validator = TestUtil.getValidatorUnderTest();
ConstraintDescriptor<?> descriptor = validator.getConstraintsForClass( Person.class )
.getConstraintsForProperty( "firstName" )
.getConstraintDescriptors()
.iterator()
.next();
Set<Class<?>> groups = descriptor.getGroups();
assertEquals( groups.size(), 1, "The group set should only contain one entry." );
assertEquals( groups.iterator().next(), Default.class, "The Default group should be returned." );
}
}
| beanvalidation/beanvalidation-tck | tests/src/main/java/org/hibernate/beanvalidation/tck/tests/constraints/constraintdefinition/ConstraintDefinitionsTest.java | Java | apache-2.0 | 4,672 |
/*
* Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.map.impl;
import com.hazelcast.config.MaxSizeConfig;
import com.hazelcast.core.IFunction;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.nio.serialization.SerializableByConvention;
import com.hazelcast.spi.partition.IPartitionService;
import com.hazelcast.util.CollectionUtil;
import com.hazelcast.util.UnmodifiableIterator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import static com.hazelcast.config.MaxSizeConfig.MaxSizePolicy.PER_NODE;
import static com.hazelcast.util.MapUtil.createHashMap;
import static com.hazelcast.util.Preconditions.checkNotNull;
public final class MapKeyLoaderUtil {
private MapKeyLoaderUtil() {
}
/**
* Returns the role for the map key loader based on the passed parameters.
* The partition owner of the map name partition is the sender.
* The first replica of the map name partition is the sender backup.
* Other partition owners are receivers and other partition replicas do
* not have a role.
*
* @param isPartitionOwner if this is the partition owner
* @param isMapNamePartition if this is the partition containing the map name
* @param isMapNamePartitionFirstReplica if this is the first replica for the partition
* containing the map name
* @return the map key loader role
*/
static MapKeyLoader.Role assignRole(boolean isPartitionOwner, boolean isMapNamePartition,
boolean isMapNamePartitionFirstReplica) {
if (isMapNamePartition) {
if (isPartitionOwner) {
// map-name partition owner is the SENDER
return MapKeyLoader.Role.SENDER;
} else {
if (isMapNamePartitionFirstReplica) {
// first replica of the map-name partition is the SENDER_BACKUP
return MapKeyLoader.Role.SENDER_BACKUP;
} else {
// other replicas of the map-name partition do not have a role
return MapKeyLoader.Role.NONE;
}
}
} else {
// ordinary partition owners are RECEIVERs, otherwise no role
return isPartitionOwner ? MapKeyLoader.Role.RECEIVER : MapKeyLoader.Role.NONE;
}
}
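    // Summary of the decision table implemented above (comment added for illustration only):
    //   assignRole(isOwner=true,  isMapNamePartition=true,  firstReplica=*)     -> SENDER
    //   assignRole(isOwner=false, isMapNamePartition=true,  firstReplica=true)  -> SENDER_BACKUP
    //   assignRole(isOwner=false, isMapNamePartition=true,  firstReplica=false) -> NONE
    //   assignRole(isOwner=true,  isMapNamePartition=false, firstReplica=*)     -> RECEIVER
    //   assignRole(isOwner=false, isMapNamePartition=false, firstReplica=*)     -> NONE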
/**
* Transforms an iterator of entries to an iterator of entry batches
* where each batch is represented as a map from entry key to
* list of entry values.
* The maximum size of the entry value list in any batch is
* determined by the {@code maxBatch} parameter. Only one
* entry value list may have the {@code maxBatch} size, other
* lists will be smaller.
*
* @param entries the entries to be batched
* @param maxBatch the maximum size of an entry group in a single batch
* @return an iterator with entry batches
*/
static Iterator<Map<Integer, List<Data>>> toBatches(final Iterator<Entry<Integer, Data>> entries,
final int maxBatch) {
return new UnmodifiableIterator<Map<Integer, List<Data>>>() {
@Override
public boolean hasNext() {
return entries.hasNext();
}
@Override
public Map<Integer, List<Data>> next() {
if (!entries.hasNext()) {
throw new NoSuchElementException();
}
return nextBatch(entries, maxBatch);
}
};
}
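    // Hedged usage sketch (variable names are hypothetical): batching loaded keys so that
    // grouping stops as soon as any single partition's key list reaches the maximum.
    //
    //   Iterator<Entry<Integer, Data>> keysByPartition = ...;
    //   Iterator<Map<Integer, List<Data>>> batches = toBatches(keysByPartition, 100);
    //   while (batches.hasNext()) {
    //       Map<Integer, List<Data>> batch = batches.next(); // partitionId -> keys for this batch
    //   }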
/**
* Groups entries by the entry key. The entries will be grouped
* until at least one group has up to {@code maxBatch}
* entries or until the {@code entries} have been exhausted.
*
* @param entries the entries to be grouped by key
* @param maxBatch the maximum size of a group
* @return the grouped entries by entry key
*/
private static Map<Integer, List<Data>> nextBatch(Iterator<Entry<Integer, Data>> entries, int maxBatch) {
Map<Integer, List<Data>> batch = createHashMap(maxBatch);
while (entries.hasNext()) {
Entry<Integer, Data> e = entries.next();
List<Data> partitionKeys = CollectionUtil.addToValueList(batch, e.getKey(), e.getValue());
if (partitionKeys.size() >= maxBatch) {
break;
}
}
return batch;
}
/**
* Returns the configured maximum entry count per node if the max
* size policy is {@link MaxSizeConfig.MaxSizePolicy#PER_NODE}
* and is not the default, otherwise returns {@code -1}.
*
* @param maxSizeConfig the max size configuration
* @return the max size per node or {@code -1} if not configured or is the default
* @see MaxSizeConfig#getMaxSizePolicy()
* @see MaxSizeConfig#getSize()
*/
public static int getMaxSizePerNode(MaxSizeConfig maxSizeConfig) {
// max size or -1 if policy is different or not set
double maxSizePerNode = maxSizeConfig.getMaxSizePolicy() == PER_NODE ? maxSizeConfig.getSize() : -1D;
if (maxSizePerNode == MaxSizeConfig.DEFAULT_MAX_SIZE) {
// unlimited
return -1;
}
return (int) maxSizePerNode;
}
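    // Worked example (illustrative only, assuming the usual MaxSizeConfig setters): a PER_NODE
    // policy with size 5000 yields 5000; any other policy, or the default size, yields -1.
    //
    //   MaxSizeConfig cfg = new MaxSizeConfig().setMaxSizePolicy(PER_NODE).setSize(5000);
    //   int perNode = getMaxSizePerNode(cfg);  // 5000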
/**
* Returns a {@link IFunction} that transforms a {@link Data}
* parameter to an map entry where the key is the partition ID
* and the value is the provided parameter.
*
* @param partitionService the partition service
*/
static IFunction<Data, Entry<Integer, Data>> toPartition(final IPartitionService partitionService) {
return new DataToEntry(partitionService);
}
@SerializableByConvention
private static class DataToEntry implements IFunction<Data, Entry<Integer, Data>> {
private final IPartitionService partitionService;
public DataToEntry(IPartitionService partitionService) {
this.partitionService = partitionService;
}
@Override
public Entry<Integer, Data> apply(Data input) {
// Null-pointer here, in case of null key loaded by MapLoader
checkNotNull(input, "Key loaded by a MapLoader cannot be null.");
Integer partition = partitionService.getPartitionId(input);
return new MapEntrySimple<Integer, Data>(partition, input);
}
}
}
| dbrimley/hazelcast | hazelcast/src/main/java/com/hazelcast/map/impl/MapKeyLoaderUtil.java | Java | apache-2.0 | 7,145 |
package org.targettest.org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.targettest.org.apache.lucene.document.Document;
import org.targettest.org.apache.lucene.document.FieldSelector;
import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermDocs;
import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermEnum;
import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermPositions;
import org.targettest.org.apache.lucene.search.DefaultSimilarity;
import org.targettest.org.apache.lucene.search.FieldCache;
/** An IndexReader which reads multiple indexes, appending
* their content. */
public class MultiReader extends IndexReader implements Cloneable {
protected IndexReader[] subReaders;
private int[] starts; // 1st docno for each segment
private boolean[] decrefOnClose; // remember which subreaders to decRef on close
private Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
private int maxDoc = 0;
private int numDocs = -1;
private boolean hasDeletions = false;
/**
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
* Directory locking for delete, undeleteAll, and setNorm operations is
* left to the subreaders. </p>
* <p>Note that all subreaders are closed if this Multireader is closed.</p>
* @param subReaders set of (sub)readers
* @throws IOException
*/
public MultiReader(IndexReader... subReaders) {
initialize(subReaders, true);
}
/**
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
* Directory locking for delete, undeleteAll, and setNorm operations is
* left to the subreaders. </p>
* @param closeSubReaders indicates whether the subreaders should be closed
* when this MultiReader is closed
* @param subReaders set of (sub)readers
* @throws IOException
*/
public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) {
initialize(subReaders, closeSubReaders);
}
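  // Usage sketch (illustrative; the readers and directories below are hypothetical): aggregate two
  // already-open readers without transferring ownership, so closing the MultiReader only decRefs them.
  //
  //   IndexReader r1 = IndexReader.open(dir1), r2 = IndexReader.open(dir2);
  //   MultiReader multi = new MultiReader(new IndexReader[] {r1, r2}, false);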
private void initialize(IndexReader[] subReaders, boolean closeSubReaders) {
this.subReaders = subReaders.clone();
starts = new int[subReaders.length + 1]; // build starts array
decrefOnClose = new boolean[subReaders.length];
for (int i = 0; i < subReaders.length; i++) {
starts[i] = maxDoc;
maxDoc += subReaders[i].maxDoc(); // compute maxDocs
if (!closeSubReaders) {
subReaders[i].incRef();
decrefOnClose[i] = true;
} else {
decrefOnClose[i] = false;
}
if (subReaders[i].hasDeletions())
hasDeletions = true;
}
starts[subReaders.length] = maxDoc;
}
/**
* Tries to reopen the subreaders.
* <br>
* If one or more subreaders could be re-opened (i. e. subReader.reopen()
* returned a new instance != subReader), then a new MultiReader instance
* is returned, otherwise this instance is returned.
* <p>
* A re-opened instance might share one or more subreaders with the old
* instance. Index modification operations result in undefined behavior
* when performed before the old instance is closed.
* (see {@link IndexReader#reopen()}).
* <p>
* If subreaders are shared, then the reference count of those
* readers is increased to ensure that the subreaders remain open
* until the last referring reader is closed.
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
@Override
public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
return doReopen(false);
}
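  // Typical refresh pattern implied by the javadoc above (sketch only): close the old reader
  // only when reopen() actually returned a different instance.
  //
  //   IndexReader current = ...;               // e.g. a MultiReader
  //   IndexReader refreshed = current.reopen();
  //   if (refreshed != current) {
  //       current.close();                     // shared subreaders stay open thanks to incRef
  //       current = refreshed;
  //   }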
/**
* Clones the subreaders.
* (see {@link IndexReader#clone()}).
* <br>
* <p>
* If subreaders are shared, then the reference count of those
* readers is increased to ensure that the subreaders remain open
* until the last referring reader is closed.
*/
@Override
public synchronized Object clone() {
try {
return doReopen(true);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
/**
* If clone is true then we clone each of the subreaders
* @param doClone
* @return New IndexReader, or same one (this) if
* reopen/clone is not necessary
* @throws CorruptIndexException
* @throws IOException
*/
protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException {
ensureOpen();
boolean reopened = false;
IndexReader[] newSubReaders = new IndexReader[subReaders.length];
boolean success = false;
try {
for (int i = 0; i < subReaders.length; i++) {
if (doClone)
newSubReaders[i] = (IndexReader) subReaders[i].clone();
else
newSubReaders[i] = subReaders[i].reopen();
// if at least one of the subreaders was updated we remember that
// and return a new MultiReader
if (newSubReaders[i] != subReaders[i]) {
reopened = true;
}
}
success = true;
} finally {
if (!success && reopened) {
for (int i = 0; i < newSubReaders.length; i++) {
if (newSubReaders[i] != subReaders[i]) {
try {
newSubReaders[i].close();
} catch (IOException ignore) {
// keep going - we want to clean up as much as possible
}
}
}
}
}
if (reopened) {
boolean[] newDecrefOnClose = new boolean[subReaders.length];
for (int i = 0; i < subReaders.length; i++) {
if (newSubReaders[i] == subReaders[i]) {
newSubReaders[i].incRef();
newDecrefOnClose[i] = true;
}
}
MultiReader mr = new MultiReader(newSubReaders);
mr.decrefOnClose = newDecrefOnClose;
return mr;
} else {
return this;
}
}
@Override
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
}
@Override
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVector(n - starts[i], field);
}
@Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper);
}
@Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
}
@Override
public boolean isOptimized() {
return false;
}
@Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
// NOTE: multiple threads may wind up init'ing
// numDocs... but that's harmless
if (numDocs == -1) { // check cache
int n = 0; // cache miss--recompute
for (int i = 0; i < subReaders.length; i++)
n += subReaders[i].numDocs(); // sum from readers
numDocs = n;
}
return numDocs;
}
@Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return maxDoc;
}
// inherit javadoc
@Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
}
@Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
int i = readerIndex(n); // find segment num
return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
}
@Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return hasDeletions;
}
@Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
numDocs = -1; // invalidate cache
int i = readerIndex(n); // find segment num
subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader
hasDeletions = true;
}
@Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].undeleteAll();
hasDeletions = false;
numDocs = -1; // invalidate cache
}
private int readerIndex(int n) { // find reader for doc n:
return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length);
}
@Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
if (subReaders[i].hasNorms(field)) return true;
}
return false;
}
@Override
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = normsCache.get(field);
if (bytes != null)
return bytes; // cache hit
if (!hasNorms(field))
return null;
bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++)
subReaders[i].norms(field, bytes, starts[i]);
normsCache.put(field, bytes); // update cache
return bytes;
}
@Override
public synchronized void norms(String field, byte[] result, int offset)
throws IOException {
ensureOpen();
byte[] bytes = normsCache.get(field);
if (bytes==null && !hasNorms(field)) {
Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
} else if (bytes != null) { // cache hit
System.arraycopy(bytes, 0, result, offset, maxDoc());
} else {
for (int i = 0; i < subReaders.length; i++) { // read from segments
subReaders[i].norms(field, result, offset + starts[i]);
}
}
}
@Override
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
synchronized (normsCache) {
normsCache.remove(field); // clear cache
}
int i = readerIndex(n); // find segment num
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
}
@Override
public TermEnum terms() throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, null);
}
@Override
public TermEnum terms(Term term) throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, term);
}
@Override
public int docFreq(Term t) throws IOException {
ensureOpen();
int total = 0; // sum freqs in segments
for (int i = 0; i < subReaders.length; i++)
total += subReaders[i].docFreq(t);
return total;
}
@Override
public TermDocs termDocs() throws IOException {
ensureOpen();
return new MultiTermDocs(this, subReaders, starts);
}
@Override
public TermPositions termPositions() throws IOException {
ensureOpen();
return new MultiTermPositions(this, subReaders, starts);
}
@Override
protected void doCommit(Map<String,String> commitUserData) throws IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].commit(commitUserData);
}
@Override
protected synchronized void doClose() throws IOException {
for (int i = 0; i < subReaders.length; i++) {
if (decrefOnClose[i]) {
subReaders[i].decRef();
} else {
subReaders[i].close();
}
}
// NOTE: only needed in case someone had asked for
// FieldCache for top-level reader (which is generally
// not a good idea):
FieldCache.DEFAULT.purge(this);
}
@Override
public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
ensureOpen();
return DirectoryReader.getFieldNames(fieldNames, this.subReaders);
}
/**
* Checks recursively if all subreaders are up to date.
*/
@Override
public boolean isCurrent() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++) {
if (!subReaders[i].isCurrent()) {
return false;
}
}
// all subreaders are up to date
return true;
}
/** Not implemented.
* @throws UnsupportedOperationException
*/
@Override
public long getVersion() {
throw new UnsupportedOperationException("MultiReader does not support this method.");
}
@Override
public IndexReader[] getSequentialSubReaders() {
return subReaders;
}
}
| chrishumphreys/provocateur | provocateur-thirdparty/src/main/java/org/targettest/org/apache/lucene/index/MultiReader.java | Java | apache-2.0 | 14,073 |
/*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.systemtest.kafka;
import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder;
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecurityContextBuilder;
import io.fabric8.kubernetes.api.model.Service;
import io.fabric8.kubernetes.api.model.apps.StatefulSet;
import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext;
import io.strimzi.api.kafka.Crds;
import io.strimzi.api.kafka.KafkaTopicList;
import io.strimzi.api.kafka.model.EntityOperatorSpec;
import io.strimzi.api.kafka.model.EntityTopicOperatorSpec;
import io.strimzi.api.kafka.model.EntityUserOperatorSpec;
import io.strimzi.api.kafka.model.Kafka;
import io.strimzi.api.kafka.model.KafkaClusterSpec;
import io.strimzi.api.kafka.model.KafkaResources;
import io.strimzi.api.kafka.model.KafkaTopic;
import io.strimzi.api.kafka.model.SystemProperty;
import io.strimzi.api.kafka.model.SystemPropertyBuilder;
import io.strimzi.api.kafka.model.ZookeeperClusterSpec;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType;
import io.strimzi.api.kafka.model.storage.JbodStorage;
import io.strimzi.api.kafka.model.storage.JbodStorageBuilder;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;
import io.strimzi.operator.common.model.Labels;
import io.strimzi.systemtest.AbstractST;
import io.strimzi.systemtest.Constants;
import io.strimzi.systemtest.Environment;
import io.strimzi.systemtest.resources.operator.SetupClusterOperator;
import io.strimzi.systemtest.annotations.OpenShiftOnly;
import io.strimzi.systemtest.annotations.ParallelNamespaceTest;
import io.strimzi.systemtest.cli.KafkaCmdClient;
import io.strimzi.systemtest.kafkaclients.internalClients.InternalKafkaClient;
import io.strimzi.systemtest.resources.ResourceOperation;
import io.strimzi.systemtest.resources.crd.KafkaResource;
import io.strimzi.systemtest.resources.crd.KafkaTopicResource;
import io.strimzi.systemtest.templates.crd.KafkaClientsTemplates;
import io.strimzi.systemtest.templates.crd.KafkaTemplates;
import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates;
import io.strimzi.systemtest.templates.crd.KafkaUserTemplates;
import io.strimzi.systemtest.utils.StUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.ConfigMapUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.StatefulSetUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.PersistentVolumeClaimUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.ServiceUtils;
import io.strimzi.test.TestUtils;
import io.strimzi.test.executor.ExecResult;
import io.strimzi.test.timemeasuring.Operation;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.hamcrest.CoreMatchers;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.extension.ExtensionContext;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.stream.Collectors;
import static io.strimzi.api.kafka.model.KafkaResources.kafkaStatefulSetName;
import static io.strimzi.api.kafka.model.KafkaResources.zookeeperStatefulSetName;
import static io.strimzi.systemtest.Constants.CRUISE_CONTROL;
import static io.strimzi.systemtest.Constants.INTERNAL_CLIENTS_USED;
import static io.strimzi.systemtest.Constants.LOADBALANCER_SUPPORTED;
import static io.strimzi.systemtest.Constants.REGRESSION;
import static io.strimzi.systemtest.Constants.STATEFUL_SET;
import static io.strimzi.systemtest.utils.StUtils.configMap2Properties;
import static io.strimzi.systemtest.utils.StUtils.stringToProperties;
import static io.strimzi.test.TestUtils.fromYamlString;
import static io.strimzi.test.TestUtils.map;
import static io.strimzi.test.k8s.KubeClusterResource.cmdKubeClient;
import static io.strimzi.test.k8s.KubeClusterResource.kubeClient;
import static java.util.Arrays.asList;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.emptyOrNullString;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasItems;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.junit.jupiter.api.Assumptions.assumeFalse;
@Tag(REGRESSION)
@SuppressWarnings("checkstyle:ClassFanOutComplexity")
class KafkaST extends AbstractST {
private static final Logger LOGGER = LogManager.getLogger(KafkaST.class);
private static final String TEMPLATE_PATH = TestUtils.USER_PATH + "/../packaging/examples/templates/cluster-operator";
public static final String NAMESPACE = "kafka-cluster-test";
private static final String OPENSHIFT_CLUSTER_NAME = "openshift-my-cluster";
@ParallelNamespaceTest
@OpenShiftOnly
void testDeployKafkaClusterViaTemplate(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
cluster.createCustomResources(extensionContext, TEMPLATE_PATH);
String templateName = "strimzi-ephemeral";
cmdKubeClient(namespaceName).createResourceAndApply(templateName, map("CLUSTER_NAME", OPENSHIFT_CLUSTER_NAME));
StatefulSetUtils.waitForAllStatefulSetPodsReady(namespaceName, KafkaResources.zookeeperStatefulSetName(OPENSHIFT_CLUSTER_NAME), 3, ResourceOperation.getTimeoutForResourceReadiness(STATEFUL_SET));
StatefulSetUtils.waitForAllStatefulSetPodsReady(namespaceName, KafkaResources.kafkaStatefulSetName(OPENSHIFT_CLUSTER_NAME), 3, ResourceOperation.getTimeoutForResourceReadiness(STATEFUL_SET));
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(OPENSHIFT_CLUSTER_NAME), 1);
//Testing docker images
testDockerImagesForKafkaCluster(OPENSHIFT_CLUSTER_NAME, NAMESPACE, namespaceName, 3, 3, false);
//Testing labels
verifyLabelsForKafkaCluster(NAMESPACE, namespaceName, OPENSHIFT_CLUSTER_NAME, templateName);
LOGGER.info("Deleting Kafka cluster {} after test", OPENSHIFT_CLUSTER_NAME);
cmdKubeClient(namespaceName).deleteByName("Kafka", OPENSHIFT_CLUSTER_NAME);
//Wait for kafka deletion
cmdKubeClient(namespaceName).waitForResourceDeletion(Kafka.RESOURCE_KIND, OPENSHIFT_CLUSTER_NAME);
kubeClient(namespaceName).listPods(namespaceName).stream()
.filter(p -> p.getMetadata().getName().startsWith(OPENSHIFT_CLUSTER_NAME))
.forEach(p -> PodUtils.deletePodWithWait(p.getMetadata().getName()));
StatefulSetUtils.waitForStatefulSetDeletion(namespaceName, KafkaResources.kafkaStatefulSetName(OPENSHIFT_CLUSTER_NAME));
StatefulSetUtils.waitForStatefulSetDeletion(namespaceName, KafkaResources.zookeeperStatefulSetName(OPENSHIFT_CLUSTER_NAME));
DeploymentUtils.waitForDeploymentDeletion(namespaceName, KafkaResources.entityOperatorDeploymentName(OPENSHIFT_CLUSTER_NAME));
}
@ParallelNamespaceTest
void testEODeletion(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
// Get pod name to check termination process
Pod pod = kubeClient(namespaceName).listPods(namespaceName).stream()
.filter(p -> p.getMetadata().getName().startsWith(KafkaResources.entityOperatorDeploymentName(clusterName)))
.findAny()
.orElseThrow();
assertThat("Entity operator pod does not exist", pod, notNullValue());
LOGGER.info("Setting entity operator to null");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> kafka.getSpec().setEntityOperator(null), namespaceName);
// Wait when EO(UO + TO) will be removed
DeploymentUtils.waitForDeploymentDeletion(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));
PodUtils.deletePodWithWait(namespaceName, pod.getMetadata().getName());
LOGGER.info("Entity operator was deleted");
}
@ParallelNamespaceTest
@SuppressWarnings({"checkstyle:MethodLength", "checkstyle:JavaNCSS"})
void testCustomAndUpdatedValues(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LinkedHashMap<String, String> envVarGeneral = new LinkedHashMap<>();
envVarGeneral.put("TEST_ENV_1", "test.env.one");
envVarGeneral.put("TEST_ENV_2", "test.env.two");
LinkedHashMap<String, String> envVarUpdated = new LinkedHashMap<>();
envVarUpdated.put("TEST_ENV_2", "updated.test.env.two");
envVarUpdated.put("TEST_ENV_3", "test.env.three");
// Kafka Broker config
Map<String, Object> kafkaConfig = new HashMap<>();
kafkaConfig.put("offsets.topic.replication.factor", "1");
kafkaConfig.put("transaction.state.log.replication.factor", "1");
kafkaConfig.put("default.replication.factor", "1");
Map<String, Object> updatedKafkaConfig = new HashMap<>();
updatedKafkaConfig.put("offsets.topic.replication.factor", "2");
updatedKafkaConfig.put("transaction.state.log.replication.factor", "2");
updatedKafkaConfig.put("default.replication.factor", "2");
// Zookeeper Config
Map<String, Object> zookeeperConfig = new HashMap<>();
zookeeperConfig.put("tickTime", "2000");
zookeeperConfig.put("initLimit", "5");
zookeeperConfig.put("syncLimit", "2");
zookeeperConfig.put("autopurge.purgeInterval", "1");
Map<String, Object> updatedZookeeperConfig = new HashMap<>();
updatedZookeeperConfig.put("tickTime", "2500");
updatedZookeeperConfig.put("initLimit", "3");
updatedZookeeperConfig.put("syncLimit", "5");
final int initialDelaySeconds = 30;
final int timeoutSeconds = 10;
final int updatedInitialDelaySeconds = 31;
final int updatedTimeoutSeconds = 11;
final int periodSeconds = 10;
final int successThreshold = 1;
final int failureThreshold = 3;
final int updatedPeriodSeconds = 5;
final int updatedFailureThreshold = 1;
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 2)
.editSpec()
.editKafka()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.withConfig(kafkaConfig)
.withNewTemplate()
.withNewKafkaContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endKafkaContainer()
.endTemplate()
.endKafka()
.editZookeeper()
.withReplicas(2)
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.endLivenessProbe()
.withConfig(zookeeperConfig)
.withNewTemplate()
.withNewZookeeperContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endZookeeperContainer()
.endTemplate()
.endZookeeper()
.editEntityOperator()
.withNewTemplate()
.withNewTopicOperatorContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endTopicOperatorContainer()
.withNewUserOperatorContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endUserOperatorContainer()
.withNewTlsSidecarContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endTlsSidecarContainer()
.endTemplate()
.editUserOperator()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endUserOperator()
.editTopicOperator()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endTopicOperator()
.withNewTlsSidecar()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endTlsSidecar()
.endEntityOperator()
.endSpec()
.build());
final Map<String, String> kafkaSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
final Map<String, String> zkSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName));
final Map<String, String> eoPod = DeploymentUtils.depSnapshot(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));
LOGGER.info("Verify values before update");
checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), kafkaConfig, clusterName);
checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarGeneral);
String kafkaConfiguration = kubeClient().getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config");
assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=1"));
assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=1"));
assertThat(kafkaConfiguration, containsString("default.replication.factor=1"));
String kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out();
assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=1"));
assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=1"));
assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=1"));
LOGGER.info("Testing Zookeepers");
checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", zookeeperConfig);
checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarGeneral);
LOGGER.info("Checking configuration of TO and UO");
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarGeneral);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarGeneral);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarGeneral);
LOGGER.info("Updating configuration of Kafka cluster");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
KafkaClusterSpec kafkaClusterSpec = k.getSpec().getKafka();
kafkaClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
kafkaClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
kafkaClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
kafkaClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
kafkaClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
kafkaClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
kafkaClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
kafkaClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
kafkaClusterSpec.setConfig(updatedKafkaConfig);
kafkaClusterSpec.getTemplate().getKafkaContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
ZookeeperClusterSpec zookeeperClusterSpec = k.getSpec().getZookeeper();
zookeeperClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
zookeeperClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
zookeeperClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
zookeeperClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
zookeeperClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
zookeeperClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
zookeeperClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
zookeeperClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
zookeeperClusterSpec.setConfig(updatedZookeeperConfig);
zookeeperClusterSpec.getTemplate().getZookeeperContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
// Configuring TO and UO to use new values for InitialDelaySeconds and TimeoutSeconds
EntityOperatorSpec entityOperatorSpec = k.getSpec().getEntityOperator();
entityOperatorSpec.getTopicOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getUserOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getUserOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTemplate().getTopicOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
entityOperatorSpec.getTemplate().getUserOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
entityOperatorSpec.getTemplate().getTlsSidecarContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
}, namespaceName);
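        // Wait until ZooKeeper, Kafka and the Entity Operator roll so that the updated probes, configuration and env vars take effect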
StatefulSetUtils.waitTillSsHasRolled(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName), 2, zkSnapshot);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), 2, kafkaSnapshot);
DeploymentUtils.waitTillDepHasRolled(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPod);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
LOGGER.info("Verify values after update");
checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), updatedKafkaConfig, clusterName);
checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarUpdated);
kafkaConfiguration = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config");
assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=2"));
assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=2"));
assertThat(kafkaConfiguration, containsString("default.replication.factor=2"));
kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out();
assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=2"));
assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=2"));
assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=2"));
LOGGER.info("Testing Zookeepers");
checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", updatedZookeeperConfig);
checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarUpdated);
LOGGER.info("Getting entity operator to check configuration of TO and UO");
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarUpdated);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarUpdated);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarUpdated);
}
@ParallelNamespaceTest
void testJvmAndResources(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
ArrayList<SystemProperty> javaSystemProps = new ArrayList<>();
javaSystemProps.add(new SystemPropertyBuilder().withName("javax.net.debug")
.withValue("verbose").build());
Map<String, String> jvmOptionsXX = new HashMap<>();
jvmOptionsXX.put("UseG1GC", "true");
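        // UseG1GC=true is rendered as -XX:+UseG1GC and is asserted on the Kafka and ZooKeeper JVMs below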
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
.editSpec()
.editKafka()
.withResources(new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("1.5Gi"))
.addToLimits("cpu", new Quantity("1"))
.addToRequests("memory", new Quantity("1Gi"))
.addToRequests("cpu", new Quantity("50m"))
.build())
.withNewJvmOptions()
.withXmx("1g")
.withXms("512m")
.withXx(jvmOptionsXX)
.endJvmOptions()
.endKafka()
.editZookeeper()
.withResources(
new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("1G"))
.addToLimits("cpu", new Quantity("0.5"))
.addToRequests("memory", new Quantity("0.5G"))
.addToRequests("cpu", new Quantity("25m"))
.build())
.withNewJvmOptions()
.withXmx("1G")
.withXms("512M")
.withXx(jvmOptionsXX)
.endJvmOptions()
.endZookeeper()
.withNewEntityOperator()
.withNewTopicOperator()
.withResources(
new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("1024Mi"))
.addToLimits("cpu", new Quantity("500m"))
.addToRequests("memory", new Quantity("384Mi"))
.addToRequests("cpu", new Quantity("0.025"))
.build())
.withNewJvmOptions()
.withXmx("2G")
.withXms("1024M")
.withJavaSystemProperties(javaSystemProps)
.endJvmOptions()
.endTopicOperator()
.withNewUserOperator()
.withResources(
new ResourceRequirementsBuilder()
.addToLimits("memory", new Quantity("512M"))
.addToLimits("cpu", new Quantity("300m"))
.addToRequests("memory", new Quantity("256M"))
.addToRequests("cpu", new Quantity("30m"))
.build())
.withNewJvmOptions()
.withXmx("1G")
.withXms("512M")
.withJavaSystemProperties(javaSystemProps)
.endJvmOptions()
.endUserOperator()
.endEntityOperator()
.endSpec()
.build());
        // Make snapshots of the Kafka cluster to make sure that there is no rolling update after CO reconciliation
final String zkStsName = KafkaResources.zookeeperStatefulSetName(clusterName);
final String kafkaStsName = kafkaStatefulSetName(clusterName);
final String eoDepName = KafkaResources.entityOperatorDeploymentName(clusterName);
final Map<String, String> zkPods = StatefulSetUtils.ssSnapshot(namespaceName, zkStsName);
final Map<String, String> kafkaPods = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStsName);
final Map<String, String> eoPods = DeploymentUtils.depSnapshot(namespaceName, eoDepName);
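        // Verify that container resources and JVM options match the values requested in the Kafka CR above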
assertResources(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka",
"1536Mi", "1", "1Gi", "50m");
assertExpectedJavaOpts(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka",
"-Xmx1g", "-Xms512m", "-XX:+UseG1GC");
assertResources(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper",
"1G", "500m", "500M", "25m");
assertExpectedJavaOpts(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper",
"-Xmx1G", "-Xms512M", "-XX:+UseG1GC");
Optional<Pod> pod = kubeClient(namespaceName).listPods(namespaceName)
.stream().filter(p -> p.getMetadata().getName().startsWith(KafkaResources.entityOperatorDeploymentName(clusterName)))
.findFirst();
assertThat("EO pod does not exist", pod.isPresent(), is(true));
assertResources(namespaceName, pod.get().getMetadata().getName(), "topic-operator",
"1Gi", "500m", "384Mi", "25m");
assertResources(namespaceName, pod.get().getMetadata().getName(), "user-operator",
"512M", "300m", "256M", "30m");
assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "topic-operator",
"-Xmx2G", "-Xms1024M", null);
assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "user-operator",
"-Xmx1G", "-Xms512M", null);
String eoPod = eoPods.keySet().toArray()[0].toString();
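        // Check STRIMZI_JAVA_SYSTEM_PROPERTIES and STRIMZI_JAVA_OPTS of the operator containers, skipping the tls-sidecar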
kubeClient(namespaceName).getPod(namespaceName, eoPod).getSpec().getContainers().forEach(container -> {
if (!container.getName().equals("tls-sidecar")) {
LOGGER.info("Check if -D java options are present in {}", container.getName());
String javaSystemProp = container.getEnv().stream().filter(envVar ->
envVar.getName().equals("STRIMZI_JAVA_SYSTEM_PROPERTIES")).findFirst().orElseThrow().getValue();
String javaOpts = container.getEnv().stream().filter(envVar ->
envVar.getName().equals("STRIMZI_JAVA_OPTS")).findFirst().orElseThrow().getValue();
assertThat(javaSystemProp, is("-Djavax.net.debug=verbose"));
if (container.getName().equals("topic-operator")) {
assertThat(javaOpts, is("-Xms1024M -Xmx2G"));
}
if (container.getName().equals("user-operator")) {
assertThat(javaOpts, is("-Xms512M -Xmx1G"));
}
}
});
LOGGER.info("Checking no rolling update for Kafka cluster");
StatefulSetUtils.waitForNoRollingUpdate(namespaceName, zkStsName, zkPods);
StatefulSetUtils.waitForNoRollingUpdate(namespaceName, kafkaStsName, kafkaPods);
DeploymentUtils.waitForNoRollingUpdate(namespaceName, eoDepName, eoPods);
}
@ParallelNamespaceTest
void testForTopicOperator(ExtensionContext extensionContext) throws InterruptedException {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
final String cliTopicName = "topic-from-cli";
//Creating topics for testing
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
KafkaTopicUtils.waitForKafkaTopicReady(namespaceName, topicName);
assertThat(KafkaTopicResource.kafkaTopicClient().inNamespace(namespaceName).withName(topicName).get().getMetadata().getName(), is(topicName));
assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), hasItem(topicName));
KafkaCmdClient.createTopicUsingPodCli(namespaceName, clusterName, 0, cliTopicName, 1, 1);
assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), hasItems(topicName, cliTopicName));
assertThat(cmdKubeClient(namespaceName).list(KafkaTopic.RESOURCE_KIND), hasItems(cliTopicName, topicName));
//Updating first topic using pod CLI
KafkaCmdClient.updateTopicPartitionsCountUsingPodCli(namespaceName, clusterName, 0, topicName, 2);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
assertThat(KafkaCmdClient.describeTopicUsingPodCli(namespaceName, clusterName, 0, topicName),
hasItems("PartitionCount:2"));
KafkaTopic testTopic = fromYamlString(cmdKubeClient().get(KafkaTopic.RESOURCE_KIND, topicName), KafkaTopic.class);
assertThat(testTopic, is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec(), is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec().getPartitions(), is(Integer.valueOf(2)));
//Updating second topic via KafkaTopic update
KafkaTopicResource.replaceTopicResourceInSpecificNamespace(cliTopicName, topic -> topic.getSpec().setPartitions(2), namespaceName);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
assertThat(KafkaCmdClient.describeTopicUsingPodCli(namespaceName, clusterName, 0, cliTopicName),
hasItems("PartitionCount:2"));
testTopic = fromYamlString(cmdKubeClient(namespaceName).get(KafkaTopic.RESOURCE_KIND, cliTopicName), KafkaTopic.class);
assertThat(testTopic, is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec(), is(CoreMatchers.notNullValue()));
assertThat(testTopic.getSpec().getPartitions(), is(Integer.valueOf(2)));
        //Deleting the CLI-created topic by deleting its KafkaTopic resource
cmdKubeClient(namespaceName).deleteByName(KafkaTopic.RESOURCE_KIND, cliTopicName);
//Deleting another topic using pod CLI
KafkaCmdClient.deleteTopicUsingPodCli(namespaceName, clusterName, 0, topicName);
KafkaTopicUtils.waitForKafkaTopicDeletion(namespaceName, topicName);
//Checking all topics were deleted
Thread.sleep(Constants.TIMEOUT_TEARDOWN);
List<String> topics = KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0);
assertThat(topics, not(hasItems(topicName)));
assertThat(topics, not(hasItems(cliTopicName)));
}
@ParallelNamespaceTest
void testRemoveTopicOperatorFromEntityOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster {}", clusterName);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
String eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setTopicOperator(null), namespaceName);
        //Waiting until the EO pod is recreated without the TO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
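        // Without the Topic Operator the EO pod should contain only two containers (user-operator and tls-sidecar)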
PodUtils.waitUntilPodContainersCount(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 2);
//Checking that TO was removed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("topic-operator")));
});
});
eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setTopicOperator(new EntityTopicOperatorSpec()), namespaceName);
        //Waiting until the EO pod is recreated with the TO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
//Checking that TO was created
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), anyOf(
containsString("topic-operator"),
containsString("user-operator"),
containsString("tls-sidecar"))
);
});
});
}
@ParallelNamespaceTest
void testRemoveUserOperatorFromEntityOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster {}", clusterName);
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
String eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setUserOperator(null), namespaceName);
        //Waiting until the EO pod is recreated without the UO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
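        // Without the User Operator only the topic-operator and tls-sidecar containers should remain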
PodUtils.waitUntilPodContainersCount(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 2);
//Checking that UO was removed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("user-operator")));
});
});
eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
.get(0).getMetadata().getName();
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setUserOperator(new EntityUserOperatorSpec()), namespaceName);
        //Waiting until the EO pod is recreated with the UO
PodUtils.deletePodWithWait(namespaceName, eoPodName);
DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
//Checking that UO was created
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), anyOf(
containsString("topic-operator"),
containsString("user-operator"),
containsString("tls-sidecar"))
);
});
});
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
}
@ParallelNamespaceTest
void testRemoveUserAndTopicOperatorsFromEntityOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
// TODO issue #4152 - temporarily disabled for Namespace RBAC scoped
assumeFalse(Environment.isNamespaceRbacScope());
LOGGER.info("Deploying Kafka cluster {}", clusterName);
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
String eoDeploymentName = KafkaResources.entityOperatorDeploymentName(clusterName);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
k.getSpec().getEntityOperator().setTopicOperator(null);
k.getSpec().getEntityOperator().setUserOperator(null);
}, namespaceName);
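        // With both operators removed the Entity Operator deployment scales down to zero pods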
PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, eoDeploymentName, 0);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
k.getSpec().getEntityOperator().setTopicOperator(new EntityTopicOperatorSpec());
k.getSpec().getEntityOperator().setUserOperator(new EntityUserOperatorSpec());
}, namespaceName);
DeploymentUtils.waitForDeploymentReady(namespaceName, eoDeploymentName);
//Checking that EO was created
kubeClient().listPodsByPrefixInName(namespaceName, eoDeploymentName).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), anyOf(
containsString("topic-operator"),
containsString("user-operator"),
containsString("tls-sidecar"))
);
});
});
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
}
@ParallelNamespaceTest
void testEntityOperatorWithoutTopicOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster without TO in EO");
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
.editSpec()
.withNewEntityOperator()
.withNewUserOperator()
.endUserOperator()
.endEntityOperator()
.endSpec()
.build());
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
//Checking that TO was not deployed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("topic-operator")));
});
});
}
@ParallelNamespaceTest
void testEntityOperatorWithoutUserOperator(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster without UO in EO");
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
.editSpec()
.withNewEntityOperator()
.withNewTopicOperator()
.endTopicOperator()
.endEntityOperator()
.endSpec()
.build());
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
//Checking that UO was not deployed
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
pod.getSpec().getContainers().forEach(container -> {
assertThat(container.getName(), not(containsString("user-operator")));
});
});
}
@ParallelNamespaceTest
void testEntityOperatorWithoutUserAndTopicOperators(ExtensionContext extensionContext) {
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Deploying Kafka cluster without UO and TO in EO");
String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
.editSpec()
.withNewEntityOperator()
.endEntityOperator()
.endSpec()
.build());
timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
//Checking that EO was not deployed
assertThat("EO should not be deployed", kubeClient().listPodsByPrefixInName(KafkaResources.entityOperatorDeploymentName(clusterName)).size(), is(0));
}
@ParallelNamespaceTest
void testTopicWithoutLabels(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
        // Negative scenario: create a topic without any labels and make sure that the TO does not handle it
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
// Creating topic without any label
resourceManager.createResource(extensionContext, false, KafkaTopicTemplates.topic(clusterName, "topic-without-labels", 1, 1, 1)
.editMetadata()
.withLabels(null)
.endMetadata()
.build());
// Checking that resource was created
assertThat(cmdKubeClient(namespaceName).list("kafkatopic"), hasItems("topic-without-labels"));
        // Checking that the TO did not handle the new topic and that it is not present in the Kafka cluster
assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), not(hasItems("topic-without-labels")));
// Checking TO logs
String tOPodName = cmdKubeClient(namespaceName).listResourcesByLabel("pod", Labels.STRIMZI_NAME_LABEL + "=" + clusterName + "-entity-operator").get(0);
String tOlogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, tOPodName, "topic-operator");
assertThat(tOlogs, not(containsString("Created topic 'topic-without-labels'")));
//Deleting topic
cmdKubeClient(namespaceName).deleteByName("kafkatopic", "topic-without-labels");
KafkaTopicUtils.waitForKafkaTopicDeletion(namespaceName, "topic-without-labels");
//Checking all topics were deleted
List<String> topics = KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0);
assertThat(topics, not(hasItems("topic-without-labels")));
}
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsTrueFalse(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final int kafkaReplicas = 2;
final String diskSizeGi = "10";
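        // JBOD with two volumes: the claim of volume 0 is kept after cluster deletion (deleteClaim=false), the claim of volume 1 is deleted (deleteClaim=true)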
JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(),
new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build()).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());
// kafka cluster already deployed
verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);
final int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();
LOGGER.info("Deleting cluster");
cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);
LOGGER.info("Waiting for PVC deletion");
PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsTrue(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final int kafkaReplicas = 2;
final String diskSizeGi = "10";
JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(0).withSize(diskSizeGi + "Gi").build(),
new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build()).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());
// kafka cluster already deployed
verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);
final int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();
LOGGER.info("Deleting cluster");
cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);
LOGGER.info("Waiting for PVC deletion");
PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsFalse(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final int kafkaReplicas = 2;
final String diskSizeGi = "10";
JbodStorage jbodStorage = new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(),
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizeGi + "Gi").build()).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(clusterName, kafkaReplicas, jbodStorage).build());
// kafka cluster already deployed
verifyVolumeNamesAndLabels(namespaceName, clusterName, kafkaReplicas, 2, diskSizeGi);
int volumesCount = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).size();
LOGGER.info("Deleting cluster");
cmdKubeClient(namespaceName).deleteByName("kafka", clusterName);
LOGGER.info("Waiting for PVC deletion");
PersistentVolumeClaimUtils.waitForPVCDeletion(namespaceName, volumesCount, jbodStorage, clusterName);
}
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testPersistentStorageSize(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final String[] diskSizes = {"70Gi", "20Gi"};
final int kafkaRepl = 2;
final int diskCount = 2;
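        // Two JBOD volumes with different sizes; the resulting PVC capacities are checked against diskSizes below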
JbodStorage jbodStorage = new JbodStorageBuilder()
.withVolumes(
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizes[0]).build(),
new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizes[1]).build()
).build();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, kafkaRepl)
.editSpec()
.editKafka()
.withStorage(jbodStorage)
.endKafka()
                .editZookeeper()
                    .withReplicas(1)
.endZookeeper()
.endSpec()
.build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
List<PersistentVolumeClaim> volumes = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(
persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());
checkStorageSizeForVolumes(volumes, diskSizes, kafkaRepl, diskCount);
String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
LOGGER.info("Checking produced and consumed messages to pod:{}", kafkaClientsPodName);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
@ParallelNamespaceTest
@Tag(LOADBALANCER_SUPPORTED)
void testRegenerateCertExternalAddressChange(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LOGGER.info("Creating kafka without external listener");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1).build());
final String brokerSecret = clusterName + "-kafka-brokers";
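        // Snapshot the broker certificates before the external listener is added; they are expected to change once the listener exists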
Secret secretsWithoutExt = kubeClient(namespaceName).getSecret(namespaceName, brokerSecret);
LOGGER.info("Editing kafka with external listener");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
List<GenericKafkaListener> lst = asList(
new GenericKafkaListenerBuilder()
.withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.withPort(9092)
.withType(KafkaListenerType.INTERNAL)
.withTls(false)
.build(),
new GenericKafkaListenerBuilder()
.withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
.withPort(9094)
.withType(KafkaListenerType.LOADBALANCER)
.withTls(true)
.withNewConfiguration()
.withFinalizers(LB_FINALIZERS)
.endConfiguration()
.build()
);
kafka.getSpec().getKafka().setListeners(lst);
}, namespaceName);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName)));
Secret secretsWithExt = kubeClient(namespaceName).getSecret(namespaceName, brokerSecret);
LOGGER.info("Checking secrets");
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).forEach(kafkaPod -> {
String kafkaPodName = kafkaPod.getMetadata().getName();
assertThat(secretsWithExt.getData().get(kafkaPodName + ".crt"), is(not(secretsWithoutExt.getData().get(kafkaPodName + ".crt"))));
assertThat(secretsWithExt.getData().get(kafkaPodName + ".key"), is(not(secretsWithoutExt.getData().get(kafkaPodName + ".key"))));
});
}
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testLabelModificationDoesNotBreakCluster(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
Map<String, String> labels = new HashMap<>();
final String[] labelKeys = {"label-name-1", "label-name-2", ""};
final String[] labelValues = {"name-of-the-label-1", "name-of-the-label-2", ""};
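        // The third key/value slot is intentionally left empty and is filled in later when an additional label is added to the Kafka CR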
labels.put(labelKeys[0], labelValues[0]);
labels.put(labelKeys[1], labelValues[1]);
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
.editMetadata()
.withLabels(labels)
.endMetadata()
.build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
Map<String, String> kafkaPods = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName));
        LOGGER.info("Waiting for Kafka StatefulSet labels to change to {}", labels);
StatefulSetUtils.waitForStatefulSetLabelsChange(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), labels);
LOGGER.info("Getting labels from stateful set resource");
StatefulSet statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
        LOGGER.info("Verifying that the labels set on the Kafka CR are propagated to the StatefulSet");
assertThat("Label exists in stateful set with concrete value",
labelValues[0].equals(statefulSet.getSpec().getTemplate().getMetadata().getLabels().get(labelKeys[0])));
assertThat("Label exists in stateful set with concrete value",
labelValues[1].equals(statefulSet.getSpec().getTemplate().getMetadata().getLabels().get(labelKeys[1])));
labelValues[0] = "new-name-of-the-label-1";
labelValues[1] = "new-name-of-the-label-2";
labelKeys[2] = "label-name-3";
labelValues[2] = "name-of-the-label-3";
LOGGER.info("Setting new values of labels from {} to {} | from {} to {} and adding one {} with value {}",
"name-of-the-label-1", labelValues[0], "name-of-the-label-2", labelValues[1], labelKeys[2], labelValues[2]);
LOGGER.info("Edit kafka labels in Kafka CR");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, resource -> {
resource.getMetadata().getLabels().put(labelKeys[0], labelValues[0]);
resource.getMetadata().getLabels().put(labelKeys[1], labelValues[1]);
resource.getMetadata().getLabels().put(labelKeys[2], labelValues[2]);
}, namespaceName);
labels.put(labelKeys[0], labelValues[0]);
labels.put(labelKeys[1], labelValues[1]);
labels.put(labelKeys[2], labelValues[2]);
        LOGGER.info("Waiting for Kafka service labels to change to {}", labels);
ServiceUtils.waitForServiceLabelsChange(namespaceName, KafkaResources.brokersServiceName(clusterName), labels);
LOGGER.info("Verifying kafka labels via services");
Service service = kubeClient(namespaceName).getService(namespaceName, KafkaResources.brokersServiceName(clusterName));
verifyPresentLabels(labels, service);
        LOGGER.info("Waiting for Kafka config map labels to change to {}", labels);
ConfigMapUtils.waitForConfigMapLabelsChange(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName), labels);
LOGGER.info("Verifying kafka labels via config maps");
ConfigMap configMap = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName));
verifyPresentLabels(labels, configMap);
        LOGGER.info("Waiting for Kafka StatefulSet labels to change to {}", labels);
StatefulSetUtils.waitForStatefulSetLabelsChange(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), labels);
LOGGER.info("Verifying kafka labels via stateful set");
statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
verifyPresentLabels(labels, statefulSet);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, kafkaPods);
LOGGER.info("Verifying via kafka pods");
labels = kubeClient(namespaceName).getPod(namespaceName, KafkaResources.kafkaPodName(clusterName, 0)).getMetadata().getLabels();
assertThat("Label exists in kafka pods", labelValues[0].equals(labels.get(labelKeys[0])));
assertThat("Label exists in kafka pods", labelValues[1].equals(labels.get(labelKeys[1])));
assertThat("Label exists in kafka pods", labelValues[2].equals(labels.get(labelKeys[2])));
LOGGER.info("Removing labels: {} -> {}, {} -> {}, {} -> {}", labelKeys[0], labels.get(labelKeys[0]),
labelKeys[1], labels.get(labelKeys[1]), labelKeys[2], labels.get(labelKeys[2]));
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, resource -> {
resource.getMetadata().getLabels().remove(labelKeys[0]);
resource.getMetadata().getLabels().remove(labelKeys[1]);
resource.getMetadata().getLabels().remove(labelKeys[2]);
}, namespaceName);
labels.remove(labelKeys[0]);
labels.remove(labelKeys[1]);
labels.remove(labelKeys[2]);
        LOGGER.info("Waiting for Kafka service label deletion: {}", labels.toString());
ServiceUtils.waitForServiceLabelsDeletion(namespaceName, KafkaResources.brokersServiceName(clusterName), labelKeys[0], labelKeys[1], labelKeys[2]);
LOGGER.info("Verifying kafka labels via services");
service = kubeClient(namespaceName).getService(namespaceName, KafkaResources.brokersServiceName(clusterName));
verifyNullLabels(labelKeys, service);
LOGGER.info("Verifying kafka labels via config maps");
ConfigMapUtils.waitForConfigMapLabelsDeletion(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName), labelKeys[0], labelKeys[1], labelKeys[2]);
configMap = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName));
verifyNullLabels(labelKeys, configMap);
        LOGGER.info("Waiting for Kafka StatefulSet label deletion: {}", labels);
String statefulSetName = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getName();
StatefulSetUtils.waitForStatefulSetLabelsDeletion(namespaceName, statefulSetName, labelKeys[0], labelKeys[1], labelKeys[2]);
statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
LOGGER.info("Verifying kafka labels via stateful set");
verifyNullLabels(labelKeys, statefulSet);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, kafkaPods);
        LOGGER.info("Waiting for Kafka pod label deletion: {}", labels.toString());
PodUtils.waitUntilPodLabelsDeletion(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), labelKeys[0], labelKeys[1], labelKeys[2]);
labels = kubeClient(namespaceName).getPod(namespaceName, KafkaResources.kafkaPodName(clusterName, 0)).getMetadata().getLabels();
LOGGER.info("Verifying via kafka pods");
verifyNullLabels(labelKeys, labels);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testAppDomainLabels(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName =
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
Map<String, String> labels;
LOGGER.info("---> PODS <---");
List<Pod> pods = kubeClient(namespaceName).listPods(namespaceName, clusterName).stream()
.filter(pod -> pod.getMetadata().getName().startsWith(clusterName))
.filter(pod -> !pod.getMetadata().getName().startsWith(clusterName + "-" + Constants.KAFKA_CLIENTS))
.collect(Collectors.toList());
for (Pod pod : pods) {
LOGGER.info("Getting labels from {} pod", pod.getMetadata().getName());
verifyAppLabels(pod.getMetadata().getLabels());
}
LOGGER.info("---> STATEFUL SETS <---");
LOGGER.info("Getting labels from stateful set of kafka resource");
labels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getLabels();
verifyAppLabels(labels);
LOGGER.info("Getting labels from stateful set of zookeeper resource");
labels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName)).getMetadata().getLabels();
verifyAppLabels(labels);
LOGGER.info("---> SERVICES <---");
List<Service> services = kubeClient(namespaceName).listServices(namespaceName).stream()
.filter(service -> service.getMetadata().getName().startsWith(clusterName))
.collect(Collectors.toList());
for (Service service : services) {
LOGGER.info("Getting labels from {} service", service.getMetadata().getName());
verifyAppLabels(service.getMetadata().getLabels());
}
LOGGER.info("---> SECRETS <---");
List<Secret> secrets = kubeClient(namespaceName).listSecrets(namespaceName).stream()
.filter(secret -> secret.getMetadata().getName().startsWith(clusterName) && secret.getType().equals("Opaque"))
.collect(Collectors.toList());
for (Secret secret : secrets) {
LOGGER.info("Getting labels from {} secret", secret.getMetadata().getName());
verifyAppLabelsForSecretsAndConfigMaps(secret.getMetadata().getLabels());
}
LOGGER.info("---> CONFIG MAPS <---");
List<ConfigMap> configMaps = kubeClient(namespaceName).listConfigMapsInSpecificNamespace(namespaceName, clusterName);
for (ConfigMap configMap : configMaps) {
LOGGER.info("Getting labels from {} config map", configMap.getMetadata().getName());
verifyAppLabelsForSecretsAndConfigMaps(configMap.getMetadata().getLabels());
}
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
@ParallelNamespaceTest
void testUOListeningOnlyUsersInSameCluster(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
final String firstClusterName = "my-cluster-1";
final String secondClusterName = "my-cluster-2";
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(firstClusterName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(secondClusterName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(firstClusterName, userName).build());
LOGGER.info("Verifying that user {} in cluster {} is created", userName, firstClusterName);
String entityOperatorPodName = kubeClient(namespaceName).listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(firstClusterName)).get(0);
String uOLogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, entityOperatorPodName, "user-operator");
assertThat(uOLogs, containsString("User " + userName + " in namespace " + namespaceName + " was ADDED"));
LOGGER.info("Verifying that user {} in cluster {} is not created", userName, secondClusterName);
entityOperatorPodName = kubeClient(namespaceName).listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(secondClusterName)).get(0);
uOLogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, entityOperatorPodName, "user-operator");
assertThat(uOLogs, not(containsString("User " + userName + " in namespace " + namespaceName + " was ADDED")));
LOGGER.info("Verifying that user belongs to {} cluster", firstClusterName);
String kafkaUserResource = cmdKubeClient(namespaceName).getResourceAsYaml("kafkauser", userName);
assertThat(kafkaUserResource, containsString(Labels.STRIMZI_CLUSTER_LABEL + ": " + firstClusterName));
}
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testMessagesAreStoredInDisk(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1).build());
Map<String, String> kafkaPodsSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName));
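        // Snapshot the broker pods so the rolling restart after the pod deletion below can be detected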
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 1, 1).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName =
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
TestUtils.waitFor("KafkaTopic creation inside kafka pod", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT,
() -> cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash",
"-c", "cd /var/lib/kafka/data/kafka-log0; ls -1").out().contains(topicName));
String topicDirNameInPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash",
"-c", "cd /var/lib/kafka/data/kafka-log0; ls -1 | sed -n '/" + topicName + "/p'").out();
String commandToGetDataFromTopic =
"cd /var/lib/kafka/data/kafka-log0/" + topicDirNameInPod + "/;cat 00000000000000000000.log";
LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
String topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
"/bin/bash", "-c", commandToGetDataFromTopic).out();
LOGGER.info("Topic {} is present in kafka broker {} with no data", topicName, KafkaResources.kafkaPodName(clusterName, 0));
assertThat("Topic contains data", topicData, emptyOrNullString());
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
commandToGetDataFromTopic).out();
assertThat("Topic has no data", topicData, notNullValue());
List<Pod> kafkaPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
for (Pod kafkaPod : kafkaPods) {
LOGGER.info("Deleting kafka pod {}", kafkaPod.getMetadata().getName());
kubeClient(namespaceName).deletePod(namespaceName, kafkaPod);
}
        LOGGER.info("Waiting for Kafka to roll ...");
StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 1, kafkaPodsSnapshot);
LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
commandToGetDataFromTopic).out();
assertThat("Topic has no data", topicData, notNullValue());
}
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testConsumerOffsetFiles(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
final Map<String, Object> kafkaConfig = new HashMap<>();
kafkaConfig.put("offsets.topic.replication.factor", "3");
kafkaConfig.put("offsets.topic.num.partitions", "100");
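        // Request 100 __consumer_offsets partitions so the log directory listing can be asserted against the sequence 0..99 below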
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
.editSpec()
.editKafka()
.withConfig(kafkaConfig)
.endKafka()
.endSpec()
.build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 3, 1).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName =
kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
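        // The command below lists the numeric suffixes of the __consumer_offsets-<n> log directories in natural sort order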
String commandToGetFiles = "cd /var/lib/kafka/data/kafka-log0/;" +
"ls -1 | sed -n \"s#__consumer_offsets-\\([0-9]*\\)#\\1#p\" | sort -V";
LOGGER.info("Executing command {} in {}", commandToGetFiles, KafkaResources.kafkaPodName(clusterName, 0));
String result = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
"/bin/bash", "-c", commandToGetFiles).out();
// TODO / FIXME
//assertThat("Folder kafka-log0 has data in files:\n" + result, result.equals(""));
LOGGER.info("Result: \n" + result);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
LOGGER.info("Executing command {} in {}", commandToGetFiles, KafkaResources.kafkaPodName(clusterName, 0));
result = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
"/bin/bash", "-c", commandToGetFiles).out();
StringBuilder stringToMatch = new StringBuilder();
for (int i = 0; i < 100; i++) {
stringToMatch.append(i).append("\n");
}
assertThat("Folder kafka-log0 doesn't contain 100 files", result, containsString(stringToMatch.toString()));
}
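    // Verifies that labels and annotations declared in the Kafka and ZooKeeper PVC templates are applied to the
    // created PersistentVolumeClaims, and that editing them in the Kafka CR propagates to the existing PVCs.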
@ParallelNamespaceTest
void testLabelsAndAnnotationForPVC(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String labelAnnotationKey = "testKey";
final String firstValue = "testValue";
final String changedValue = "editedTestValue";
Map<String, String> pvcLabel = new HashMap<>();
pvcLabel.put(labelAnnotationKey, firstValue);
Map<String, String> pvcAnnotation = pvcLabel;
Map<String, String> statefulSetLabels = new HashMap<>();
statefulSetLabels.put("app.kubernetes.io/part-of", "some-app");
statefulSetLabels.put("app.kubernetes.io/managed-by", "some-app");
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
.editSpec()
.editKafka()
.withNewTemplate()
.withNewStatefulset()
.withNewMetadata()
.withLabels(statefulSetLabels)
.endMetadata()
.endStatefulset()
.withNewPersistentVolumeClaim()
.withNewMetadata()
.addToLabels(pvcLabel)
.addToAnnotations(pvcAnnotation)
.endMetadata()
.endPersistentVolumeClaim()
.endTemplate()
.withStorage(new JbodStorageBuilder().withVolumes(
new PersistentClaimStorageBuilder()
.withDeleteClaim(false)
.withId(0)
.withSize("20Gi")
.build(),
new PersistentClaimStorageBuilder()
.withDeleteClaim(true)
.withId(1)
.withSize("10Gi")
.build())
.build())
.endKafka()
.editZookeeper()
.withNewTemplate()
.withNewPersistentVolumeClaim()
.withNewMetadata()
.addToLabels(pvcLabel)
.addToAnnotations(pvcAnnotation)
.endMetadata()
.endPersistentVolumeClaim()
.endTemplate()
.withNewPersistentClaimStorage()
.withDeleteClaim(false)
.withId(0)
.withSize("3Gi")
.endPersistentClaimStorage()
.endZookeeper()
.endSpec()
.build());
LOGGER.info("Check if Kubernetes labels are applied");
Map<String, String> actualStatefulSetLabels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getLabels();
assertThat(actualStatefulSetLabels.get("app.kubernetes.io/part-of"), is("some-app"));
assertThat(actualStatefulSetLabels.get("app.kubernetes.io/managed-by"), is("some-app"));
LOGGER.info("Kubernetes labels are correctly set and present");
List<PersistentVolumeClaim> pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(
persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());
assertThat(pvcs.size(), is(7));
for (PersistentVolumeClaim pvc : pvcs) {
LOGGER.info("Verifying that PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
assertThat(firstValue, is(pvc.getMetadata().getLabels().get(labelAnnotationKey)));
assertThat(firstValue, is(pvc.getMetadata().getAnnotations().get(labelAnnotationKey)));
}
pvcLabel.put(labelAnnotationKey, changedValue);
pvcAnnotation.put(labelAnnotationKey, changedValue);
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
LOGGER.info("Replacing kafka && zookeeper labels and annotations from {} to {}", labelAnnotationKey, changedValue);
kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
}, namespaceName);
PersistentVolumeClaimUtils.waitUntilPVCLabelsChange(namespaceName, clusterName, pvcLabel, labelAnnotationKey);
PersistentVolumeClaimUtils.waitUntilPVCAnnotationChange(namespaceName, clusterName, pvcAnnotation, labelAnnotationKey);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(
persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());
LOGGER.info(pvcs.toString());
assertThat(pvcs.size(), is(7));
for (PersistentVolumeClaim pvc : pvcs) {
LOGGER.info("Verifying replaced PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
assertThat(pvc.getMetadata().getLabels().get(labelAnnotationKey), is(changedValue));
assertThat(pvc.getMetadata().getAnnotations().get(labelAnnotationKey), is(changedValue));
}
}
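    // Verifies that a Kafka CR whose offsets/transaction-log replication factors exceed the number of replicas
    // is rejected with a descriptive status condition instead of being deployed.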
@ParallelNamespaceTest
void testKafkaOffsetsReplicationFactorHigherThanReplicas(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, false, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
.editSpec()
.editKafka()
.addToConfig("offsets.topic.replication.factor", 4)
.addToConfig("transaction.state.log.min.isr", 4)
.addToConfig("transaction.state.log.replication.factor", 4)
.endKafka()
.endSpec().build());
KafkaUtils.waitUntilKafkaStatusConditionContainsMessage(clusterName, namespaceName,
"Kafka configuration option .* should be set to " + 3 + " or less because 'spec.kafka.replicas' is " + 3);
}
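    // Verifies that Kafka, ZooKeeper, Entity Operator, Kafka Exporter and Cruise Control containers still work
    // when their root filesystem is mounted read-only.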
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@Tag(CRUISE_CONTROL)
void testReadOnlyRootFileSystem(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
.editSpec()
.editKafka()
.withNewTemplate()
.withNewKafkaContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endKafkaContainer()
.endTemplate()
.endKafka()
.editZookeeper()
.withNewTemplate()
.withNewZookeeperContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endZookeeperContainer()
.endTemplate()
.endZookeeper()
.editEntityOperator()
.withNewTemplate()
.withNewTlsSidecarContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endTlsSidecarContainer()
.withNewTopicOperatorContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endTopicOperatorContainer()
.withNewUserOperatorContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endUserOperatorContainer()
.endTemplate()
.endEntityOperator()
.editOrNewKafkaExporter()
.withNewTemplate()
.withNewContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endContainer()
.endTemplate()
.endKafkaExporter()
.editOrNewCruiseControl()
.withNewTemplate()
.withNewTlsSidecarContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endTlsSidecarContainer()
.withNewCruiseControlContainer()
.withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
.endCruiseControlContainer()
.endTemplate()
.endCruiseControl()
.endSpec()
.build());
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());
final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
.withUsingPodName(kafkaClientsPodName)
.withTopicName(topicName)
.withNamespaceName(namespaceName)
.withClusterName(clusterName)
.withMessageCount(MESSAGE_COUNT)
.withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
.build();
LOGGER.info("Checking produced and consumed messages to pod:{}", kafkaClientsPodName);
internalKafkaClient.checkProducedAndConsumedMessages(
internalKafkaClient.sendMessagesPlain(),
internalKafkaClient.receiveMessagesPlain()
);
}
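    // Asserts that the expected configuration entries are present both in the generated <cluster>-kafka-config
    // ConfigMap and in /tmp/strimzi.properties inside every Kafka pod.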
protected void checkKafkaConfiguration(String namespaceName, String podNamePrefix, Map<String, Object> config, String clusterName) {
LOGGER.info("Checking kafka configuration");
List<Pod> pods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, podNamePrefix);
Properties properties = configMap2Properties(kubeClient(namespaceName).getConfigMap(namespaceName, clusterName + "-kafka-config"));
for (Map.Entry<String, Object> property : config.entrySet()) {
String key = property.getKey();
Object val = property.getValue();
assertThat(properties.keySet().contains(key), is(true));
assertThat(properties.getProperty(key), is(val));
}
for (Pod pod: pods) {
ExecResult result = cmdKubeClient(namespaceName).execInPod(pod.getMetadata().getName(), "/bin/bash", "-c", "cat /tmp/strimzi.properties");
Properties execProperties = stringToProperties(result.out());
for (Map.Entry<String, Object> property : config.entrySet()) {
String key = property.getKey();
Object val = property.getValue();
assertThat(execProperties.keySet().contains(key), is(true));
assertThat(execProperties.getProperty(key), is(val));
}
}
}
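    // Asserts that every PVC requests the expected storage size, iterating over each Kafka replica and disk.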
void checkStorageSizeForVolumes(List<PersistentVolumeClaim> volumes, String[] diskSizes, int kafkaRepl, int diskCount) {
int k = 0;
for (int i = 0; i < kafkaRepl; i++) {
for (int j = 0; j < diskCount; j++) {
LOGGER.info("Checking volume {} and size of storage {}", volumes.get(k).getMetadata().getName(),
volumes.get(k).getSpec().getResources().getRequests().get("storage"));
assertThat(volumes.get(k).getSpec().getResources().getRequests().get("storage"), is(new Quantity(diskSizes[i])));
k++;
}
}
}
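    // Checks Strimzi labels and requested sizes on the Kafka PVCs, the JBOD PVC naming convention,
    // and that each Kafka pod actually mounts the expected data volumes and claims.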
void verifyVolumeNamesAndLabels(String namespaceName, String clusterName, int kafkaReplicas, int diskCountPerReplica, String diskSizeGi) {
ArrayList<String> pvcs = new ArrayList<>();
kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
.filter(pvc -> pvc.getMetadata().getName().contains(clusterName + "-kafka"))
.forEach(volume -> {
String volumeName = volume.getMetadata().getName();
pvcs.add(volumeName);
LOGGER.info("Checking labels for volume:" + volumeName);
assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL), is(clusterName));
assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_KIND_LABEL), is(Kafka.RESOURCE_KIND));
assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_NAME_LABEL), is(clusterName.concat("-kafka")));
assertThat(volume.getSpec().getResources().getRequests().get("storage"), is(new Quantity(diskSizeGi, "Gi")));
});
LOGGER.info("Checking PVC names included in JBOD array");
for (int i = 0; i < kafkaReplicas; i++) {
for (int j = 0; j < diskCountPerReplica; j++) {
assertThat(pvcs.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true));
}
}
LOGGER.info("Checking PVC on Kafka pods");
for (int i = 0; i < kafkaReplicas; i++) {
ArrayList<String> dataSourcesOnPod = new ArrayList<>();
ArrayList<String> pvcsOnPod = new ArrayList<>();
LOGGER.info("Getting list of mounted data sources and PVCs on Kafka pod " + i);
for (int j = 0; j < diskCountPerReplica; j++) {
dataSourcesOnPod.add(kubeClient(namespaceName).getPod(namespaceName, clusterName.concat("-kafka-" + i))
.getSpec().getVolumes().get(j).getName());
pvcsOnPod.add(kubeClient(namespaceName).getPod(namespaceName, clusterName.concat("-kafka-" + i))
.getSpec().getVolumes().get(j).getPersistentVolumeClaim().getClaimName());
}
LOGGER.info("Verifying mounted data sources and PVCs on Kafka pod " + i);
for (int j = 0; j < diskCountPerReplica; j++) {
assertThat(dataSourcesOnPod.contains("data-" + j), is(true));
assertThat(pvcsOnPod.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true));
}
}
}
void verifyPresentLabels(Map<String, String> labels, HasMetadata resources) {
for (Map.Entry<String, String> label : labels.entrySet()) {
assertThat("Label exists with concrete value in HasMetadata(Services, CM, STS) resources",
label.getValue().equals(resources.getMetadata().getLabels().get(label.getKey())));
}
}
void verifyNullLabels(String[] labelKeys, Map<String, String> labels) {
for (String labelKey : labelKeys) {
assertThat(labels.get(labelKey), nullValue());
}
}
void verifyNullLabels(String[] labelKeys, HasMetadata resources) {
for (String labelKey : labelKeys) {
assertThat(resources.getMetadata().getLabels().get(labelKey), nullValue());
}
}
void verifyAppLabels(Map<String, String> labels) {
LOGGER.info("Verifying labels {}", labels);
assertThat("Label " + Labels.STRIMZI_CLUSTER_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_CLUSTER_LABEL));
assertThat("Label " + Labels.STRIMZI_KIND_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_KIND_LABEL));
assertThat("Label " + Labels.STRIMZI_NAME_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_NAME_LABEL));
}
void verifyAppLabelsForSecretsAndConfigMaps(Map<String, String> labels) {
LOGGER.info("Verifying labels {}", labels);
assertThat("Label " + Labels.STRIMZI_CLUSTER_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_CLUSTER_LABEL));
assertThat("Label " + Labels.STRIMZI_KIND_LABEL + " is not present", labels.containsKey(Labels.STRIMZI_KIND_LABEL));
}
@BeforeAll
void setup(ExtensionContext extensionContext) {
install = new SetupClusterOperator.SetupClusterOperatorBuilder()
.withExtensionContext(extensionContext)
.withNamespace(NAMESPACE)
.withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES)
.createInstallation()
.runInstallation();
}
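    // Default per-test cleanup: deletes created resources, the custom resource template, the Kafka cluster named
    // by OPENSHIFT_CLUSTER_NAME together with its pods, and any leftover KafkaTopics and PVCs in the namespace.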
protected void afterEachMayOverride(ExtensionContext extensionContext) throws Exception {
resourceManager.deleteResources(extensionContext);
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
if (cluster.getListOfDeployedResources().contains(TEMPLATE_PATH)) {
cluster.deleteCustomResources(extensionContext, TEMPLATE_PATH);
}
if (KafkaResource.kafkaClient().inNamespace(namespaceName).withName(OPENSHIFT_CLUSTER_NAME).get() != null) {
cmdKubeClient(namespaceName).deleteByName(Kafka.RESOURCE_KIND, OPENSHIFT_CLUSTER_NAME);
}
kubeClient(namespaceName).listPods(namespaceName).stream()
.filter(p -> p.getMetadata().getName().startsWith(OPENSHIFT_CLUSTER_NAME))
.forEach(p -> PodUtils.deletePodWithWait(p.getMetadata().getName()));
kubeClient(namespaceName).getClient().customResources(CustomResourceDefinitionContext.fromCrd(Crds.kafkaTopic()), KafkaTopic.class, KafkaTopicList.class).inNamespace(namespaceName).delete();
kubeClient(namespaceName).getClient().persistentVolumeClaims().inNamespace(namespaceName).delete();
}
}
| scholzj/barnabas | systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java | Java | apache-2.0 | 107,527 |
package com.basicalgorithms.coding_games;
import java.util.HashSet;
import java.util.Objects;
import java.util.Scanner;
import java.util.Set;
/**
* Original question: https://www.codingame.com/multiplayer/bot-programming/coders-strike-back
*/
public class CodersStrikeBack {
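    // Mutable race state kept across game-loop iterations: the longest checkpoint-to-checkpoint leg seen so far,
    // the starting point, lap/boost flags and the set of checkpoints visited.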
static double longestDist = Integer.MIN_VALUE;
static Point initialPoint = null;
static boolean hasFinishedOneLap;
static Point from = null;
static Point lastCheckpoint = null;
static final Set<Point> visitedCheckPoints = new HashSet<>();
static boolean hasBoosted = false;
    public static void main(String[] args) {
Scanner in = new Scanner(System.in);
// game loop
while (true) {
int x = in.nextInt();
int y = in.nextInt();
int nextCheckpointX = in.nextInt(); // x position of the next check point
int nextCheckpointY = in.nextInt(); // y position of the next check point
int nextCheckpointDist = in.nextInt(); // distance to the next checkpoint
int nextCheckpointAngle = in.nextInt(); // angle between your pod orientation and the direction of the next checkpoint
int opponentX = in.nextInt();
int opponentY = in.nextInt();
// Write an action using System.out.println()
// To debug: System.err.println("Debug messages...");
// You have to output the target position
// followed by the power (0 <= thrust <= 100)
// i.e.: "x y thrust"
final Point nextCheckpoint = new Point(nextCheckpointX, nextCheckpointY);
final Point currentPosition = new Point(x, y);
final Point enemyPosition = new Point(opponentX, opponentY);
if (visitedCheckPoints.size() > 1 && enemyInRange(currentPosition, enemyPosition)) {
ramEnemyShip(currentPosition, enemyPosition);
} else {
cruise(currentPosition, nextCheckpoint, nextCheckpointAngle);
}
if (!nextCheckpoint.equals(lastCheckpoint)) {
from = lastCheckpoint;
}
lastCheckpoint = nextCheckpoint;
}
}
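    // Rams the enemy: steer straight at the opponent's position at full thrust.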
private static void ramEnemyShip(final Point currentPosition, final Point enemyPosition) {
        sailToDestination(enemyPosition.x, enemyPosition.y, "100");
}
private static boolean enemyInRange(final Point currentPosition, final Point enemyPosition) {
return getDistant(currentPosition, enemyPosition) <= 1000;
}
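    // Regular navigation: head for the next checkpoint, cut thrust when it is more than 90 degrees off,
    // slow down when close, and spend the single BOOST on the longest leg once a full lap has been completed.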
private static void cruise(
final Point currentPosition,
final Point nextCheckpoint,
final int nextCheckpointAngle) {
if (initialPoint == null) {
initialPoint = currentPosition;
}
int thrust = isWithinAngle(nextCheckpointAngle) ? 100 : 0;
String power = String.valueOf(thrust);
visitedCheckPoints.add(nextCheckpoint);
System.err.println(
"Checkpoint added:" + " nextCheckpointX=" + nextCheckpoint.x + ", nextCheckpointY=" + nextCheckpoint.y);
for (final Point visitedCheckPoint : visitedCheckPoints) {
System.err.println("Visited checkpoint: (" + visitedCheckPoint.x + ", " + visitedCheckPoint.y + ")");
}
if (shouldSlowDown(currentPosition, nextCheckpoint)) {
power = String.valueOf(35);
}
if (hasFinishedOneLap(nextCheckpoint) &&
isLongestDistant(from, nextCheckpoint) &&
isWithinSharpAngle(nextCheckpointAngle) &&
!hasBoosted) {
power = "BOOST";
hasBoosted = true;
System.err.println("Boosted!!!");
}
sailToDestination(nextCheckpoint.x, nextCheckpoint.y, power);
}
private static boolean shouldSlowDown(
final Point currentPosition,
final Point nextCheckpoint) {
return getDistant(currentPosition, nextCheckpoint) < 1000;
}
private static void sailToDestination(final int nextCheckpointX, final int nextCheckpointY, final String power) {
System.out.println(nextCheckpointX + " " + nextCheckpointY + " " + power);
System.err.println("Thrust:" + power);
}
private static boolean isWithinAngle(final int nextCheckpointAngle) {
return -90 < nextCheckpointAngle && nextCheckpointAngle < 90;
}
private static boolean isWithinSharpAngle(final int nextCheckpointAngle) {
return -15 < nextCheckpointAngle && nextCheckpointAngle < 15;
}
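    // A lap counts as finished once the next checkpoint lies within checkpoint radius (600 units) of the start.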
private static boolean hasFinishedOneLap(final Point point) {
if (hasFinishedOneLap) {
return true;
}
if (initialPoint == null) { return false; }
hasFinishedOneLap = getDistant(initialPoint, point) <= 600;
return hasFinishedOneLap;
}
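    // Tracks the longest leg between checkpoints seen so far; returns true when the current leg is the longest,
    // which is where the BOOST is worth spending.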
private static boolean isLongestDistant(final Point from, final Point endPoint) {
if (from == null) {
return false;
}
System.err.println("Start Point: (" + from.x + ", " + from.y + "); End Point: ("
+ endPoint.x + ", " + endPoint.y + ") ");
double dist = getDistant(from, endPoint);
System.err.println("dist=" + dist + ", longestDist=" + longestDist);
if (dist >= longestDist) {
longestDist = dist;
return true;
}
return false;
}
private static double getDistant(final Point from, final Point endPoint) {
return Math.sqrt(Math.pow(from.x - endPoint.x, 2) + Math.pow(from.y - endPoint.y, 2));
}
private static class Point {
final int x;
final int y;
private Point(final int t1, final int t2) {
this.x = t1;
this.y = t2;
}
@Override
public boolean equals(final Object o) {
if (this == o) { return true; }
if (!(o instanceof Point)) { return false; }
final Point point = (Point) o;
return x == point.x &&
y == point.y;
}
@Override
public int hashCode() {
return Objects.hash(x, y);
}
}
}
| Ericliu001/basic-algorithms | src/test/java/com/basicalgorithms/coding_games/CodersStrikeBack.java | Java | apache-2.0 | 6,186 |
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.client;
import com.google.inject.AbstractModule;
import com.google.inject.Inject;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
* @author Simon Thoresen Hult
*/
public class ClientDriverTestCase {
@Test
public void requireThatApplicationInstanceInjectionWorks() throws Exception {
MyModule module = new MyModule();
ClientDriver.runApplication(new MyApplication(module));
assertEquals(5, module.state);
}
@Test
public void requireThatApplicationClassInjectionWorks() throws Exception {
MyModule module = new MyModule();
ClientDriver.runApplication(MyApplication.class, module);
assertEquals(5, module.state);
}
private static class MyApplication implements ClientApplication {
final MyModule module;
@Inject
MyApplication(MyModule module) {
this.module = module;
module.state = 1;
}
@Override
public void start() {
if (++module.state != 2) {
throw new IllegalStateException();
}
}
@Override
public void run() {
if (++module.state != 3) {
throw new IllegalStateException();
}
}
@Override
public void stop() {
if (++module.state != 4) {
throw new IllegalStateException();
}
}
@Override
public void destroy() {
if (++module.state != 5) {
throw new IllegalStateException();
}
}
}
private static class MyModule extends AbstractModule {
int state = 0;
@Override
protected void configure() {
bind(MyModule.class).toInstance(this);
}
}
}
| vespa-engine/vespa | jdisc_core/src/test/java/com/yahoo/jdisc/client/ClientDriverTestCase.java | Java | apache-2.0 | 1,946 |
package sample.multiversion;
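/** Minimal interface used by the multi-version sample: reports its own version and its dependency's version. */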
public interface Core {
String getVersion();
String getDependencyVersion();
}
| omacarena/only-short-poc | java.multiversion/v1/src/main/sample/multiversion/Core.java | Java | apache-2.0 | 117 |
package org.example;
import org.camunda.bpm.spring.boot.starter.annotation.EnableProcessApplication;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication
@EnableProcessApplication("dynamic-tenant-designation")
public class CamundaApplication {
public static void main(String... args) {
SpringApplication.run(CamundaApplication.class, args);
}
}
| camunda/camunda-consulting | snippets/dynamic-tenant-designation/src/main/java/org/example/CamundaApplication.java | Java | apache-2.0 | 445 |
package org.galaxy.myhttp;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* To work on unit tests, switch the Test Artifact in the Build Variants view.
*/
public class ExampleUnitTest {
@Test
public void addition_isCorrect() throws Exception {
assertEquals(4, 2 + 2);
}
} | galaxy-captain/MyHttp | app/src/test/java/org/galaxy/myhttp/ExampleUnitTest.java | Java | apache-2.0 | 310 |
package org.commcare;
import org.commcare.models.database.UnencryptedHybridFileBackedSqlStorage;
import org.commcare.models.database.UnencryptedHybridFileBackedSqlStorageMock;
import org.javarosa.core.services.storage.Persistable;
/**
* Delegator around CommCareApp allowing the test suite to override logic.
*
* @author Phillip Mates (pmates@dimagi.com).
*/
public class CommCareTestApp extends CommCareApp {
private final CommCareApp app;
public CommCareTestApp(CommCareApp app) {
super(app.getAppRecord());
fileRoot = app.fileRoot;
setAppResourceState(app.getAppResourceState());
this.app = app;
}
@Override
public <T extends Persistable> UnencryptedHybridFileBackedSqlStorage<T> getFileBackedStorage(String name, Class<T> c) {
return new UnencryptedHybridFileBackedSqlStorageMock<>(name, c, app.buildAndroidDbHelper(), app);
}
}
| dimagi/commcare-android | app/unit-tests/src/org/commcare/CommCareTestApp.java | Java | apache-2.0 | 906 |
/**
* Copyright (C) 2014-2015 LinkedIn Corp. (pinot-core@linkedin.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.linkedin.pinot.core.startree;
import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.joda.time.DateTime;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Objects;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.linkedin.pinot.common.data.DimensionFieldSpec;
import com.linkedin.pinot.common.data.MetricFieldSpec;
import com.linkedin.pinot.common.data.FieldSpec.DataType;
import com.linkedin.pinot.common.data.Schema;
import com.linkedin.pinot.common.utils.Pairs.IntPair;
import com.linkedin.pinot.core.data.GenericRow;
import com.linkedin.pinot.core.segment.creator.impl.V1Constants;
/**
 * Uses a file to build the star tree. Each row is divided into dimensions and metrics; time is added to the
 * dimension list. We use the split order to build the tree. In most cases, the split order is ranked by
 * cardinality (descending order). The time column is either excluded or is the last entry in the split order,
 * irrespective of its cardinality.
 * This is a recursive algorithm where we branch on one dimension at every level.
 *
 * <b>Pseudo algorithm</b>
* <code>
*
* build(){
* let table(1,N) consists of N input rows
* table.sort(1,N) //sort the table on all dimensions, according to split order
* constructTree(table, 0, N, 0);
* }
* constructTree(table,start,end, level){
* splitDimensionName = dimensionsSplitOrder[level]
* groupByResult<dimName, length> = table.groupBy(dimensionsSplitOrder[level]); //returns the number of rows for each value in splitDimension
* int rangeStart = 0;
 * for each (entry<dimName,length> in groupByResult){
* if(entry.length > minThreshold){
* constructTree(table, rangeStart, rangeStart + entry.length, level +1);
* }
* rangeStart = rangeStart + entry.length;
* updateStarTree() //add new child
* }
*
* //create a star tree node
*
* aggregatedRows = table.uniqueAfterRemovingAttributeAndAggregateMetrics(start,end, splitDimensionName);
 * for(each row in aggregatedRows)
 * table.add(row);
 * if(aggregatedRows.size > minThreshold) {
* table.sort(end, end + aggregatedRows.size);
* constructStarTree(table, end, end + aggregatedRows.size, level +1);
* }
* }
* </code>
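 *
 * <p>A minimal usage sketch (the {@code builderConfig} and {@code rows} below are assumptions supplied by the
 * caller; they are not defined in this class):
 * <pre>
 *   OffHeapStarTreeBuilder builder = new OffHeapStarTreeBuilder();
 *   builder.init(builderConfig);   // StarTreeBuilderConfig carrying schema, split order, max leaf records, out dir
 *   for (GenericRow row : rows) {
 *     builder.append(row);         // buffer each raw record
 *   }
 *   builder.build();               // sort, split and serialize the tree to the output directory
 *   StarTree tree = builder.getTree();
 * </pre>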
*/
public class OffHeapStarTreeBuilder implements StarTreeBuilder {
private static final Logger LOG = LoggerFactory.getLogger(OffHeapStarTreeBuilder.class);
File dataFile;
private DataOutputStream dataBuffer;
int rawRecordCount = 0;
int aggRecordCount = 0;
private List<String> dimensionsSplitOrder;
private Set<String> skipStarNodeCreationForDimensions;
private Set<String> skipMaterializationForDimensions;
private int maxLeafRecords;
private StarTree starTree;
private StarTreeIndexNode starTreeRootIndexNode;
private int numDimensions;
private int numMetrics;
private List<String> dimensionNames;
private List<String> metricNames;
private String timeColumnName;
private List<DataType> dimensionTypes;
private List<DataType> metricTypes;
private Map<String, Object> dimensionNameToStarValueMap;
private HashBiMap<String, Integer> dimensionNameToIndexMap;
private Map<String, Integer> metricNameToIndexMap;
private int dimensionSizeBytes;
private int metricSizeBytes;
private File outDir;
private Map<String, HashBiMap<Object, Integer>> dictionaryMap;
boolean debugMode = false;
private int[] sortOrder;
private int skipMaterializationCardinalityThreshold;
public void init(StarTreeBuilderConfig builderConfig) throws Exception {
Schema schema = builderConfig.schema;
timeColumnName = schema.getTimeColumnName();
this.dimensionsSplitOrder = builderConfig.dimensionsSplitOrder;
skipStarNodeCreationForDimensions = builderConfig.getSkipStarNodeCreationForDimensions();
skipMaterializationForDimensions = builderConfig.getSkipMaterializationForDimensions();
skipMaterializationCardinalityThreshold = builderConfig.getSkipMaterializationCardinalityThreshold();
this.maxLeafRecords = builderConfig.maxLeafRecords;
this.outDir = builderConfig.getOutDir();
if (outDir == null) {
outDir = new File(System.getProperty("java.io.tmpdir"), V1Constants.STAR_TREE_INDEX_DIR + "_" + DateTime.now());
}
LOG.debug("Index output directory:{}", outDir);
dimensionTypes = new ArrayList<>();
dimensionNames = new ArrayList<>();
dimensionNameToIndexMap = HashBiMap.create();
dimensionNameToStarValueMap = new HashMap<>();
dictionaryMap = new HashMap<>();
//READ DIMENSIONS COLUMNS
List<DimensionFieldSpec> dimensionFieldSpecs = schema.getDimensionFieldSpecs();
for (int index = 0; index < dimensionFieldSpecs.size(); index++) {
DimensionFieldSpec spec = dimensionFieldSpecs.get(index);
String dimensionName = spec.getName();
dimensionNames.add(dimensionName);
dimensionNameToIndexMap.put(dimensionName, index);
Object starValue;
starValue = getAllStarValue(spec);
dimensionNameToStarValueMap.put(dimensionName, starValue);
dimensionTypes.add(spec.getDataType());
HashBiMap<Object, Integer> dictionary = HashBiMap.create();
dictionaryMap.put(dimensionName, dictionary);
}
//treat time column as just another dimension, only difference is that we will never split on this dimension unless explicitly specified in split order
if (timeColumnName != null) {
dimensionNames.add(timeColumnName);
dimensionTypes.add(schema.getTimeFieldSpec().getDataType());
int index = dimensionNameToIndexMap.size();
dimensionNameToIndexMap.put(timeColumnName, index);
HashBiMap<Object, Integer> dictionary = HashBiMap.create();
dictionaryMap.put(schema.getTimeColumnName(), dictionary);
}
dimensionSizeBytes = dimensionNames.size() * Integer.SIZE / 8;
this.numDimensions = dimensionNames.size();
//READ METRIC COLUMNS
this.metricTypes = new ArrayList<>();
this.metricNames = new ArrayList<>();
this.metricNameToIndexMap = new HashMap<>();
this.metricSizeBytes = 0;
List<MetricFieldSpec> metricFieldSpecs = schema.getMetricFieldSpecs();
for (int index = 0; index < metricFieldSpecs.size(); index++) {
MetricFieldSpec spec = metricFieldSpecs.get(index);
String metricName = spec.getName();
metricNames.add(metricName);
metricNameToIndexMap.put(metricName, index);
DataType dataType = spec.getDataType();
metricTypes.add(dataType);
metricSizeBytes += dataType.size();
}
this.numMetrics = metricNames.size();
builderConfig.getOutDir().mkdirs();
dataFile = new File(outDir, "star-tree.buf");
dataBuffer = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(dataFile)));
//INITIALIZE THE ROOT NODE
this.starTreeRootIndexNode = new StarTreeIndexNode();
this.starTreeRootIndexNode.setDimensionName(StarTreeIndexNode.all());
this.starTreeRootIndexNode.setDimensionValue(StarTreeIndexNode.all());
this.starTreeRootIndexNode.setLevel(0);
LOG.debug("dimensionNames:{}", dimensionNames);
LOG.debug("metricNames:{}", metricNames);
}
/**
* Validate the split order by removing any dimensions that may be part of the skip materialization list.
   * @param dimensionsSplitOrder the requested split order
   * @param skipMaterializationForDimensions dimensions that will not be materialized
   * @return the split order with the skip-materialization dimensions removed
*/
private List<String> sanitizeSplitOrder(List<String> dimensionsSplitOrder,
Set<String> skipMaterializationForDimensions) {
List<String> validatedSplitOrder = new ArrayList<String>();
for (String dimension : dimensionsSplitOrder) {
if (skipMaterializationForDimensions == null || !skipMaterializationForDimensions.contains(dimension)) {
LOG.info("Adding dimension {} to split order", dimension);
validatedSplitOrder.add(dimension);
} else {
LOG.info(
"Dimension {} cannot be part of 'dimensionSplitOrder' and 'skipMaterializationForDimensions', removing it from split order",
dimension);
}
}
return validatedSplitOrder;
}
private Object getAllStarValue(DimensionFieldSpec spec) throws Exception {
switch (spec.getDataType()) {
case STRING:
return "ALL";
case BOOLEAN:
case BYTE:
case CHAR:
case DOUBLE:
case FLOAT:
case INT:
case LONG:
return spec.getDefaultNullValue();
case OBJECT:
case SHORT:
case DOUBLE_ARRAY:
case CHAR_ARRAY:
case FLOAT_ARRAY:
case INT_ARRAY:
case LONG_ARRAY:
case SHORT_ARRAY:
case STRING_ARRAY:
case BYTE_ARRAY:
default:
throw new Exception("Unsupported dimension data type" + spec);
}
}
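  // Converts an internal record (dictionary-encoded dimensions plus metric buffer) back into a GenericRow,
  // mapping dictionary ids to their original values and falling back to the star value when no mapping exists.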
public GenericRow toGenericRow(DimensionBuffer dimensionKey, MetricBuffer metricsHolder) {
GenericRow row = new GenericRow();
Map<String, Object> map = new HashMap<>();
for (int i = 0; i < dimensionNames.size(); i++) {
String dimName = dimensionNames.get(i);
BiMap<Integer, Object> inverseDictionary = dictionaryMap.get(dimName).inverse();
Object dimValue = inverseDictionary.get(dimensionKey.getDimension(i));
if (dimValue == null) {
dimValue = dimensionNameToStarValueMap.get(dimName);
}
map.put(dimName, dimValue);
}
for (int i = 0; i < numMetrics; i++) {
String metName = metricNames.get(i);
map.put(metName, metricsHolder.get(i));
}
row.init(map);
return row;
}
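  // Dictionary-encodes the dimension values of the row (assigning new ids on first use), collects the metric
  // values and appends the encoded record to the raw buffer.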
public void append(GenericRow row) throws Exception {
DimensionBuffer dimension = new DimensionBuffer(numDimensions);
for (int i = 0; i < dimensionNames.size(); i++) {
String dimName = dimensionNames.get(i);
Map<Object, Integer> dictionary = dictionaryMap.get(dimName);
Object dimValue = row.getValue(dimName);
if (dimValue == null) {
//TODO: Have another default value to represent STAR. Using default value to represent STAR as of now.
//It does not matter during query execution, since we know that values is STAR from the star tree
dimValue = dimensionNameToStarValueMap.get(dimName);
}
if (!dictionary.containsKey(dimValue)) {
dictionary.put(dimValue, dictionary.size());
}
dimension.setDimension(i, dictionary.get(dimValue));
}
Number[] numbers = new Number[numMetrics];
for (int i = 0; i < numMetrics; i++) {
String metName = metricNames.get(i);
numbers[i] = (Number) row.getValue(metName);
}
MetricBuffer metrics = new MetricBuffer(numbers);
append(dimension, metrics);
}
public void append(DimensionBuffer dimension, MetricBuffer metrics) throws Exception {
appendToRawBuffer(dimension, metrics);
}
private void appendToRawBuffer(DimensionBuffer dimension, MetricBuffer metrics) throws IOException {
appendToBuffer(dataBuffer, dimension, metrics);
rawRecordCount++;
}
private void appendToAggBuffer(DimensionBuffer dimension, MetricBuffer metrics) throws IOException {
appendToBuffer(dataBuffer, dimension, metrics);
aggRecordCount++;
}
private void appendToBuffer(DataOutputStream dos, DimensionBuffer dimensions, MetricBuffer metricHolder)
throws IOException {
for (int i = 0; i < numDimensions; i++) {
dos.writeInt(dimensions.getDimension(i));
}
dos.write(metricHolder.toBytes(metricSizeBytes, metricTypes));
}
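  // Builds the star tree: resolves default skip-materialization dimensions and split order, sorts the raw
  // records, recursively constructs the tree and serializes it as star-tree.bin in the output directory.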
public void build() throws Exception {
if (skipMaterializationForDimensions == null || skipMaterializationForDimensions.isEmpty()) {
skipMaterializationForDimensions = computeDefaultDimensionsToSkipMaterialization();
}
if (dimensionsSplitOrder == null || dimensionsSplitOrder.isEmpty()) {
dimensionsSplitOrder = computeDefaultSplitOrder();
}
// Remove any dimensions from split order that would be not be materialized.
dimensionsSplitOrder = sanitizeSplitOrder(dimensionsSplitOrder, skipMaterializationForDimensions);
LOG.debug("Split order:{}", dimensionsSplitOrder);
long start = System.currentTimeMillis();
dataBuffer.flush();
sort(dataFile, 0, rawRecordCount);
constructStarTree(starTreeRootIndexNode, 0, rawRecordCount, 0, dataFile);
long end = System.currentTimeMillis();
LOG.debug("Took {} ms to build star tree index. Original records:{} Materialized record:{}", (end - start),
rawRecordCount, aggRecordCount);
starTree = new StarTree(starTreeRootIndexNode, dimensionNameToIndexMap);
File treeBinary = new File(outDir, "star-tree.bin");
LOG.debug("Saving tree binary at: {} ", treeBinary);
starTree.writeTree(new BufferedOutputStream(new FileOutputStream(treeBinary)));
printTree(starTreeRootIndexNode, 0);
LOG.debug("Finished build tree. out dir: {} ", outDir);
dataBuffer.close();
}
private void printTree(StarTreeIndexNode node, int level) {
for (int i = 0; i < level; i++) {
LOG.debug(" ");
}
BiMap<Integer, String> inverse = dimensionNameToIndexMap.inverse();
String dimName = "ALL";
Object dimValue = "ALL";
if (node.getDimensionName() != StarTreeIndexNode.all()) {
dimName = inverse.get(node.getDimensionName());
}
if (node.getDimensionValue() != StarTreeIndexNode.all()) {
dimValue = dictionaryMap.get(dimName).inverse().get(node.getDimensionValue());
}
String formattedOutput =
Objects.toStringHelper(node).add("nodeId", node.getNodeId()).add("level", level).add("dimensionName", dimName)
.add("dimensionValue", dimValue).add("childDimensionName", inverse.get(node.getChildDimensionName()))
.add("childCount", node.getChildren() == null ? 0 : node.getChildren().size())
.add("startDocumentId", node.getStartDocumentId()).add("endDocumentId", node.getEndDocumentId())
.add("documentCount", (node.getEndDocumentId() - node.getStartDocumentId())).toString();
LOG.debug(formattedOutput);
if (!node.isLeaf()) {
for (StarTreeIndexNode child : node.getChildren().values()) {
printTree(child, level + 1);
}
}
}
private List<String> computeDefaultSplitOrder() {
ArrayList<String> defaultSplitOrder = new ArrayList<>();
//include only the dimensions not time column. Also, assumes that skipMaterializationForDimensions is built.
for (String dimensionName : dimensionNames) {
if (skipMaterializationForDimensions != null && !skipMaterializationForDimensions.contains(dimensionName)) {
defaultSplitOrder.add(dimensionName);
}
}
if (timeColumnName != null) {
defaultSplitOrder.remove(timeColumnName);
}
Collections.sort(defaultSplitOrder, new Comparator<String>() {
@Override
public int compare(String o1, String o2) {
return dictionaryMap.get(o2).size() - dictionaryMap.get(o1).size(); //descending
}
});
return defaultSplitOrder;
}
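  // By default, dimensions whose cardinality exceeds the configured threshold are not materialized.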
private Set<String> computeDefaultDimensionsToSkipMaterialization() {
Set<String> skipDimensions = new HashSet<String>();
for (String dimensionName : dimensionNames) {
if (dictionaryMap.get(dimensionName).size() > skipMaterializationCardinalityThreshold) {
skipDimensions.add(dimensionName);
}
}
return skipDimensions;
}
/*
* Sorts the file on all dimensions
*/
private void sort(File file, int startDocId, int endDocId) throws IOException {
if (debugMode) {
LOG.info("BEFORE SORTING");
printFile(file, startDocId, endDocId);
}
StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder());
dataSorter.sort(startDocId, endDocId, 0, dimensionSizeBytes);
if (debugMode) {
LOG.info("AFTER SORTING");
printFile(file, startDocId, endDocId);
}
}
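  // Lazily builds the dimension sort order: split-order dimensions first, then the remaining dimensions.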
private int[] getSortOrder() {
if (sortOrder == null) {
sortOrder = new int[dimensionNames.size()];
for (int i = 0; i < dimensionsSplitOrder.size(); i++) {
sortOrder[i] = dimensionNameToIndexMap.get(dimensionsSplitOrder.get(i));
}
//add remaining dimensions that were not part of dimensionsSplitOrder
int counter = 0;
for (String dimName : dimensionNames) {
if (!dimensionsSplitOrder.contains(dimName)) {
sortOrder[dimensionsSplitOrder.size() + counter] = dimensionNameToIndexMap.get(dimName);
counter = counter + 1;
}
}
}
return sortOrder;
}
private void printFile(File file, int startDocId, int endDocId) throws IOException {
LOG.info("Contents of file:{} from:{} to:{}", file.getName(), startDocId, endDocId);
StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder());
Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(startDocId, endDocId);
int numRecordsToPrint = 100;
int counter = 0;
while (iterator.hasNext()) {
Pair<byte[], byte[]> next = iterator.next();
LOG.info("{}, {}", DimensionBuffer.fromBytes(next.getLeft()),
MetricBuffer.fromBytes(next.getRight(), metricTypes));
if (counter++ == numRecordsToPrint) {
break;
}
}
}
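  // Recursively splits the records between startDocId and endDocId on the dimension for this level, creates a
  // child node per dimension value, and (unless configured otherwise) adds a star node backed by pre-aggregated
  // records. Returns the number of aggregated records appended to the data file.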
private int constructStarTree(StarTreeIndexNode node, int startDocId, int endDocId, int level, File file)
throws Exception {
//node.setStartDocumentId(startDocId);
int docsAdded = 0;
if (level == dimensionsSplitOrder.size() - 1) {
return 0;
}
String splitDimensionName = dimensionsSplitOrder.get(level);
Integer splitDimensionId = dimensionNameToIndexMap.get(splitDimensionName);
LOG.debug("Building tree at level:{} using file:{} from startDoc:{} endDocId:{} splitting on dimension:{}", level,
file.getName(), startDocId, endDocId, splitDimensionName);
Map<Integer, IntPair> sortGroupBy = groupBy(startDocId, endDocId, splitDimensionId, file);
LOG.debug("Group stats:{}", sortGroupBy);
node.setChildDimensionName(splitDimensionId);
node.setChildren(new HashMap<Integer, StarTreeIndexNode>());
for (int childDimensionValue : sortGroupBy.keySet()) {
StarTreeIndexNode child = new StarTreeIndexNode();
child.setDimensionName(splitDimensionId);
child.setDimensionValue(childDimensionValue);
child.setParent(node);
child.setLevel(node.getLevel() + 1);
// n.b. We will number the nodes later using BFS after fully split
// Add child to parent
node.getChildren().put(childDimensionValue, child);
int childDocs = 0;
IntPair range = sortGroupBy.get(childDimensionValue);
if (range.getRight() - range.getLeft() > maxLeafRecords) {
childDocs = constructStarTree(child, range.getLeft(), range.getRight(), level + 1, file);
docsAdded += childDocs;
}
// Either range <= maxLeafRecords, or we did not split further (last level).
if (childDocs == 0) {
child.setStartDocumentId(range.getLeft());
child.setEndDocumentId(range.getRight());
}
}
// Return if star node does not need to be created.
if (skipStarNodeCreationForDimensions != null && skipStarNodeCreationForDimensions.contains(splitDimensionName)) {
return docsAdded;
}
//create star node
StarTreeIndexNode starChild = new StarTreeIndexNode();
starChild.setDimensionName(splitDimensionId);
starChild.setDimensionValue(StarTreeIndexNode.all());
starChild.setParent(node);
starChild.setLevel(node.getLevel() + 1);
// n.b. We will number the nodes later using BFS after fully split
// Add child to parent
node.getChildren().put(StarTreeIndexNode.all(), starChild);
Iterator<Pair<DimensionBuffer, MetricBuffer>> iterator =
uniqueCombinations(startDocId, endDocId, file, splitDimensionId);
int rowsAdded = 0;
int startOffset = rawRecordCount + aggRecordCount;
while (iterator.hasNext()) {
Pair<DimensionBuffer, MetricBuffer> next = iterator.next();
DimensionBuffer dimension = next.getLeft();
MetricBuffer metricsHolder = next.getRight();
LOG.debug("Adding row:{}", dimension);
appendToAggBuffer(dimension, metricsHolder);
rowsAdded++;
}
docsAdded += rowsAdded;
LOG.debug("Added {} additional records at level {}", rowsAdded, level);
//flush
dataBuffer.flush();
int childDocs = 0;
if (rowsAdded >= maxLeafRecords) {
sort(dataFile, startOffset, startOffset + rowsAdded);
childDocs = constructStarTree(starChild, startOffset, startOffset + rowsAdded, level + 1, dataFile);
docsAdded += childDocs;
}
// Either rowsAdded < maxLeafRecords, or we did not split further (last level).
if (childDocs == 0) {
starChild.setStartDocumentId(startOffset);
starChild.setEndDocumentId(startOffset + rowsAdded);
}
//node.setEndDocumentId(endDocId + docsAdded);
return docsAdded;
}
/**
* Assumes the file is already sorted, returns the unique combinations after removing a specified dimension.
* Aggregates the metrics for each unique combination, currently only sum is supported by default
   * @param startDocId start document id (inclusive)
   * @param endDocId end document id (exclusive)
   * @param file data file to read the records from
   * @param splitDimensionId id of the dimension being removed (replaced by the star value)
   * @return iterator over the unique dimension combinations with their aggregated metrics
   * @throws Exception if the temporary file cannot be written or read
*/
private Iterator<Pair<DimensionBuffer, MetricBuffer>> uniqueCombinations(int startDocId, int endDocId, File file,
int splitDimensionId) throws Exception {
StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder());
Iterator<Pair<byte[], byte[]>> iterator1 = dataSorter.iterator(startDocId, endDocId);
File tempFile = new File(outDir, file.getName() + "_" + startDocId + "_" + endDocId + ".unique.tmp");
DataOutputStream dos = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(tempFile)));
while (iterator1.hasNext()) {
Pair<byte[], byte[]> next = iterator1.next();
byte[] dimensionBuffer = next.getLeft();
byte[] metricBuffer = next.getRight();
DimensionBuffer dimensions = DimensionBuffer.fromBytes(dimensionBuffer);
for (int i = 0; i < numDimensions; i++) {
String dimensionName = dimensionNameToIndexMap.inverse().get(i);
if (i == splitDimensionId || (skipMaterializationForDimensions != null &&
skipMaterializationForDimensions.contains(dimensionName))) {
dos.writeInt(StarTreeIndexNode.all());
} else {
dos.writeInt(dimensions.getDimension(i));
}
}
dos.write(metricBuffer);
}
dos.close();
dataSorter = new StarTreeDataTable(tempFile, dimensionSizeBytes, metricSizeBytes, getSortOrder());
dataSorter.sort(0, endDocId - startDocId);
if (debugMode) {
printFile(tempFile, 0, endDocId - startDocId);
}
final Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(0, endDocId - startDocId);
return new Iterator<Pair<DimensionBuffer, MetricBuffer>>() {
Pair<DimensionBuffer, MetricBuffer> prev = null;
boolean done = false;
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public boolean hasNext() {
return !done;
}
@Override
public Pair<DimensionBuffer, MetricBuffer> next() {
while (iterator.hasNext()) {
Pair<byte[], byte[]> next = iterator.next();
byte[] dimBuffer = next.getLeft();
byte[] metricBuffer = next.getRight();
if (prev == null) {
prev = Pair.of(DimensionBuffer.fromBytes(dimBuffer), MetricBuffer.fromBytes(metricBuffer, metricTypes));
} else {
Pair<DimensionBuffer, MetricBuffer> current =
Pair.of(DimensionBuffer.fromBytes(dimBuffer), MetricBuffer.fromBytes(metricBuffer, metricTypes));
if (!current.getLeft().equals(prev.getLeft())) {
Pair<DimensionBuffer, MetricBuffer> ret = prev;
prev = current;
LOG.debug("Returning unique {}", prev.getLeft());
return ret;
} else {
prev.getRight().aggregate(current.getRight(), metricTypes);
}
}
}
done = true;
LOG.debug("Returning unique {}", prev.getLeft());
return prev;
}
};
}
/**
   * Groups the records between startDocId and endDocId on the given dimension, assuming the file is already
   * sorted on that dimension.
   * @param startDocId start document id (inclusive)
   * @param endDocId end document id (exclusive)
   * @param dimension index of the dimension to group on
   * @param file data file to read the records from
   * @return map from dimension value id to the [start, end) document id range it occupies
*/
private Map<Integer, IntPair> groupBy(int startDocId, int endDocId, Integer dimension, File file) {
StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder());
return dataSorter.groupByIntColumnCount(startDocId, endDocId, dimension);
}
/**
* Iterator to iterate over the records from startDocId to endDocId
*/
@Override
public Iterator<GenericRow> iterator(final int startDocId, final int endDocId) throws Exception {
StarTreeDataTable dataSorter = new StarTreeDataTable(dataFile, dimensionSizeBytes, metricSizeBytes, getSortOrder());
final Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(startDocId, endDocId);
return new Iterator<GenericRow>() {
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public GenericRow next() {
Pair<byte[], byte[]> pair = iterator.next();
DimensionBuffer dimensionKey = DimensionBuffer.fromBytes(pair.getLeft());
MetricBuffer metricsHolder = MetricBuffer.fromBytes(pair.getRight(), metricTypes);
return toGenericRow(dimensionKey, metricsHolder);
}
};
}
public JSONObject getStarTreeAsJSON() throws Exception {
JSONObject json = new JSONObject();
toJson(json, starTreeRootIndexNode, dictionaryMap);
return json;
}
private void toJson(JSONObject json, StarTreeIndexNode node, Map<String, HashBiMap<Object, Integer>> dictionaryMap)
throws Exception {
String dimName = "ALL";
Object dimValue = "ALL";
if (node.getDimensionName() != StarTreeIndexNode.all()) {
dimName = dimensionNames.get(node.getDimensionName());
}
if (node.getDimensionValue() != StarTreeIndexNode.all()) {
dimValue = dictionaryMap.get(dimName).inverse().get(node.getDimensionValue());
}
json.put("title", dimName + ":" + dimValue);
if (node.getChildren() != null) {
JSONObject[] childJsons = new JSONObject[node.getChildren().size()];
int index = 0;
for (Integer child : node.getChildren().keySet()) {
StarTreeIndexNode childNode = node.getChildren().get(child);
JSONObject childJson = new JSONObject();
toJson(childJson, childNode, dictionaryMap);
childJsons[index++] = childJson;
}
json.put("nodes", childJsons);
}
}
@Override
public void cleanup() {
if (outDir != null) {
FileUtils.deleteQuietly(outDir);
}
}
@Override
public StarTree getTree() {
return starTree;
}
@Override
public int getTotalRawDocumentCount() {
return rawRecordCount;
}
@Override
public int getTotalAggregateDocumentCount() {
return aggRecordCount;
}
@Override
public int getMaxLeafRecords() {
return maxLeafRecords;
}
@Override
public List<String> getDimensionsSplitOrder() {
return dimensionsSplitOrder;
}
public Map<String, HashBiMap<Object, Integer>> getDictionaryMap() {
return dictionaryMap;
}
public HashBiMap<String, Integer> getDimensionNameToIndexMap() {
return dimensionNameToIndexMap;
}
@Override
public Set<String> getSkipMaterializationForDimensions() {
return skipMaterializationForDimensions;
}
}
| tkao1000/pinot | pinot-core/src/main/java/com/linkedin/pinot/core/startree/OffHeapStarTreeBuilder.java | Java | apache-2.0 | 28,902 |
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.plugins.signing;
import com.google.common.base.Function;
import groovy.lang.Closure;
import org.gradle.api.artifacts.PublishArtifact;
import org.gradle.api.file.FileCollection;
import org.gradle.api.internal.file.collections.ImmutableFileCollection;
import org.gradle.plugins.signing.signatory.Signatory;
import org.gradle.plugins.signing.type.SignatureType;
import org.gradle.util.ConfigureUtil;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
/**
* A sign operation creates digital signatures for one or more files or {@link PublishArtifact publish artifacts}.
*
* <p>The external representation of the signature is specified by the {@link #getSignatureType() signature type property}, while the {@link #signatory} property specifies who is to sign. <p> A sign
* operation manages one or more {@link Signature} objects. The {@code sign} methods are used to register things to generate signatures for. The {@link #execute()} method generates the signatures for
* all of the registered items at that time.
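 *
 * <p>A minimal usage sketch (the {@code operation}, {@code artifact} and {@code signatory} shown are assumed to
 * be supplied by the caller, typically via the signing plugin):
 * <pre>
 *   operation.signatory(signatory)
 *            .sign(artifact)
 *            .execute();
 * </pre>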
*/
abstract public class SignOperation implements SignatureSpec {
/**
* The file representation of the signature(s).
*/
private SignatureType signatureType;
/**
* The signatory to the generated digital signatures.
*/
private Signatory signatory;
/**
* Whether or not it is required that this signature be generated.
*/
private boolean required;
private final List<Signature> signatures = new ArrayList<Signature>();
public String getDisplayName() {
return "SignOperation";
}
@Override
public String toString() {
return getDisplayName();
}
@Override
public void setSignatureType(SignatureType signatureType) {
this.signatureType = signatureType;
}
@Override
public SignatureType getSignatureType() {
return signatureType;
}
@Override
public void setSignatory(Signatory signatory) {
this.signatory = signatory;
}
@Override
public Signatory getSignatory() {
return signatory;
}
@Override
public void setRequired(boolean required) {
this.required = required;
}
@Override
public boolean isRequired() {
return required;
}
/**
* Registers signatures for the given artifacts.
*
* @return this
* @see Signature#Signature(File, SignatureSpec, Object...)
*/
public SignOperation sign(PublishArtifact... artifacts) {
for (PublishArtifact artifact : artifacts) {
signatures.add(new Signature(artifact, this));
}
return this;
}
/**
* Registers signatures for the given files.
*
* @return this
* @see Signature#Signature(File, SignatureSpec, Object...)
*/
public SignOperation sign(File... files) {
for (File file : files) {
signatures.add(new Signature(file, this));
}
return this;
}
/**
* Registers signatures (with the given classifier) for the given files
*
* @return this
* @see Signature#Signature(PublishArtifact, SignatureSpec, Object...)
*/
public SignOperation sign(String classifier, File... files) {
for (File file : files) {
signatures.add(new Signature(file, classifier, this));
}
return this;
}
/**
* Change the signature type for signature generation.
*/
public SignOperation signatureType(SignatureType type) {
this.signatureType = type;
return this;
}
/**
* Change the signatory for signature generation.
*/
public SignOperation signatory(Signatory signatory) {
this.signatory = signatory;
return this;
}
/**
* Executes the given closure against this object.
*/
public SignOperation configure(Closure closure) {
ConfigureUtil.configureSelf(closure, this);
return this;
}
/**
* Generates actual signature files for all of the registered signatures.
*
     * <p>The signatures are generated with the configuration they have at this time, which includes the current signature type and signatory of this operation. <p> This method can be called
* multiple times, with the signatures being generated with their current configuration each time.
*
* @return this
* @see Signature#generate()
*/
public SignOperation execute() {
for (Signature signature : signatures) {
signature.generate();
}
return this;
}
/**
* The registered signatures.
*/
public List<Signature> getSignatures() {
return new ArrayList<Signature>(signatures);
}
/**
* Returns the single registered signature.
*
* @return The signature.
* @throws IllegalStateException if there is not exactly one registered signature.
*/
public Signature getSingleSignature() {
final int size = signatures.size();
switch (size) {
case 1:
return signatures.get(0);
case 0:
throw new IllegalStateException("Expected operation to contain exactly one signature, however, it contains no signatures.");
default:
throw new IllegalStateException("Expected operation to contain exactly one signature, however, it contains " + String.valueOf(size) + " signatures.");
}
}
/**
* All of the files that will be signed by this operation.
*/
public FileCollection getFilesToSign() {
return newSignatureFileCollection(new Function<Signature, File>() {
@Override
public File apply(Signature input) {
return input.getToSign();
}
});
}
/**
* All of the signature files that will be generated by this operation.
*/
public FileCollection getSignatureFiles() {
return newSignatureFileCollection(new Function<Signature, File>() {
@Override
public File apply(Signature input) {
return input.getFile();
}
});
}
private FileCollection newSignatureFileCollection(Function<Signature, File> getFile) {
return ImmutableFileCollection.of(collectSignatureFiles(getFile));
}
private ArrayList<File> collectSignatureFiles(Function<Signature, File> getFile) {
ArrayList<File> files = new ArrayList<File>(signatures.size());
for (Signature signature : signatures) {
File file = getFile.apply(signature);
if (file != null) {
files.add(file);
}
}
return files;
}
}
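// A minimal usage sketch, assuming a SignOperation can be instantiated directly and that
// armoredType/releaseSignatory stand for pre-existing SignatureType/Signatory instances;
// only methods defined above are called.
//
//   SignOperation op = new SignOperation();
//   op.sign(new File("build/libs/app.jar"))   // register a signature for the artifact file
//     .signatureType(armoredType)             // assumed SignatureType instance
//     .signatory(releaseSignatory)            // assumed Signatory holding the signing key
//     .execute();                             // generate the signature files now
//   File signatureFile = op.getSingleSignature().getFile();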
| robinverduijn/gradle | subprojects/signing/src/main/java/org/gradle/plugins/signing/SignOperation.java | Java | apache-2.0 | 7,371 |
/*
* Copyright 2013-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.cloudfoundry.client.v2.spaces;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import org.cloudfoundry.Nullable;
import org.immutables.value.Value;
import java.util.List;
/**
* The request payload for the Update a Space operation
*/
@JsonSerialize
@Value.Immutable
abstract class _UpdateSpaceRequest {
/**
* Allow SSH
*/
@JsonProperty("allow_ssh")
@Nullable
abstract Boolean getAllowSsh();
/**
* The auditor ids
*/
@JsonProperty("auditor_guids")
@Nullable
abstract List<String> getAuditorIds();
/**
* The developer ids
*/
@JsonProperty("developer_guids")
@Nullable
abstract List<String> getDeveloperIds();
/**
* The domain ids
*/
@JsonProperty("domain_guids")
@Nullable
abstract List<String> getDomainIds();
/**
* The manager ids
*/
@JsonProperty("manager_guids")
@Nullable
abstract List<String> getManagerIds();
/**
* The name
*/
@JsonProperty("name")
@Nullable
abstract String getName();
/**
* The organization id
*/
@JsonProperty("organization_guid")
@Nullable
abstract String getOrganizationId();
/**
* The security group ids
*/
@JsonProperty("security_group_guids")
@Nullable
abstract List<String> getSecurityGroupIds();
/**
* The space id
*/
@JsonIgnore
abstract String getSpaceId();
}
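// Illustrative builder usage, assuming the Immutables-generated UpdateSpaceRequest class and
// its builder methods derived from the abstract accessors above; the identifiers are made up.
//
//   UpdateSpaceRequest request = UpdateSpaceRequest.builder()
//       .spaceId("space-guid")   // path parameter, excluded from the JSON payload via @JsonIgnore
//       .name("renamed-space")   // optional payload fields
//       .allowSsh(true)
//       .build();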
| cloudfoundry/cf-java-client | cloudfoundry-client/src/main/java/org/cloudfoundry/client/v2/spaces/_UpdateSpaceRequest.java | Java | apache-2.0 | 2,193 |
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package agent.lldb.manager.evt;
import agent.lldb.lldb.DebugThreadInfo;
/**
* The event corresponding with SBThread.eBroadcastBitThreadResumed
*/
public class LldbThreadResumedEvent extends AbstractLldbEvent<DebugThreadInfo> {
public LldbThreadResumedEvent(DebugThreadInfo info) {
super(info);
}
}
| NationalSecurityAgency/ghidra | Ghidra/Debug/Debugger-agent-lldb/src/main/java/agent/lldb/manager/evt/LldbThreadResumedEvent.java | Java | apache-2.0 | 893 |
/**
* Copyright (C) 2013
* by 52 North Initiative for Geospatial Open Source Software GmbH
*
* Contact: Andreas Wytzisk
* 52 North Initiative for Geospatial Open Source Software GmbH
* Martin-Luther-King-Weg 24
* 48155 Muenster, Germany
* info@52north.org
*
* This program is free software; you can redistribute and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*
* This program is distributed WITHOUT ANY WARRANTY; even without the implied
* WARRANTY OF MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with
* this program (see gnu-gpl v2.txt). If not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA or
* visit the Free Software Foundation web page, http://www.fsf.org.
*/
package org.n52.sos.binding.rest.resources;
import org.n52.sos.binding.rest.requests.RestRequest;
/**
* @author <a href="mailto:e.h.juerrens@52north.org">Eike Hinderk Jürrens</a>
*
*/
public class OptionsRestRequest implements RestRequest {
private String resourceType;
private boolean isGlobalResource;
private boolean isResourceCollection;
public OptionsRestRequest(String resourceType, boolean isGlobalResource, boolean isResourceCollection) {
this.resourceType = resourceType;
this.isGlobalResource = isGlobalResource;
this.isResourceCollection = isResourceCollection;
}
public String getResourceType()
{
return resourceType;
}
public boolean isGlobalResource()
{
return isGlobalResource;
}
public boolean isResourceCollection()
{
return isResourceCollection;
}
}
| sauloperez/sos | src/bindings/rest/code/src/main/java/org/n52/sos/binding/rest/resources/OptionsRestRequest.java | Java | apache-2.0 | 1,863 |
package org.adligo.tests4j.system.shared.trials;
import org.adligo.tests4j.shared.common.ClassMethods;
import org.adligo.tests4j.shared.xml.I_XML_Builder;
public class TrialParamValue implements I_TrialParamValue {
public static final String TAG_NAME = "value";
public static final String CLASS_NAME = "class";
public static final String PARAMETER_VALUE_MUST_BE_A_NON_VOID_PRIMITIVE_OR_STRING =
"Parameter value must be a non Void primitive or String.";
private Object value_;
public TrialParamValue(Object value) {
if (value == null) {
throw new NullPointerException();
}
Class<?> c = value.getClass();
if ( (ClassMethods.isPrimitiveClass(c) && !ClassMethods.isClass(Void.class, c))
|| ClassMethods.isClass(String.class, c)) {
value_ = value;
} else {
throw new IllegalArgumentException(
PARAMETER_VALUE_MUST_BE_A_NON_VOID_PRIMITIVE_OR_STRING);
}
}
@Override
public String getClassName() {
return value_.getClass().getName();
}
@Override
public Object getValue() {
return value_;
}
@Override
public void toXml(I_XML_Builder builder) {
builder.addIndent();
builder.addStartTag(TAG_NAME);
String name = ClassMethods.getSimpleName(value_.getClass());
builder.addAttribute(CLASS_NAME, name);
builder.endHeader();
builder.addText(value_.toString());
builder.addEndTag(TAG_NAME);
builder.endLine();
}
}
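// Quick illustration of the accepted value types, assuming ClassMethods.isPrimitiveClass()
// also matches boxed wrapper types; anything else triggers the IllegalArgumentException above.
//
//   I_TrialParamValue ok    = new TrialParamValue("hello");      // String -> accepted
//   I_TrialParamValue boxed = new TrialParamValue(42);           // Integer (boxed int) -> accepted
//   I_TrialParamValue bad   = new TrialParamValue(new Object()); // throws IllegalArgumentException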
| adligo/tests4j.adligo.org | src/org/adligo/tests4j/system/shared/trials/TrialParamValue.java | Java | apache-2.0 | 1,384 |
package com.sequenceiq.freeipa.entity.util;
import com.sequenceiq.cloudbreak.converter.DefaultEnumConverter;
import com.sequenceiq.freeipa.api.v1.kerberos.model.KerberosType;
public class KerberosTypeConverter extends DefaultEnumConverter<KerberosType> {
@Override
public KerberosType getDefault() {
return KerberosType.FREEIPA;
}
}
| hortonworks/cloudbreak | freeipa/src/main/java/com/sequenceiq/freeipa/entity/util/KerberosTypeConverter.java | Java | apache-2.0 | 356 |
package com.lyubenblagoev.postfixrest.security;
import com.lyubenblagoev.postfixrest.entity.User;
import com.lyubenblagoev.postfixrest.repository.UserRepository;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.core.userdetails.UsernameNotFoundException;
import org.springframework.stereotype.Service;
import java.util.Optional;
@Service
public class CustomUserDetailsService implements UserDetailsService {
private final UserRepository userRepository;
public CustomUserDetailsService(UserRepository userRepository) {
this.userRepository = userRepository;
}
@Override
public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException {
return userRepository.findByEmail(username)
.map(u -> new UserPrincipal(u))
.orElseThrow(() -> new UsernameNotFoundException("No user found for " + username));
}
}
| lyubenblagoev/postfix-rest-server | src/main/java/com/lyubenblagoev/postfixrest/security/CustomUserDetailsService.java | Java | apache-2.0 | 1,026 |
/*******************************************************************************
* Copyright (c) 2012, 2015 Pivotal Software, Inc.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License,
* Version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* Pivotal Software, Inc. - initial API and implementation
********************************************************************************/
package cn.dockerfoundry.ide.eclipse.server.core.internal;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import org.cloudfoundry.client.lib.domain.CloudService;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Status;
import org.eclipse.wst.server.core.IModule;
import cn.dockerfoundry.ide.eclipse.server.core.internal.application.ModuleChangeEvent;
import cn.dockerfoundry.ide.eclipse.server.core.internal.client.CloudRefreshEvent;
/**
* Fires server refresh events. Only one handler is active per workbench runtime
* session.
*
*/
public class ServerEventHandler {
private static ServerEventHandler handler;
public static ServerEventHandler getDefault() {
if (handler == null) {
handler = new ServerEventHandler();
}
return handler;
}
private final List<CloudServerListener> applicationListeners = new CopyOnWriteArrayList<CloudServerListener>();
public synchronized void addServerListener(CloudServerListener listener) {
if (listener != null && !applicationListeners.contains(listener)) {
applicationListeners.add(listener);
}
}
public synchronized void removeServerListener(CloudServerListener listener) {
applicationListeners.remove(listener);
}
public void fireServicesUpdated(DockerFoundryServer server, List<DockerApplicationService> services) {
fireServerEvent(new CloudRefreshEvent(server, null, CloudServerEvent.EVENT_UPDATE_SERVICES, services));
}
public void firePasswordUpdated(DockerFoundryServer server) {
fireServerEvent(new CloudServerEvent(server, CloudServerEvent.EVENT_UPDATE_PASSWORD));
}
public void fireServerRefreshed(DockerFoundryServer server) {
fireServerEvent(new CloudServerEvent(server, CloudServerEvent.EVENT_SERVER_REFRESHED));
}
public void fireAppInstancesChanged(DockerFoundryServer server, IModule module) {
fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_INSTANCES_UPDATED, module,
Status.OK_STATUS));
}
public void fireApplicationRefreshed(DockerFoundryServer server, IModule module) {
fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_APPLICATION_REFRESHED, module,
Status.OK_STATUS));
}
public void fireAppDeploymentChanged(DockerFoundryServer server, IModule module) {
fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_APP_DEPLOYMENT_CHANGED, module,
Status.OK_STATUS));
}
public void fireError(DockerFoundryServer server, IModule module, IStatus status) {
fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_CLOUD_OP_ERROR, module, status));
}
public synchronized void fireServerEvent(CloudServerEvent event) {
CloudServerListener[] listeners = applicationListeners.toArray(new CloudServerListener[0]);
for (CloudServerListener listener : listeners) {
listener.serverChanged(event);
}
}
}
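// Listener sketch, assuming CloudServerListener is a single-method interface exposing
// serverChanged(CloudServerEvent) (as used in fireServerEvent above) and that CloudServerEvent
// exposes getType(); cloudServer stands for an existing DockerFoundryServer instance.
//
//   CloudServerListener listener = event -> {
//       if (event.getType() == CloudServerEvent.EVENT_SERVER_REFRESHED) {
//           // react to the refresh
//       }
//   };
//   ServerEventHandler.getDefault().addServerListener(listener);
//   ServerEventHandler.getDefault().fireServerRefreshed(cloudServer);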
| osswangxining/dockerfoundry | cn.dockerfoundry.ide.eclipse.server.core/src/cn/dockerfoundry/ide/eclipse/server/core/internal/ServerEventHandler.java | Java | apache-2.0 | 3,808 |
/*
* Copyright 2016 Bjoern Bilger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jrestless.core.container;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.Response.Status;
import org.glassfish.jersey.server.ContainerResponse;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import com.jrestless.core.container.JRestlessHandlerContainer.JRestlessContainerResponse;
import com.jrestless.core.container.JRestlessHandlerContainer.JRestlessContainerResponseWriter;
import com.jrestless.core.container.io.JRestlessResponseWriter;
public class JRestlessContainerResponseWriterTest {
private JRestlessContainerResponseWriter containerResponseWriter;
private JRestlessContainerResponse response;
@BeforeEach
public void setup() {
JRestlessResponseWriter responseWriter = mock(JRestlessResponseWriter.class);
when(responseWriter.getEntityOutputStream()).thenReturn(new ByteArrayOutputStream());
response = spy(new JRestlessContainerResponse(responseWriter));
containerResponseWriter = new JRestlessContainerResponseWriter(response);
}
@Test
public void commit_ResponseNotYetClosed_ShouldCloseResponse() {
containerResponseWriter.commit();
verify(response, times(1)).close();
}
@Test
public void writeResponseStatusAndHeaders_ContextHeaderAndStatusGiven_ShouldUpdateResponseStatusAndHeaders() {
MultivaluedMap<String, String> actualHeaders = new MultivaluedHashMap<>();
actualHeaders.add("header0", "value0_0");
actualHeaders.add("header0", "value0_1");
actualHeaders.add("header1", "value1_0");
MultivaluedMap<String, String> expectedHeaders = new MultivaluedHashMap<>();
expectedHeaders.add("header0", "value0_0");
expectedHeaders.add("header0", "value0_1");
expectedHeaders.add("header1", "value1_0");
ContainerResponse context = mock(ContainerResponse.class);
when(context.getStatusInfo()).thenReturn(Status.CONFLICT);
when(context.getStringHeaders()).thenReturn(actualHeaders);
containerResponseWriter.writeResponseStatusAndHeaders(-1, context);
assertEquals(Status.CONFLICT, response.getStatusType());
assertEquals(expectedHeaders, response.getHeaders());
}
@Test
public void writeResponseStatusAndHeaders_ShouldReturnEntityOutputStreamOfResponse() {
ContainerResponse context = mock(ContainerResponse.class);
when(context.getStringHeaders()).thenReturn(new MultivaluedHashMap<>());
when(context.getStatusInfo()).thenReturn(Status.OK);
OutputStream entityOutputStream = containerResponseWriter.writeResponseStatusAndHeaders(-1, context);
assertSame(response.getEntityOutputStream(), entityOutputStream);
}
@Test
public void failure_ResponseNotYetCommitted_ShouldSetInternalServerErrorStatusOnFail() {
ContainerResponse context = mock(ContainerResponse.class);
when(context.getStatusInfo()).thenReturn(Status.OK);
when(context.getStringHeaders()).thenReturn(new MultivaluedHashMap<>());
containerResponseWriter.writeResponseStatusAndHeaders(-1, context);
containerResponseWriter.failure(new RuntimeException());
assertEquals(Status.INTERNAL_SERVER_ERROR, response.getStatusType());
}
@Test
public void failure_ResponseNotYetCommitted_ShouldCommitOnFailure() {
containerResponseWriter = spy(containerResponseWriter);
containerResponseWriter.failure(new RuntimeException());
verify(containerResponseWriter, times(1)).commit();
}
@Test
public void failure_ResponseNotYetCommitted_ShouldRethrowOnCommitFailure() {
containerResponseWriter = spy(containerResponseWriter);
containerResponseWriter.failure(new RuntimeException());
doThrow(CommitException.class).when(containerResponseWriter).commit();
assertThrows(RuntimeException.class, () -> containerResponseWriter.failure(new RuntimeException()));
}
@Test
public void enableResponseBuffering_Always_ShouldBeDisabled() {
assertFalse(containerResponseWriter.enableResponseBuffering());
}
@Test
public void setSuspendTimeout_Always_ShouldBeUnsupported() {
assertThrows(UnsupportedOperationException.class, () -> containerResponseWriter.setSuspendTimeout(1, null));
}
@Test
public void suspend_Always_ShouldBeUnsupported() {
assertThrows(UnsupportedOperationException.class, () -> containerResponseWriter.suspend(1, null, null));
}
@SuppressWarnings("serial")
private static class CommitException extends RuntimeException {
}
}
| bbilger/jrestless | core/jrestless-core-container/src/test/java/com/jrestless/core/container/JRestlessContainerResponseWriterTest.java | Java | apache-2.0 | 5,472 |
/**
* Copyright (c) 2015-2022, Michael Yang 杨福海 (fuhai999@gmail.com).
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.jboot.support.metric.annotation;
import java.lang.annotation.*;
@Documented
@Target(ElementType.METHOD)
@Inherited
@Retention(RetentionPolicy.RUNTIME)
public @interface EnableMetricTimer {
String value() default "";
}
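// Usage sketch: the annotation targets methods, so it is applied to a service method; the
// class, method and metric name below are hypothetical.
//
//   public class OrderService {
//       @EnableMetricTimer("order.create")
//       public void createOrder() {
//           // body timed by jboot's metric support
//       }
//   }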
| yangfuhai/jboot | src/main/java/io/jboot/support/metric/annotation/EnableMetricTimer.java | Java | apache-2.0 | 888 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.fhuss.kafka.streams.cep.core.state.internal;
import java.util.Objects;
/**
* Class for aggregated state.
*
* @param <K> the record key type.
*/
public class Aggregated<K> {
private final K key;
private final Aggregate aggregate;
/**
* Creates a new {@link Aggregated} instance.
* @param key the record key
* @param aggregate the instance of {@link Aggregate}.
*/
public Aggregated(final K key, final Aggregate aggregate) {
this.key = key;
this.aggregate = aggregate;
}
public K getKey() {
return key;
}
public Aggregate getAggregate() {
return aggregate;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Aggregated<?> that = (Aggregated<?>) o;
return Objects.equals(key, that.key) &&
Objects.equals(aggregate, that.aggregate);
}
@Override
public int hashCode() {
return Objects.hash(key, aggregate);
}
@Override
public String toString() {
return "Aggregated{" +
"key=" + key +
", aggregate=" + aggregate +
'}';
}
}
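// Small example of pairing a record key with its aggregate; someAggregate stands for an
// existing Aggregate instance.
//
//   Aggregated<String> aggregated = new Aggregated<>("user-42", someAggregate);
//   String key = aggregated.getKey();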
| fhussonnois/kafkastreams-cep | core/src/main/java/com/github/fhuss/kafka/streams/cep/core/state/internal/Aggregated.java | Java | apache-2.0 | 2,076 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.apigateway.model.transform;
import java.util.List;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.apigateway.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;
/**
* UpdateMethodResponseRequestMarshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class UpdateMethodResponseRequestMarshaller {
private static final MarshallingInfo<String> RESTAPIID_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PATH)
.marshallLocationName("restapi_id").build();
private static final MarshallingInfo<String> RESOURCEID_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PATH)
.marshallLocationName("resource_id").build();
private static final MarshallingInfo<String> HTTPMETHOD_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PATH)
.marshallLocationName("http_method").build();
private static final MarshallingInfo<String> STATUSCODE_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PATH)
.marshallLocationName("status_code").build();
private static final MarshallingInfo<List> PATCHOPERATIONS_BINDING = MarshallingInfo.builder(MarshallingType.LIST)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("patchOperations").build();
private static final UpdateMethodResponseRequestMarshaller instance = new UpdateMethodResponseRequestMarshaller();
public static UpdateMethodResponseRequestMarshaller getInstance() {
return instance;
}
/**
* Marshall the given parameter object.
*/
public void marshall(UpdateMethodResponseRequest updateMethodResponseRequest, ProtocolMarshaller protocolMarshaller) {
if (updateMethodResponseRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(updateMethodResponseRequest.getRestApiId(), RESTAPIID_BINDING);
protocolMarshaller.marshall(updateMethodResponseRequest.getResourceId(), RESOURCEID_BINDING);
protocolMarshaller.marshall(updateMethodResponseRequest.getHttpMethod(), HTTPMETHOD_BINDING);
protocolMarshaller.marshall(updateMethodResponseRequest.getStatusCode(), STATUSCODE_BINDING);
protocolMarshaller.marshall(updateMethodResponseRequest.getPatchOperations(), PATCHOPERATIONS_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
}
| jentfoo/aws-sdk-java | aws-java-sdk-api-gateway/src/main/java/com/amazonaws/services/apigateway/model/transform/UpdateMethodResponseRequestMarshaller.java | Java | apache-2.0 | 3,397 |
package org.aksw.servicecat.web.api;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import org.aksw.servicecat.core.ServiceAnalyzerProcessor;
import org.springframework.beans.factory.annotation.Autowired;
@org.springframework.stereotype.Service
@Path("/services")
public class ServletServiceApi {
@Autowired
private ServiceAnalyzerProcessor processor;
@GET
@Produces(MediaType.APPLICATION_JSON)
@Path("/put")
public String registerService(@QueryParam("url") String serviceUrl)
{
processor.process(serviceUrl);
String result = "{}";
return result;
}
}
| GeoKnow/SparqlServiceCatalogue | servicecat-webapp/src/main/java/org/aksw/servicecat/web/api/ServletServiceApi.java | Java | apache-2.0 | 728 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.macie2.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.macie2.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;
/**
* AccountDetailMarshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class AccountDetailMarshaller {
private static final MarshallingInfo<String> ACCOUNTID_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
.marshallLocationName("accountId").build();
private static final MarshallingInfo<String> EMAIL_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
.marshallLocationName("email").build();
private static final AccountDetailMarshaller instance = new AccountDetailMarshaller();
public static AccountDetailMarshaller getInstance() {
return instance;
}
/**
* Marshall the given parameter object.
*/
public void marshall(AccountDetail accountDetail, ProtocolMarshaller protocolMarshaller) {
if (accountDetail == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(accountDetail.getAccountId(), ACCOUNTID_BINDING);
protocolMarshaller.marshall(accountDetail.getEmail(), EMAIL_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
}
| aws/aws-sdk-java | aws-java-sdk-macie2/src/main/java/com/amazonaws/services/macie2/model/transform/AccountDetailMarshaller.java | Java | apache-2.0 | 2,226 |
package org.apache.rya.indexing.external;
import java.net.UnknownHostException;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.rya.indexing.pcj.storage.PcjException;
import org.apache.rya.indexing.pcj.storage.accumulo.PcjVarOrderFactory;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.openrdf.model.URI;
import org.openrdf.model.impl.LiteralImpl;
import org.openrdf.model.impl.URIImpl;
import org.openrdf.model.vocabulary.RDF;
import org.openrdf.model.vocabulary.RDFS;
import org.openrdf.query.BindingSet;
import org.openrdf.query.MalformedQueryException;
import org.openrdf.query.QueryEvaluationException;
import org.openrdf.query.QueryLanguage;
import org.openrdf.query.QueryResultHandlerException;
import org.openrdf.query.TupleQueryResultHandler;
import org.openrdf.query.TupleQueryResultHandlerException;
import org.openrdf.repository.RepositoryException;
import org.openrdf.repository.sail.SailRepository;
import org.openrdf.repository.sail.SailRepositoryConnection;
import org.openrdf.sail.SailException;
import com.google.common.base.Optional;
import org.apache.rya.api.persist.RyaDAOException;
import org.apache.rya.rdftriplestore.inference.InferenceEngineException;
public class AccumuloConstantPcjIntegrationTest {
private SailRepositoryConnection conn, pcjConn;
private SailRepository repo, pcjRepo;
private Connector accCon;
String prefix = "table_";
String tablename = "table_INDEX_";
URI obj, obj2, subclass, subclass2, talksTo;
@Before
public void init() throws RepositoryException,
TupleQueryResultHandlerException, QueryEvaluationException,
MalformedQueryException, AccumuloException,
AccumuloSecurityException, TableExistsException,
TableNotFoundException, RyaDAOException, InferenceEngineException,
NumberFormatException, UnknownHostException, SailException {
repo = PcjIntegrationTestingUtil.getNonPcjRepo(prefix, "instance");
conn = repo.getConnection();
pcjRepo = PcjIntegrationTestingUtil.getPcjRepo(prefix, "instance");
pcjConn = pcjRepo.getConnection();
final URI sub = new URIImpl("uri:entity");
subclass = new URIImpl("uri:class");
obj = new URIImpl("uri:obj");
talksTo = new URIImpl("uri:talksTo");
conn.add(sub, RDF.TYPE, subclass);
conn.add(sub, RDFS.LABEL, new LiteralImpl("label"));
conn.add(sub, talksTo, obj);
final URI sub2 = new URIImpl("uri:entity2");
subclass2 = new URIImpl("uri:class2");
obj2 = new URIImpl("uri:obj2");
conn.add(sub2, RDF.TYPE, subclass2);
conn.add(sub2, RDFS.LABEL, new LiteralImpl("label2"));
conn.add(sub2, talksTo, obj2);
accCon = new MockInstance("instance").getConnector("root",new PasswordToken(""));
}
@After
public void close() throws RepositoryException, AccumuloException,
AccumuloSecurityException, TableNotFoundException {
PcjIntegrationTestingUtil.closeAndShutdown(conn, repo);
PcjIntegrationTestingUtil.closeAndShutdown(pcjConn, pcjRepo);
PcjIntegrationTestingUtil.deleteCoreRyaTables(accCon, prefix);
PcjIntegrationTestingUtil.deleteIndexTables(accCon, 2, prefix);
}
@Test
public void testEvaluateTwoIndexVarInstantiate1() throws PcjException,
RepositoryException, AccumuloException, AccumuloSecurityException,
TableNotFoundException, TableExistsException,
MalformedQueryException, SailException, QueryEvaluationException,
TupleQueryResultHandlerException {
final URI superclass = new URIImpl("uri:superclass");
final URI superclass2 = new URIImpl("uri:superclass2");
conn.add(subclass, RDF.TYPE, superclass);
conn.add(subclass2, RDF.TYPE, superclass2);
conn.add(obj, RDFS.LABEL, new LiteralImpl("label"));
conn.add(obj2, RDFS.LABEL, new LiteralImpl("label2"));
conn.add(obj, RDFS.LABEL, new LiteralImpl("label"));
conn.add(obj2, RDFS.LABEL, new LiteralImpl("label2"));
final String indexSparqlString = ""//
+ "SELECT ?dog ?pig ?duck " //
+ "{" //
+ " ?pig a ?dog . "//
+ " ?pig <http://www.w3.org/2000/01/rdf-schema#label> ?duck "//
+ "}";//
final String indexSparqlString2 = ""//
+ "SELECT ?o ?f ?e ?c ?l " //
+ "{" //
+ " ?e <uri:talksTo> ?o . "//
+ " ?o <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " ?c a ?f . " //
+ "}";//
final String queryString = ""//
+ "SELECT ?c ?l ?f ?o " //
+ "{" //
+ " <uri:entity> a ?c . "//
+ " <uri:entity> <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " <uri:entity> <uri:talksTo> ?o . "//
+ " ?o <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " ?c a ?f . " //
+ "}";//
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 1,
indexSparqlString, new String[] { "dog", "pig", "duck" },
Optional.<PcjVarOrderFactory> absent());
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 2,
indexSparqlString2, new String[] { "o", "f", "e", "c", "l" },
Optional.<PcjVarOrderFactory> absent());
final CountingResultHandler crh1 = new CountingResultHandler();
final CountingResultHandler crh2 = new CountingResultHandler();
conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
.evaluate(crh1);
PcjIntegrationTestingUtil.deleteCoreRyaTables(accCon, prefix);
pcjConn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(crh2);
Assert.assertEquals(crh1.getCount(), crh2.getCount());
}
@Test
public void testEvaluateThreeIndexVarInstantiate() throws PcjException,
RepositoryException, AccumuloException, AccumuloSecurityException,
TableNotFoundException, TableExistsException,
MalformedQueryException, SailException, QueryEvaluationException,
TupleQueryResultHandlerException {
final URI superclass = new URIImpl("uri:superclass");
final URI superclass2 = new URIImpl("uri:superclass2");
final URI sub = new URIImpl("uri:entity");
subclass = new URIImpl("uri:class");
obj = new URIImpl("uri:obj");
talksTo = new URIImpl("uri:talksTo");
final URI howlsAt = new URIImpl("uri:howlsAt");
final URI subType = new URIImpl("uri:subType");
conn.add(subclass, RDF.TYPE, superclass);
conn.add(subclass2, RDF.TYPE, superclass2);
conn.add(obj, RDFS.LABEL, new LiteralImpl("label"));
conn.add(obj2, RDFS.LABEL, new LiteralImpl("label2"));
conn.add(sub, howlsAt, superclass);
conn.add(superclass, subType, obj);
conn.add(obj, RDFS.LABEL, new LiteralImpl("label"));
conn.add(obj2, RDFS.LABEL, new LiteralImpl("label2"));
final String indexSparqlString = ""//
+ "SELECT ?dog ?pig ?duck " //
+ "{" //
+ " ?pig a ?dog . "//
+ " ?pig <http://www.w3.org/2000/01/rdf-schema#label> ?duck "//
+ "}";//
final String indexSparqlString2 = ""//
+ "SELECT ?o ?f ?e ?c ?l " //
+ "{" //
+ " ?e <uri:talksTo> ?o . "//
+ " ?o <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " ?c a ?f . " //
+ "}";//
final String indexSparqlString3 = ""//
+ "SELECT ?wolf ?sheep ?chicken " //
+ "{" //
+ " ?wolf <uri:howlsAt> ?sheep . "//
+ " ?sheep <uri:subType> ?chicken. "//
+ "}";//
final String queryString = ""//
+ "SELECT ?c ?l ?f ?o " //
+ "{" //
+ " <uri:entity> a ?c . "//
+ " <uri:entity> <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " <uri:entity> <uri:talksTo> ?o . "//
+ " ?o <http://www.w3.org/2000/01/rdf-schema#label> ?l. "//
+ " ?c a ?f . " //
+ " <uri:entity> <uri:howlsAt> ?f. "//
+ " ?f <uri:subType> <uri:obj>. "//
+ "}";//
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 1,
indexSparqlString, new String[] { "dog", "pig", "duck" },
Optional.<PcjVarOrderFactory> absent());
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 2,
indexSparqlString2, new String[] { "o", "f", "e", "c", "l" },
Optional.<PcjVarOrderFactory> absent());
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 3,
indexSparqlString3,
new String[] { "wolf", "sheep", "chicken" },
Optional.<PcjVarOrderFactory> absent());
final CountingResultHandler crh1 = new CountingResultHandler();
final CountingResultHandler crh2 = new CountingResultHandler();
conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
.evaluate(crh1);
PcjIntegrationTestingUtil.deleteCoreRyaTables(accCon, prefix);
pcjConn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(
crh2);
Assert.assertEquals(crh1.getCount(), crh2.getCount());
}
@Test
public void testEvaluateFilterInstantiate() throws RepositoryException,
PcjException, MalformedQueryException, SailException,
QueryEvaluationException, TableNotFoundException,
TupleQueryResultHandlerException, AccumuloException,
AccumuloSecurityException {
final URI e1 = new URIImpl("uri:e1");
final URI e2 = new URIImpl("uri:e2");
final URI e3 = new URIImpl("uri:e3");
final URI f1 = new URIImpl("uri:f1");
final URI f2 = new URIImpl("uri:f2");
final URI f3 = new URIImpl("uri:f3");
final URI g1 = new URIImpl("uri:g1");
final URI g2 = new URIImpl("uri:g2");
final URI g3 = new URIImpl("uri:g3");
conn.add(e1, talksTo, f1);
conn.add(f1, talksTo, g1);
conn.add(g1, talksTo, e1);
conn.add(e2, talksTo, f2);
conn.add(f2, talksTo, g2);
conn.add(g2, talksTo, e2);
conn.add(e3, talksTo, f3);
conn.add(f3, talksTo, g3);
conn.add(g3, talksTo, e3);
final String queryString = ""//
+ "SELECT ?x ?y ?z " //
+ "{" //
+ "Filter(?x = <uri:e1>) . " //
+ " ?x <uri:talksTo> ?y. " //
+ " ?y <uri:talksTo> ?z. " //
+ " ?z <uri:talksTo> <uri:e1>. " //
+ "}";//
final String indexSparqlString = ""//
+ "SELECT ?a ?b ?c ?d " //
+ "{" //
+ "Filter(?a = ?d) . " //
+ " ?a <uri:talksTo> ?b. " //
+ " ?b <uri:talksTo> ?c. " //
+ " ?c <uri:talksTo> ?d. " //
+ "}";//
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 1,
indexSparqlString, new String[] { "a", "b", "c", "d" },
Optional.<PcjVarOrderFactory> absent());
final CountingResultHandler crh1 = new CountingResultHandler();
final CountingResultHandler crh2 = new CountingResultHandler();
conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
.evaluate(crh1);
PcjIntegrationTestingUtil.deleteCoreRyaTables(accCon, prefix);
pcjConn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(crh2);
Assert.assertEquals(crh1.getCount(), crh2.getCount());
}
@Test
public void testEvaluateCompoundFilterInstantiate()
throws RepositoryException, PcjException, MalformedQueryException,
SailException, QueryEvaluationException,
TableNotFoundException,
TupleQueryResultHandlerException, AccumuloException, AccumuloSecurityException {
final URI e1 = new URIImpl("uri:e1");
final URI f1 = new URIImpl("uri:f1");
conn.add(e1, talksTo, e1);
conn.add(e1, talksTo, f1);
conn.add(f1, talksTo, e1);
final String queryString = ""//
+ "SELECT ?x ?y ?z " //
+ "{" //
+ "Filter(?x = <uri:e1> && ?y = <uri:e1>) . " //
+ " ?x <uri:talksTo> ?y. " //
+ " ?y <uri:talksTo> ?z. " //
+ " ?z <uri:talksTo> <uri:e1>. " //
+ "}";//
final String indexSparqlString = ""//
+ "SELECT ?a ?b ?c ?d " //
+ "{" //
+ "Filter(?a = ?d && ?b = ?d) . " //
+ " ?a <uri:talksTo> ?b. " //
+ " ?b <uri:talksTo> ?c. " //
+ " ?c <uri:talksTo> ?d. " //
+ "}";//
PcjIntegrationTestingUtil.createAndPopulatePcj(conn, accCon, tablename + 1,
indexSparqlString, new String[] { "a", "b", "c", "d" },
Optional.<PcjVarOrderFactory> absent());
final CountingResultHandler crh1 = new CountingResultHandler();
final CountingResultHandler crh2 = new CountingResultHandler();
conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
.evaluate(crh1);
PcjIntegrationTestingUtil.deleteCoreRyaTables(accCon, prefix);
pcjConn.prepareTupleQuery(QueryLanguage.SPARQL, queryString).evaluate(
crh2);
Assert.assertEquals(2, crh1.getCount());
Assert.assertEquals(crh1.getCount(), crh2.getCount());
}
public static class CountingResultHandler implements
TupleQueryResultHandler {
private int count = 0;
public int getCount() {
return count;
}
public void resetCount() {
count = 0;
}
@Override
public void startQueryResult(final List<String> arg0)
throws TupleQueryResultHandlerException {
}
@Override
public void handleSolution(final BindingSet arg0)
throws TupleQueryResultHandlerException {
count++;
}
@Override
public void endQueryResult() throws TupleQueryResultHandlerException {
}
@Override
public void handleBoolean(final boolean arg0)
throws QueryResultHandlerException {
}
@Override
public void handleLinks(final List<String> arg0)
throws QueryResultHandlerException {
}
}
}
| pujav65/incubator-rya | extras/indexing/src/test/java/org/apache/rya/indexing/external/AccumuloConstantPcjIntegrationTest.java | Java | apache-2.0 | 14,058 |
package de.jpaw.fixedpoint.tests;
import java.math.BigDecimal;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import de.jpaw.fixedpoint.types.MicroUnits;
public class TestConversions {
@Test
public void testFromConversions() throws Exception {
MicroUnits fromLong = MicroUnits.valueOf(2);
MicroUnits fromDouble = MicroUnits.valueOf(2.0);
MicroUnits fromString = MicroUnits.valueOf("2.0");
MicroUnits fromBigDecimal = MicroUnits.valueOf(BigDecimal.valueOf(2));
MicroUnits fromMantissa = MicroUnits.of(2_000_000L);
Assertions.assertEquals(fromMantissa, fromBigDecimal, "from BigDecimal");
Assertions.assertEquals(fromMantissa, fromString, "from String");
Assertions.assertEquals(fromMantissa, fromDouble, "from double");
Assertions.assertEquals(fromMantissa, fromLong, "from long");
}
@Test
public void testToConversions() throws Exception {
MicroUnits value = MicroUnits.valueOf(2);
Assertions.assertEquals("2", value.toString(), "to String");
Assertions.assertEquals(BigDecimal.valueOf(2).setScale(6), value.toBigDecimal(), "to BigDecimal");
Assertions.assertEquals(2, value.intValue(), "to int");
Assertions.assertEquals(2.0, value.doubleValue(), "to double");
Assertions.assertEquals(2_000_000L, value.getMantissa(), "to Mantissa");
}
}
| jpaw/jpaw | jpaw-fixedpoint-core/src/test/java/de/jpaw/fixedpoint/tests/TestConversions.java | Java | apache-2.0 | 1,420 |
package org.dominokit.domino.api.client;
@FunctionalInterface
public interface ApplicationStartHandler {
void onApplicationStarted();
}
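// Example: being a @FunctionalInterface, the handler can be supplied as a lambda; where it
// gets registered is framework-specific and not shown in this file.
//
//   ApplicationStartHandler handler = () -> System.out.println("application started");
//   handler.onApplicationStarted();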
| GwtDomino/domino | domino-api-client/src/main/java/org/dominokit/domino/api/client/ApplicationStartHandler.java | Java | apache-2.0 | 141 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.arquillian.warp.utils;
import java.io.UnsupportedEncodingException;
import org.apache.commons.codec.DecoderException;
/**
* Abstract superclass for Base-N encoders and decoders.
*
* <p>
* This class is not thread-safe. Each thread should use its own instance.
* </p>
*/
public abstract class BaseNCodec {
/**
* MIME chunk size per RFC 2045 section 6.8.
*
* <p>
* The {@value} character limit does not count the trailing CRLF, but counts all other characters, including any equal
* signs.
* </p>
*
* @see <a href="http://www.ietf.org/rfc/rfc2045.txt">RFC 2045 section 6.8</a>
*/
public static final int MIME_CHUNK_SIZE = 76;
/**
* PEM chunk size per RFC 1421 section 4.3.2.4.
*
* <p>
* The {@value} character limit does not count the trailing CRLF, but counts all other characters, including any equal
* signs.
* </p>
*
* @see <a href="http://tools.ietf.org/html/rfc1421">RFC 1421 section 4.3.2.4</a>
*/
public static final int PEM_CHUNK_SIZE = 64;
private static final int DEFAULT_BUFFER_RESIZE_FACTOR = 2;
/**
* Defines the default buffer size - currently {@value} - must be large enough for at least one encoded block+separator
*/
private static final int DEFAULT_BUFFER_SIZE = 8192;
/** Mask used to extract 8 bits, used in decoding bytes */
protected static final int MASK_8BITS = 0xff;
/**
* Byte used to pad output.
*/
protected static final byte PAD_DEFAULT = '='; // Allow static access to default
protected final byte PAD = PAD_DEFAULT; // instance variable just in case it needs to vary later
    /** Number of bytes in each full block of unencoded data, e.g. 3 for Base64 and 5 for Base32 */
private final int unencodedBlockSize;
    /** Number of bytes in each full block of encoded data, e.g. 4 for Base64 and 8 for Base32 */
private final int encodedBlockSize;
/**
* Chunksize for encoding. Not used when decoding. A value of zero or less implies no chunking of the encoded data. Rounded
* down to nearest multiple of encodedBlockSize.
*/
protected final int lineLength;
/**
* Size of chunk separator. Not used unless {@link #lineLength} > 0.
*/
private final int chunkSeparatorLength;
/**
* Buffer for streaming.
*/
protected byte[] buffer;
/**
* Position where next character should be written in the buffer.
*/
protected int pos;
/**
* Position where next character should be read from the buffer.
*/
private int readPos;
/**
* Boolean flag to indicate the EOF has been reached. Once EOF has been reached, this object becomes useless, and must be
* thrown away.
*/
protected boolean eof;
/**
* Variable tracks how many characters have been written to the current line. Only used when encoding. We use it to make
* sure each encoded line never goes beyond lineLength (if lineLength > 0).
*/
protected int currentLinePos;
/**
* Writes to the buffer only occur after every 3/5 reads when encoding, and every 4/8 reads when decoding. This variable
* helps track that.
*/
protected int modulus;
/**
* Note <code>lineLength</code> is rounded down to the nearest multiple of {@link #encodedBlockSize} If
* <code>chunkSeparatorLength</code> is zero, then chunking is disabled.
*
* @param unencodedBlockSize the size of an unencoded block (e.g. Base64 = 3)
* @param encodedBlockSize the size of an encoded block (e.g. Base64 = 4)
* @param lineLength if > 0, use chunking with a length <code>lineLength</code>
* @param chunkSeparatorLength the chunk separator length, if relevant
*/
protected BaseNCodec(int unencodedBlockSize, int encodedBlockSize, int lineLength, int chunkSeparatorLength) {
this.unencodedBlockSize = unencodedBlockSize;
this.encodedBlockSize = encodedBlockSize;
this.lineLength = (lineLength > 0 && chunkSeparatorLength > 0) ? (lineLength / encodedBlockSize) * encodedBlockSize : 0;
this.chunkSeparatorLength = chunkSeparatorLength;
}
/**
* Returns true if this object has buffered data for reading.
*
* @return true if there is data still available for reading.
*/
boolean hasData() { // package protected for access from I/O streams
return this.buffer != null;
}
/**
* Returns the amount of buffered data available for reading.
*
* @return The amount of buffered data available for reading.
*/
int available() { // package protected for access from I/O streams
return buffer != null ? pos - readPos : 0;
}
/**
* Get the default buffer size. Can be overridden.
*
* @return {@link #DEFAULT_BUFFER_SIZE}
*/
protected int getDefaultBufferSize() {
return DEFAULT_BUFFER_SIZE;
}
/** Increases our buffer by the {@link #DEFAULT_BUFFER_RESIZE_FACTOR}. */
private void resizeBuffer() {
if (buffer == null) {
buffer = new byte[getDefaultBufferSize()];
pos = 0;
readPos = 0;
} else {
byte[] b = new byte[buffer.length * DEFAULT_BUFFER_RESIZE_FACTOR];
System.arraycopy(buffer, 0, b, 0, buffer.length);
buffer = b;
}
}
/**
* Ensure that the buffer has room for <code>size</code> bytes
*
* @param size minimum spare space required
*/
protected void ensureBufferSize(int size) {
if ((buffer == null) || (buffer.length < pos + size)) {
resizeBuffer();
}
}
/**
* Extracts buffered data into the provided byte[] array, starting at position bPos, up to a maximum of bAvail bytes.
* Returns how many bytes were actually extracted.
*
* @param b byte[] array to extract the buffered data into.
* @param bPos position in byte[] array to start extraction at.
* @param bAvail amount of bytes we're allowed to extract. We may extract fewer (if fewer are available).
* @return The number of bytes successfully extracted into the provided byte[] array.
*/
int readResults(byte[] b, int bPos, int bAvail) { // package protected for access from I/O streams
if (buffer != null) {
int len = Math.min(available(), bAvail);
System.arraycopy(buffer, readPos, b, bPos, len);
readPos += len;
if (readPos >= pos) {
buffer = null; // so hasData() will return false, and this method can return -1
}
return len;
}
return eof ? -1 : 0;
}
/**
* Checks if a byte value is whitespace or not. Whitespace is taken to mean: space, tab, CR, LF
*
* @param byteToCheck the byte to check
* @return true if byte is whitespace, false otherwise
*/
protected static boolean isWhiteSpace(byte byteToCheck) {
switch (byteToCheck) {
case ' ':
case '\n':
case '\r':
case '\t':
return true;
default:
return false;
}
}
/**
* Resets this object to its initial newly constructed state.
*/
private void reset() {
buffer = null;
pos = 0;
readPos = 0;
currentLinePos = 0;
modulus = 0;
eof = false;
}
/**
* Encodes an Object using the Base-N algorithm. This method is provided in order to satisfy the requirements of the Encoder
* interface, and will throw an IllegalStateException if the supplied object is not of type byte[].
*
* @param pObject Object to encode
* @return An object (of type byte[]) containing the Base-N encoded data which corresponds to the byte[] supplied.
* @throws IllegalStateException if the parameter supplied is not of type byte[]
*/
public Object encode(Object pObject) {
if (!(pObject instanceof byte[])) {
throw new IllegalStateException("Parameter supplied to Base-N encode is not a byte[]");
}
return encode((byte[]) pObject);
}
/**
* Encodes a byte[] containing binary data, into a String containing characters in the Base-N alphabet.
*
* @param pArray a byte array containing binary data
* @return A String containing only Base-N character data
*/
public String encodeToString(byte[] pArray) {
return newStringUtf8(encode(pArray));
}
/**
* Decodes an Object using the Base-N algorithm. This method is provided in order to satisfy the requirements of the Decoder
     * interface, and will throw an IllegalStateException if the supplied object is not of type byte[] or String.
*
* @param pObject Object to decode
* @return An object (of type byte[]) containing the binary data which corresponds to the byte[] or String supplied.
     * @throws IllegalStateException if the parameter supplied is not of type byte[] or a String
*/
public Object decode(Object pObject) throws IllegalStateException {
if (pObject instanceof byte[]) {
return decode((byte[]) pObject);
} else if (pObject instanceof String) {
return decode((String) pObject);
} else {
throw new IllegalStateException("Parameter supplied to Base-N decode is not a byte[] or a String");
}
}
/**
* Decodes a String containing characters in the Base-N alphabet.
*
* @param pArray A String containing Base-N character data
* @return a byte array containing binary data
*/
public byte[] decode(String pArray) {
return decode(getBytesUtf8(pArray));
}
/**
* Decodes a byte[] containing characters in the Base-N alphabet.
*
* @param pArray A byte array containing Base-N character data
* @return a byte array containing binary data
*/
public byte[] decode(byte[] pArray) {
reset();
if (pArray == null || pArray.length == 0) {
return pArray;
}
decode(pArray, 0, pArray.length);
decode(pArray, 0, -1); // Notify decoder of EOF.
byte[] result = new byte[pos];
readResults(result, 0, result.length);
return result;
}
/**
* Encodes a byte[] containing binary data, into a byte[] containing characters in the alphabet.
*
* @param pArray a byte array containing binary data
     * @return A byte array containing only the Base-N alphabetic character data
*/
public byte[] encode(byte[] pArray) {
reset();
if (pArray == null || pArray.length == 0) {
return pArray;
}
encode(pArray, 0, pArray.length);
encode(pArray, 0, -1); // Notify encoder of EOF.
byte[] buf = new byte[pos - readPos];
readResults(buf, 0, buf.length);
return buf;
}
/**
* Encodes a byte[] containing binary data, into a String containing characters in the appropriate alphabet. Uses UTF8
* encoding.
*
* @param pArray a byte array containing binary data
* @return String containing only character data in the appropriate alphabet.
*/
public String encodeAsString(byte[] pArray) {
return newStringUtf8(encode(pArray));
}
abstract void encode(byte[] pArray, int i, int length); // package protected for access from I/O streams
abstract void decode(byte[] pArray, int i, int length); // package protected for access from I/O streams
/**
* Returns whether or not the <code>octet</code> is in the current alphabet. Does not allow whitespace or pad.
*
* @param value The value to test
*
* @return <code>true</code> if the value is defined in the current alphabet, <code>false</code> otherwise.
*/
protected abstract boolean isInAlphabet(byte value);
/**
* Tests a given byte array to see if it contains only valid characters within the alphabet. The method optionally treats
* whitespace and pad as valid.
*
* @param arrayOctet byte array to test
* @param allowWSPad if <code>true</code>, then whitespace and PAD are also allowed
*
* @return <code>true</code> if all bytes are valid characters in the alphabet or if the byte array is empty;
* <code>false</code>, otherwise
*/
public boolean isInAlphabet(byte[] arrayOctet, boolean allowWSPad) {
for (int i = 0; i < arrayOctet.length; i++) {
if (!isInAlphabet(arrayOctet[i]) && (!allowWSPad || (arrayOctet[i] != PAD) && !isWhiteSpace(arrayOctet[i]))) {
return false;
}
}
return true;
}
/**
* Tests a given String to see if it contains only valid characters within the alphabet. The method treats whitespace and
* PAD as valid.
*
* @param basen String to test
* @return <code>true</code> if all characters in the String are valid characters in the alphabet or if the String is empty;
* <code>false</code>, otherwise
* @see #isInAlphabet(byte[], boolean)
*/
public boolean isInAlphabet(String basen) {
return isInAlphabet(getBytesUtf8(basen), true);
}
/**
* Tests a given byte array to see if it contains any characters within the alphabet or PAD.
*
* Intended for use in checking line-ending arrays
*
* @param arrayOctet byte array to test
* @return <code>true</code> if any byte is a valid character in the alphabet or PAD; <code>false</code> otherwise
*/
protected boolean containsAlphabetOrPad(byte[] arrayOctet) {
if (arrayOctet == null) {
return false;
}
for (byte element : arrayOctet) {
if (PAD == element || isInAlphabet(element)) {
return true;
}
}
return false;
}
/**
* Calculates the amount of space needed to encode the supplied array.
*
* @param pArray byte[] array which will later be encoded
*
* @return amount of space needed to encoded the supplied array. Returns a long since a max-len array will require >
* Integer.MAX_VALUE
*/
public long getEncodedLength(byte[] pArray) {
// Calculate non-chunked size - rounded up to allow for padding
// cast to long is needed to avoid possibility of overflow
long len = ((pArray.length + unencodedBlockSize - 1) / unencodedBlockSize) * (long) encodedBlockSize;
if (lineLength > 0) { // We're using chunking
// Round up to nearest multiple
len += ((len + lineLength - 1) / lineLength) * chunkSeparatorLength;
}
return len;
}
/**
* Constructs a new <code>String</code> by decoding the specified array of bytes using the UTF-8 charset.
*
* @param bytes The bytes to be decoded into characters
* @return A new <code>String</code> decoded from the specified array of bytes using the UTF-8 charset, or <code>null</code>
* if the input byte array was <code>null</code>.
* @throws IllegalStateException Thrown when a {@link UnsupportedEncodingException} is caught, which should never happen
* since the charset is required.
*/
public static String newStringUtf8(byte[] bytes) {
if (bytes == null) {
return null;
}
try {
return new String(bytes, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("UTF-8", e);
}
}
/**
* Encodes the given string into a sequence of bytes using the UTF-8 charset, storing the result into a new byte array.
*
* @param string the String to encode, may be <code>null</code>
* @return encoded bytes, or <code>null</code> if the input string was <code>null</code>
     * @throws IllegalStateException Thrown when the charset is missing, which should never happen according to the Java
* specification.
* @see <a href="http://download.oracle.com/javase/1.5.0/docs/api/java/nio/charset/Charset.html">Standard charsets</a>
* @see #getBytesUnchecked(String, String)
*/
public static byte[] getBytesUtf8(String string) {
if (string == null) {
return null;
}
try {
return string.getBytes("UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("UTF-8", e);
}
}
}
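// Round-trip sketch: BaseNCodec is abstract, so this assumes a concrete subclass such as a
// Base64 implementation is available; apart from that assumption, only members defined above
// are used.
//
//   BaseNCodec codec = new Base64();                                           // assumed concrete subclass
//   String encoded = codec.encodeToString(BaseNCodec.getBytesUtf8("hello"));   // binary -> Base-N text
//   byte[] decoded = codec.decode(encoded);                                    // Base-N text -> binary
//   assert "hello".equals(BaseNCodec.newStringUtf8(decoded));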
| aslakknutsen/arquillian-extension-warp | impl/src/main/java/org/jboss/arquillian/warp/utils/BaseNCodec.java | Java | apache-2.0 | 17,879 |
package dk.lessismore.nojpa.reflection.db.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Created with IntelliJ IDEA.
* User: seb
*/
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface SearchField {
public static final String NULL = "";
public boolean translate() default false;
public boolean searchReverse() default false;
public float boostFactor() default 3f;
public float reverseBoostFactor() default 0.3f;
public String dynamicSolrPostName() default NULL;
}
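// Illustrative usage on an entity getter (field name and values are hypothetical), since the target is METHOD:
// @SearchField(translate = true, boostFactor = 5f)
// public String getTitle() { ... }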
| NoJPA-LESS-IS-MORE/NoJPA | nojpa_orm/src/main/java/dk/lessismore/nojpa/reflection/db/annotations/SearchField.java | Java | apache-2.0 | 664 |
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package com.hoang.fu;
/**
*
* @author hoangpt
*/
public class Teacher extends Employee implements ITeacher {
Teacher(String name) {
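// relies on the name field inherited from Employee; Teacher declares no fields of its own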
this.name = name;
}
@Override
float calculateSalary() {
return 500f;
}
@Override
public int calculateBonus() {
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
@Override
public float calculateAllowance() {
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
}
| hoangphantich/fu.java192x17.1 | hoangpt/Assignment_21/src/com/hoang/fu/Teacher.java | Java | apache-2.0 | 786 |
// Copyright (c) 1999-2004 Brian Wellington (bwelling@xbill.org)
package org.xbill.DNS;
import java.io.*;
import java.util.*;
import org.xbill.DNS.utils.*;
/**
* Transaction Signature - this record is automatically generated by the
* resolver. TSIG records provide transaction security between the
* sender and receiver of a message, using a shared key.
* @see org.xbill.DNS.Resolver
* @see org.xbill.DNS.TSIG
*
* @author Brian Wellington
*/
public class TSIGRecord extends Record {
private static final long serialVersionUID = -88820909016649306L;
private Name alg;
private Date timeSigned;
private int fudge;
private byte [] signature;
private int originalID;
private int error;
private byte [] other;
TSIGRecord() {}
Record
getObject() {
return new TSIGRecord();
}
/**
* Creates a TSIG Record from the given data. This is normally called by
* the TSIG class
* @param alg The shared key's algorithm
* @param timeSigned The time that this record was generated
* @param fudge The fudge factor for time - if the time that the message is
* received is not in the range [now - fudge, now + fudge], the signature
* fails
* @param signature The signature
* @param originalID The message ID at the time of its generation
* @param error The extended error field. Should be 0 in queries.
* @param other The other data field. Currently used only in BADTIME
* responses.
* @see org.xbill.DNS.TSIG
*/
public
TSIGRecord(Name name, int dclass, long ttl, Name alg, Date timeSigned,
int fudge, byte [] signature, int originalID, int error,
byte other[])
{
super(name, Type.TSIG, dclass, ttl);
this.alg = checkName("alg", alg);
this.timeSigned = timeSigned;
this.fudge = checkU16("fudge", fudge);
this.signature = signature;
this.originalID = checkU16("originalID", originalID);
this.error = checkU16("error", error);
this.other = other;
}
void
rrFromWire(DNSInput in) throws IOException {
alg = new Name(in);
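// the TSIG "time signed" field is a 48-bit count of seconds since the epoch, split into a 16-bit high part and a 32-bit low part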
long timeHigh = in.readU16();
long timeLow = in.readU32();
long time = (timeHigh << 32) + timeLow;
timeSigned = new Date(time * 1000);
fudge = in.readU16();
int sigLen = in.readU16();
signature = in.readByteArray(sigLen);
originalID = in.readU16();
error = in.readU16();
int otherLen = in.readU16();
if (otherLen > 0)
other = in.readByteArray(otherLen);
else
other = null;
}
void
rdataFromString(Tokenizer st, Name origin) throws IOException {
throw st.exception("no text format defined for TSIG");
}
/** Converts rdata to a String */
String
rrToString() {
StringBuffer sb = new StringBuffer();
sb.append(alg);
sb.append(" ");
if (Options.check("multiline"))
sb.append("(\n\t");
sb.append (timeSigned.getTime() / 1000);
sb.append (" ");
sb.append (fudge);
sb.append (" ");
sb.append (signature.length);
if (Options.check("multiline")) {
sb.append ("\n");
sb.append (base64.formatString(signature, 64, "\t", false));
} else {
sb.append (" ");
sb.append (base64.toString(signature));
}
sb.append (" ");
sb.append (Rcode.TSIGstring(error));
sb.append (" ");
if (other == null)
sb.append (0);
else {
sb.append (other.length);
if (Options.check("multiline"))
sb.append("\n\n\n\t");
else
sb.append(" ");
if (error == Rcode.BADTIME) {
if (other.length != 6) {
sb.append("<invalid BADTIME other data>");
} else {
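// the 6 octets of other data in a BADTIME response carry the server's 48-bit time in seconds since the epoch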
long time = ((long)(other[0] & 0xFF) << 40) +
((long)(other[1] & 0xFF) << 32) +
((other[2] & 0xFF) << 24) +
((other[3] & 0xFF) << 16) +
((other[4] & 0xFF) << 8) +
((other[5] & 0xFF) );
sb.append("<server time: ");
sb.append(new Date(time * 1000));
sb.append(">");
}
} else {
sb.append("<");
sb.append(base64.toString(other));
sb.append(">");
}
}
if (Options.check("multiline"))
sb.append(" )");
return sb.toString();
}
/** Returns the shared key's algorithm */
public Name
getAlgorithm() {
return alg;
}
/** Returns the time that this record was generated */
public Date
getTimeSigned() {
return timeSigned;
}
/** Returns the time fudge factor */
public int
getFudge() {
return fudge;
}
/** Returns the signature */
public byte []
getSignature() {
return signature;
}
/** Returns the original message ID */
public int
getOriginalID() {
return originalID;
}
/** Returns the extended error */
public int
getError() {
return error;
}
/** Returns the other data */
public byte []
getOther() {
return other;
}
void
rrToWire(DNSOutput out, Compression c, boolean canonical) {
alg.toWire(out, null, canonical);
long time = timeSigned.getTime() / 1000;
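// the 48-bit seconds value goes on the wire as a 16-bit high word followed by a 32-bit low word, mirroring rrFromWire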
int timeHigh = (int) (time >> 32);
long timeLow = (time & 0xFFFFFFFFL);
out.writeU16(timeHigh);
out.writeU32(timeLow);
out.writeU16(fudge);
out.writeU16(signature.length);
out.writeByteArray(signature);
out.writeU16(originalID);
out.writeU16(error);
if (other != null) {
out.writeU16(other.length);
out.writeByteArray(other);
}
else
out.writeU16(0);
}
}
| msdx/AndroidPNClient | androidpn/src/main/java/org/xbill/DNS/TSIGRecord.java | Java | apache-2.0 | 4,956 |
package app.monitor.job;
import core.framework.internal.log.LogManager;
import core.framework.json.JSON;
import core.framework.kafka.MessagePublisher;
import core.framework.log.message.StatMessage;
import core.framework.scheduler.Job;
import core.framework.scheduler.JobContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Duration;
import java.time.Instant;
import java.time.ZonedDateTime;
import java.util.List;
import java.util.Map;
/**
* @author neo
*/
public class KubeMonitorJob implements Job {
public final MessagePublisher<StatMessage> publisher;
public final KubeClient kubeClient;
public final List<String> namespaces;
private final Logger logger = LoggerFactory.getLogger(KubeMonitorJob.class);
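// Illustrative construction (values are hypothetical): new KubeMonitorJob(List.of("default", "kube-system"), kubeClient, publisher)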
public KubeMonitorJob(List<String> namespaces, KubeClient kubeClient, MessagePublisher<StatMessage> publisher) {
this.publisher = publisher;
this.kubeClient = kubeClient;
this.namespaces = namespaces;
}
@Override
public void execute(JobContext context) {
try {
var now = ZonedDateTime.now();
for (String namespace : namespaces) {
KubePodList pods = kubeClient.listPods(namespace);
for (KubePodList.Pod pod : pods.items) {
String errorMessage = check(pod, now);
if (errorMessage != null) {
publishPodFailure(pod, errorMessage);
}
}
}
} catch (Throwable e) {
logger.error(e.getMessage(), e);
publisher.publish(StatMessageFactory.failedToCollect(LogManager.APP_NAME, null, e));
}
}
String check(KubePodList.Pod pod, ZonedDateTime now) {
if (pod.metadata.deletionTimestamp != null) {
Duration elapsed = Duration.between(pod.metadata.deletionTimestamp, now);
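// allow up to 5 minutes (300s) for graceful deletion before reporting a failure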
if (elapsed.toSeconds() >= 300) {
return "pod is still in deletion, elapsed=" + elapsed;
}
return null;
}
String phase = pod.status.phase;
if ("Succeeded".equals(phase)) return null; // terminated
if ("Failed".equals(phase) || "Unknown".equals(phase)) return "unexpected pod phase, phase=" + phase;
if ("Pending".equals(phase)) {
// newly created pod may not have container status yet, containerStatuses is initialized as empty
for (KubePodList.ContainerStatus status : pod.status.containerStatuses) {
if (status.state.waiting != null && "ImagePullBackOff".equals(status.state.waiting.reason)) {
return "ImagePullBackOff: " + status.state.waiting.message;
}
}
// for unschedulable pod
for (KubePodList.PodCondition condition : pod.status.conditions) {
if ("PodScheduled".equals(condition.type) && "False".equals(condition.status) && Duration.between(condition.lastTransitionTime, now).toSeconds() >= 300) {
return condition.reason + ": " + condition.message;
}
}
}
if ("Running".equals(phase)) {
boolean ready = true;
for (KubePodList.ContainerStatus status : pod.status.containerStatuses) {
if (status.state.waiting != null && "CrashLoopBackOff".equals(status.state.waiting.reason)) {
return "CrashLoopBackOff: " + status.state.waiting.message;
}
boolean containerReady = Boolean.TRUE.equals(status.ready);
if (!containerReady && status.lastState != null && status.lastState.terminated != null) {
var terminated = status.lastState.terminated;
return "pod was terminated, reason=" + terminated.reason + ", exitCode=" + terminated.exitCode;
}
if (!containerReady) {
ready = false;
}
}
if (ready) return null; // all running, all ready
}
ZonedDateTime startTime = pod.status.startTime != null ? pod.status.startTime : pod.metadata.creationTimestamp; // startTime may not be populated yet if pod is just created
Duration elapsed = Duration.between(startTime, now);
if (elapsed.toSeconds() >= 300) {
// can be: 1) the pod took too long to become ready after start, or 2) the readiness check failed in the middle of the run
return "pod is not in ready state, uptime=" + elapsed;
}
return null;
}
private void publishPodFailure(KubePodList.Pod pod, String errorMessage) {
var now = Instant.now();
var message = new StatMessage();
message.id = LogManager.ID_GENERATOR.next(now);
message.date = now;
message.result = "ERROR";
message.app = pod.metadata.labels.getOrDefault("app", pod.metadata.name);
message.host = pod.metadata.name;
message.errorCode = "POD_FAILURE";
message.errorMessage = errorMessage;
message.info = Map.of("pod", JSON.toJSON(pod));
publisher.publish(message);
}
}
| neowu/core-ng-project | ext/monitor/src/main/java/app/monitor/job/KubeMonitorJob.java | Java | apache-2.0 | 5,150 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.internal.logging.log4j;
import static org.apache.geode.test.util.ResourceUtils.createFileFromResource;
import static org.apache.geode.test.util.ResourceUtils.getResource;
import static org.assertj.core.api.Assertions.assertThat;
import java.net.URL;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.junit.LoggerContextRule;
import org.apache.logging.log4j.test.appender.ListAppender;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TemporaryFolder;
import org.apache.geode.internal.logging.LogService;
import org.apache.geode.test.junit.categories.LoggingTest;
@Category(LoggingTest.class)
public class GemfireVerboseMarkerFilterAcceptIntegrationTest {
private static final String APPENDER_NAME = "LIST";
private static String configFilePath;
private Logger logger;
private String logMessage;
private ListAppender listAppender;
@ClassRule
public static TemporaryFolder temporaryFolder = new TemporaryFolder();
@Rule
public LoggerContextRule loggerContextRule = new LoggerContextRule(configFilePath);
@BeforeClass
public static void setUpLogConfigFile() throws Exception {
String configFileName =
GemfireVerboseMarkerFilterAcceptIntegrationTest.class.getSimpleName() + "_log4j2.xml";
URL resource = getResource(configFileName);
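// copy the classpath resource into a temp folder so LoggerContextRule can load the config from a real file path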
configFilePath = createFileFromResource(resource, temporaryFolder.getRoot(), configFileName)
.getAbsolutePath();
}
@Before
public void setUp() throws Exception {
logger = LogService.getLogger();
logMessage = "this is a log statement";
assertThat(LogService.isUsingGemFireDefaultConfig()).as(LogService.getConfigurationInfo())
.isFalse();
listAppender = loggerContextRule.getListAppender(APPENDER_NAME);
}
@Test
public void gemfireVerboseShouldLogIfGemfireVerboseIsAccept() {
logger.info(LogMarker.GEMFIRE_VERBOSE, logMessage);
LogEvent logEvent = listAppender.getEvents().get(0);
assertThat(logEvent.getLoggerName()).isEqualTo(logger.getName());
assertThat(logEvent.getLevel()).isEqualTo(Level.INFO);
assertThat(logEvent.getMessage().getFormattedMessage()).isEqualTo(logMessage);
}
@Test
public void geodeVerboseShouldLogIfGemfireVerboseIsAccept() {
logger.info(LogMarker.GEODE_VERBOSE, logMessage);
LogEvent logEvent = listAppender.getEvents().get(0);
assertThat(logEvent.getLoggerName()).isEqualTo(logger.getName());
assertThat(logEvent.getLevel()).isEqualTo(Level.INFO);
assertThat(logEvent.getMessage().getFormattedMessage()).isEqualTo(logMessage);
}
}
| pdxrunner/geode | geode-core/src/integrationTest/java/org/apache/geode/internal/logging/log4j/GemfireVerboseMarkerFilterAcceptIntegrationTest.java | Java | apache-2.0 | 3,615 |
package com.yueny.demo.job.controller;
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.ResponseBody;
import com.yueny.demo.common.example.bo.ModifyDemoBo;
import com.yueny.demo.common.example.service.IDataPrecipitationService;
import lombok.extern.slf4j.Slf4j;
/**
* @author yueny09 <deep_blue_yang@163.com>
*
* @DATE 2016年2月16日 下午8:23:11
*
*/
@Controller
@Slf4j
public class DemoController {
@Autowired
private IDataPrecipitationService dataPrecipitationService;
/**
*
*/
@RequestMapping(value = { "/", "welcome" }, method = RequestMethod.GET)
@ResponseBody
public List<ModifyDemoBo> bar() {
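// returns every precipitation record; falls back to null if the query throws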
try {
return dataPrecipitationService.queryAll();
} catch (final Exception e) {
log.error("exception:", e);
}
return null;
}
@RequestMapping(value = "/healthy", method = RequestMethod.GET)
@ResponseBody
public String healthy() {
return "OK";
}
}
| yueny/pra | job/job_elastic/src/main/java/com/yueny/demo/job/controller/DemoController.java | Java | apache-2.0 | 1,201 |
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.extension;
//#imports
import akka.actor.Extension;
import akka.actor.AbstractExtensionId;
import akka.actor.ExtensionIdProvider;
import akka.actor.ActorSystem;
import akka.actor.ExtendedActorSystem;
import scala.concurrent.duration.Duration;
import com.typesafe.config.Config;
import java.util.concurrent.TimeUnit;
//#imports
import akka.actor.UntypedActor;
import org.junit.Test;
public class SettingsExtensionDocTest {
static
//#extension
public class SettingsImpl implements Extension {
public final String DB_URI;
public final Duration CIRCUIT_BREAKER_TIMEOUT;
public SettingsImpl(Config config) {
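// reads myapp.db.uri and myapp.circuit-breaker.timeout from the actor system's configuration (e.g. application.conf)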
DB_URI = config.getString("myapp.db.uri");
CIRCUIT_BREAKER_TIMEOUT =
Duration.create(config.getDuration("myapp.circuit-breaker.timeout",
TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
}
}
//#extension
static
//#extensionid
public class Settings extends AbstractExtensionId<SettingsImpl>
implements ExtensionIdProvider {
public final static Settings SettingsProvider = new Settings();
private Settings() {}
public Settings lookup() {
return Settings.SettingsProvider;
}
public SettingsImpl createExtension(ExtendedActorSystem system) {
return new SettingsImpl(system.settings().config());
}
}
//#extensionid
static
//#extension-usage-actor
public class MyActor extends UntypedActor {
// typically you would use static import of the Settings.SettingsProvider field
final SettingsImpl settings =
Settings.SettingsProvider.get(getContext().system());
Connection connection =
connect(settings.DB_URI, settings.CIRCUIT_BREAKER_TIMEOUT);
//#extension-usage-actor
public Connection connect(String dbUri, Duration circuitBreakerTimeout) {
return new Connection();
}
public void onReceive(Object msg) {
}
//#extension-usage-actor
}
//#extension-usage-actor
public static class Connection {
}
@Test
public void demonstrateHowToCreateAndUseAnAkkaExtensionInJava() {
final ActorSystem system = null;
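// system is left null so this doc snippet compiles without creating an ActorSystem; the catch below swallows the resulting exception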
try {
//#extension-usage
// typically you would use static import of the Settings.SettingsProvider field
String dbUri = Settings.SettingsProvider.get(system).DB_URI;
//#extension-usage
} catch (Exception e) {
//do nothing
}
}
}
| Horusiath/akka.net | Documentation/csharp/code/docs/extension/SettingsExtensionDocTest.java | Java | apache-2.0 | 2,450 |