Dataset Preview
The full dataset viewer is not available; only a preview of the rows is shown.
The dataset generation failed because of a cast error
Error code: DatasetGenerationCastError
Exception: DatasetGenerationCastError
Message: An error occurred while generating the dataset: all the data files must have the same columns, but at some point there are 1 new columns ({'num_mask_tokens'}). This happened while the csv dataset builder was generating data using hf://datasets/anshulsc/REID2.0/csi_train.csv (at revision a752276c8ac7f9da37aba800e5b9935ec51d9ca4). Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations).

Traceback:

Traceback (most recent call last):
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1870, in _prepare_split_single
    writer.write_table(table)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 622, in write_table
    pa_table = table_cast(pa_table, self._schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2292, in table_cast
    return cast_table_to_schema(table, schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2240, in cast_table_to_schema
    raise CastError(
datasets.table.CastError: Couldn't cast
code: string
indentifier: string
num_mask_tokens: int64
lang: string
-- schema metadata --
pandas: '{"index_columns": [{"kind": "range", "name": null, "start": 0, "' + 727
to
{'code': Value(dtype='string', id=None), 'indentifier': Value(dtype='string', id=None), 'lang': Value(dtype='string', id=None)}
because column names don't match

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1420, in compute_config_parquet_and_info_response
    parquet_operations = convert_to_parquet(builder)
  File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1052, in convert_to_parquet
    builder.download_and_prepare(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 924, in download_and_prepare
    self._download_and_prepare(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1000, in _download_and_prepare
    self._prepare_split(split_generator, **prepare_split_kwargs)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split
    for job_id, done, content in self._prepare_split_single(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1872, in _prepare_split_single
    raise DatasetGenerationCastError.from_cast_error(
datasets.exceptions.DatasetGenerationCastError: An error occurred while generating the dataset: all the data files must have the same columns, but at some point there are 1 new columns ({'num_mask_tokens'}). This happened while the csv dataset builder was generating data using hf://datasets/anshulsc/REID2.0/csi_train.csv (at revision a752276c8ac7f9da37aba800e5b9935ec51d9ca4). Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)
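The error names two remedies: make csi_train.csv carry the same columns as the other data files, or declare it as its own configuration in the dataset card (see the docs link in the message above). Below is a minimal sketch of the first remedy, assuming the extra num_mask_tokens column can simply be dropped rather than added to the other CSVs; the filename and column name come from the error above, and the local path is hypothetical.

```python
# Hedged sketch: align csi_train.csv with the shared schema
# (code, indentifier, lang) by dropping the extra column.
# Assumes dropping 'num_mask_tokens' is acceptable; the alternative is to
# add that column to every other CSV, or to give this file its own
# configuration via the `configs` field of the README's YAML metadata.
import pandas as pd

df = pd.read_csv("csi_train.csv")          # local copy of the offending file
df = df.drop(columns=["num_mask_tokens"])  # keep only the shared columns
df.to_csv("csi_train.csv", index=False)    # then re-upload to the dataset repo
```

Either way, the viewer should regenerate once all files in a configuration agree on a single schema.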
code (string) | indentifier (string) | lang (string)
---|---|---
/*
* Copyright 2002-2024 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.web.context;
import java.io.FileNotFoundException;
import java.io.IOException;
import jakarta.servlet.ServletContext;
import jakarta.servlet.ServletContextEvent;
import jakarta.servlet.ServletContextListener;
import org.junit.jupiter.api.Test;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanCreationException;
import org.springframework.beans.factory.BeanDefinitionStoreException;
import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import org.springframework.beans.testfixture.beans.LifecycleBean;
import org.springframework.beans.testfixture.beans.TestBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextException;
import org.springframework.context.ApplicationContextInitializer;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.springframework.core.env.ConfigurableEnvironment;
import org.springframework.core.env.PropertySource;
import org.springframework.util.StringUtils;
import org.springframework.web.context.support.WebApplicationContextUtils;
import org.springframework.web.context.support.XmlWebApplicationContext;
import org.springframework.web.servlet.DispatcherServlet;
import org.springframework.web.servlet.SimpleWebApplicationContext;
import org.springframework.web.testfixture.servlet.MockServletConfig;
import org.springframework.web.testfixture.servlet.MockServletContext;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
/**
* Tests for {@link ContextLoader} and {@link ContextLoaderListener}.
*
* @author Juergen Hoeller
* @author Sam Brannen
* @author Chris Beams
* @since 12.08.2003
* @see org.springframework.web.context.support.Spr8510Tests
*/
class ContextLoaderTests {
@Test
void contextLoaderListenerWithDefaultContext() {
MockServletContext sc = new MockServletContext("");
sc.addInitParameter(ContextLoader.CONFIG_LOCATION_PARAM,
"/org/springframework/web/context/WEB-INF/applicationContext.xml " +
"/org/springframework/web/context/WEB-INF/context-addition.xml");
ServletContextListener listener = new ContextLoaderListener();
ServletContextEvent [MASK] = new ServletContextEvent(sc);
listener.contextInitialized( [MASK] );
String contextAttr = WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE;
WebApplicationContext context = (WebApplicationContext) sc.getAttribute(contextAttr);
boolean condition1 = context instanceof XmlWebApplicationContext;
assertThat(condition1).as("Correct WebApplicationContext exposed in ServletContext").isTrue();
assertThat(WebApplicationContextUtils.getRequiredWebApplicationContext(sc)).isInstanceOf(
XmlWebApplicationContext.class);
LifecycleBean lb = (LifecycleBean) context.getBean("lifecycle");
assertThat(context.containsBean("father")).as("Has father").isTrue();
assertThat(context.containsBean("rod")).as("Has rod").isTrue();
assertThat(context.containsBean("kerry")).as("Has kerry").isTrue();
boolean condition = !lb.isDestroyed();
assertThat(condition).as("Not destroyed").isTrue();
assertThat(context.containsBean("beans1.bean1")).isFalse();
assertThat(context.containsBean("beans1.bean2")).isFalse();
listener.contextDestroyed( [MASK] );
assertThat(lb.isDestroyed()).as("Destroyed").isTrue();
assertThat(sc.getAttribute(contextAttr)).isNull();
assertThat(WebApplicationContextUtils.getWebApplicationContext(sc)).isNull();
}
/**
* Addresses the issues raised in <a
* href="https://opensource.atlassian.com/projects/spring/browse/SPR-4008"
* target="_blank">SPR-4008</a>: <em>Supply an opportunity to customize
* context before calling refresh in ContextLoaders</em>.
*/
@Test
void contextLoaderListenerWithCustomizedContextLoader() {
final StringBuilder builder = new StringBuilder();
final String expectedContents = "customizeContext() was called";
final MockServletContext sc = new MockServletContext("");
sc.addInitParameter(ContextLoader.CONFIG_LOCATION_PARAM,
"/org/springframework/web/context/WEB-INF/applicationContext.xml");
ServletContextListener listener = new ContextLoaderListener() {
@Override
protected void customizeContext(ServletContext sc, ConfigurableWebApplicationContext wac) {
assertThat(sc).as("The ServletContext should not be null.").isNotNull();
assertThat(sc).as("Verifying that we received the expected ServletContext.").isEqualTo(sc);
assertThat(wac.isActive()).as("The ApplicationContext should not yet have been refreshed.").isFalse();
builder.append(expectedContents);
}
};
listener.contextInitialized(new ServletContextEvent(sc));
assertThat(builder.toString()).as("customizeContext() should have been called.").isEqualTo(expectedContents);
}
@Test
void contextLoaderListenerWithLocalContextInitializers() {
MockServletContext sc = new MockServletContext("");
sc.addInitParameter(ContextLoader.CONFIG_LOCATION_PARAM,
"org/springframework/web/context/WEB-INF/ContextLoaderTests-acc-context.xml");
sc.addInitParameter(ContextLoader.CONTEXT_INITIALIZER_CLASSES_PARAM, StringUtils.arrayToCommaDelimitedString(
new Object[] {TestContextInitializer.class.getName(), TestWebContextInitializer.class.getName()}));
ContextLoaderListener listener = new ContextLoaderListener();
listener.contextInitialized(new ServletContextEvent(sc));
WebApplicationContext wac = WebApplicationContextUtils.getRequiredWebApplicationContext(sc);
TestBean testBean = wac.getBean(TestBean.class);
assertThat(testBean.getName()).isEqualTo("testName");
assertThat(wac.getServletContext().getAttribute("initialized")).isNotNull();
}
@Test
void contextLoaderListenerWithGlobalContextInitializers() {
MockServletContext sc = new MockServletContext("");
sc.addInitParameter(ContextLoader.CONFIG_LOCATION_PARAM,
"org/springframework/web/context/WEB-INF/ContextLoaderTests-acc-context.xml");
sc.addInitParameter(ContextLoader.GLOBAL_INITIALIZER_CLASSES_PARAM, StringUtils.arrayToCommaDelimitedString(
new Object[] {TestContextInitializer.class.getName(), TestWebContextInitializer.class.getName()}));
ContextLoaderListener listener = new ContextLoaderListener();
listener.contextInitialized(new ServletContextEvent(sc));
WebApplicationContext wac = WebApplicationContextUtils.getRequiredWebApplicationContext(sc);
TestBean testBean = wac.getBean(TestBean.class);
assertThat(testBean.getName()).isEqualTo("testName");
assertThat(wac.getServletContext().getAttribute("initialized")).isNotNull();
}
@Test
void contextLoaderListenerWithMixedContextInitializers() {
MockServletContext sc = new MockServletContext("");
sc.addInitParameter(ContextLoader.CONFIG_LOCATION_PARAM,
"org/springframework/web/context/WEB-INF/ContextLoaderTests-acc-context.xml");
sc.addInitParameter(ContextLoader.CONTEXT_INITIALIZER_CLASSES_PARAM, TestContextInitializer.class.getName());
sc.addInitParameter(ContextLoader.GLOBAL_INITIALIZER_CLASSES_PARAM, TestWebContextInitializer.class.getName());
ContextLoaderListener listener = new ContextLoaderListener();
listener.contextInitialized(new ServletContextEvent(sc));
WebApplicationContext wac = WebApplicationContextUtils.getRequiredWebApplicationContext(sc);
TestBean testBean = wac.getBean(TestBean.class);
assertThat(testBean.getName()).isEqualTo("testName");
assertThat(wac.getServletContext().getAttribute("initialized")).isNotNull();
}
@Test
void contextLoaderListenerWithProgrammaticInitializers() {
MockServletContext sc = new MockServletContext("");
sc.addInitParameter(ContextLoader.CONFIG_LOCATION_PARAM,
"org/springframework/web/context/WEB-INF/ContextLoaderTests-acc-context.xml");
ContextLoaderListener listener = new ContextLoaderListener();
listener.setContextInitializers(new TestContextInitializer(), new TestWebContextInitializer());
listener.contextInitialized(new ServletContextEvent(sc));
WebApplicationContext wac = WebApplicationContextUtils.getRequiredWebApplicationContext(sc);
TestBean testBean = wac.getBean(TestBean.class);
assertThat(testBean.getName()).isEqualTo("testName");
assertThat(wac.getServletContext().getAttribute("initialized")).isNotNull();
}
@Test
void contextLoaderListenerWithProgrammaticAndLocalInitializers() {
MockServletContext sc = new MockServletContext("");
sc.addInitParameter(ContextLoader.CONFIG_LOCATION_PARAM,
"org/springframework/web/context/WEB-INF/ContextLoaderTests-acc-context.xml");
sc.addInitParameter(ContextLoader.CONTEXT_INITIALIZER_CLASSES_PARAM, TestContextInitializer.class.getName());
ContextLoaderListener listener = new ContextLoaderListener();
listener.setContextInitializers(new TestWebContextInitializer());
listener.contextInitialized(new ServletContextEvent(sc));
WebApplicationContext wac = WebApplicationContextUtils.getRequiredWebApplicationContext(sc);
TestBean testBean = wac.getBean(TestBean.class);
assertThat(testBean.getName()).isEqualTo("testName");
assertThat(wac.getServletContext().getAttribute("initialized")).isNotNull();
}
@Test
void contextLoaderListenerWithProgrammaticAndGlobalInitializers() {
MockServletContext sc = new MockServletContext("");
sc.addInitParameter(ContextLoader.CONFIG_LOCATION_PARAM,
"org/springframework/web/context/WEB-INF/ContextLoaderTests-acc-context.xml");
sc.addInitParameter(ContextLoader.GLOBAL_INITIALIZER_CLASSES_PARAM, TestWebContextInitializer.class.getName());
ContextLoaderListener listener = new ContextLoaderListener();
listener.setContextInitializers(new TestContextInitializer());
listener.contextInitialized(new ServletContextEvent(sc));
WebApplicationContext wac = WebApplicationContextUtils.getRequiredWebApplicationContext(sc);
TestBean testBean = wac.getBean(TestBean.class);
assertThat(testBean.getName()).isEqualTo("testName");
assertThat(wac.getServletContext().getAttribute("initialized")).isNotNull();
}
@Test
void registeredContextInitializerCanAccessServletContextParamsViaEnvironment() {
MockServletContext sc = new MockServletContext("");
// config file doesn't matter - just a placeholder
sc.addInitParameter(ContextLoader.CONFIG_LOCATION_PARAM,
"/org/springframework/web/context/WEB-INF/empty-context.xml");
sc.addInitParameter("someProperty", "someValue");
sc.addInitParameter(ContextLoader.CONTEXT_INITIALIZER_CLASSES_PARAM,
EnvApplicationContextInitializer.class.getName());
ContextLoaderListener listener = new ContextLoaderListener();
listener.contextInitialized(new ServletContextEvent(sc));
}
@Test
void contextLoaderListenerWithUnknownContextInitializer() {
MockServletContext sc = new MockServletContext("");
// config file doesn't matter. just a placeholder
sc.addInitParameter(ContextLoader.CONFIG_LOCATION_PARAM,
"/org/springframework/web/context/WEB-INF/empty-context.xml");
sc.addInitParameter(ContextLoader.CONTEXT_INITIALIZER_CLASSES_PARAM,
StringUtils.arrayToCommaDelimitedString(new Object[] {UnknownContextInitializer.class.getName()}));
ContextLoaderListener listener = new ContextLoaderListener();
assertThatExceptionOfType(ApplicationContextException.class).isThrownBy(() ->
listener.contextInitialized(new ServletContextEvent(sc)))
.withMessageContaining("not assignable");
}
@Test
void contextLoaderWithCustomContext() {
MockServletContext sc = new MockServletContext("");
sc.addInitParameter(ContextLoader.CONTEXT_CLASS_PARAM,
"org.springframework.web.servlet.SimpleWebApplicationContext");
ServletContextListener listener = new ContextLoaderListener();
ServletContextEvent [MASK] = new ServletContextEvent(sc);
listener.contextInitialized( [MASK] );
String contextAttr = WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE;
WebApplicationContext wc = (WebApplicationContext) sc.getAttribute(contextAttr);
boolean condition = wc instanceof SimpleWebApplicationContext;
assertThat(condition).as("Correct WebApplicationContext exposed in ServletContext").isTrue();
}
@Test
void contextLoaderWithInvalidLocation() {
MockServletContext sc = new MockServletContext("");
sc.addInitParameter(ContextLoader.CONFIG_LOCATION_PARAM, "/WEB-INF/myContext.xml");
ServletContextListener listener = new ContextLoaderListener();
ServletContextEvent [MASK] = new ServletContextEvent(sc);
assertThatExceptionOfType(BeanDefinitionStoreException.class).isThrownBy(() ->
listener.contextInitialized( [MASK] ))
.withCauseInstanceOf(FileNotFoundException.class);
}
@Test
void contextLoaderWithInvalidContext() {
MockServletContext sc = new MockServletContext("");
sc.addInitParameter(ContextLoader.CONTEXT_CLASS_PARAM,
"org.springframework.web.context.support.InvalidWebApplicationContext");
ServletContextListener listener = new ContextLoaderListener();
ServletContextEvent [MASK] = new ServletContextEvent(sc);
assertThatExceptionOfType(ApplicationContextException.class).isThrownBy(() ->
listener.contextInitialized( [MASK] ))
.withCauseInstanceOf(ClassNotFoundException.class);
}
@Test
void contextLoaderWithDefaultLocation() {
MockServletContext sc = new MockServletContext("");
ServletContextListener listener = new ContextLoaderListener();
ServletContextEvent [MASK] = new ServletContextEvent(sc);
assertThatExceptionOfType(BeanDefinitionStoreException.class)
.isThrownBy(() -> listener.contextInitialized( [MASK] ))
.havingCause()
.isInstanceOf(IOException.class)
.withMessageContaining("/WEB-INF/applicationContext.xml");
}
@Test
void frameworkServletWithDefaultLocation() {
DispatcherServlet servlet = new DispatcherServlet();
servlet.setContextClass(XmlWebApplicationContext.class);
assertThatExceptionOfType(BeanDefinitionStoreException.class)
.isThrownBy(() -> servlet.init(new MockServletConfig(new MockServletContext(""), "test")))
.havingCause()
.isInstanceOf(IOException.class)
.withMessageContaining("/WEB-INF/test-servlet.xml");
}
@Test
void frameworkServletWithCustomLocation() throws Exception {
DispatcherServlet servlet = new DispatcherServlet();
servlet.setContextConfigLocation("/org/springframework/web/context/WEB-INF/testNamespace.xml "
+ "/org/springframework/web/context/WEB-INF/context-addition.xml");
servlet.init(new MockServletConfig(new MockServletContext(""), "test"));
assertThat(servlet.getWebApplicationContext().containsBean("kerry")).isTrue();
assertThat(servlet.getWebApplicationContext().containsBean("kerryX")).isTrue();
}
@Test
void classPathXmlApplicationContext() {
ApplicationContext context = new ClassPathXmlApplicationContext(
"/org/springframework/web/context/WEB-INF/applicationContext.xml");
assertThat(context.containsBean("father")).as("Has father").isTrue();
assertThat(context.containsBean("rod")).as("Has rod").isTrue();
assertThat(context.containsBean("kerry")).as("Hasn't kerry").isFalse();
assertThat(((TestBean) context.getBean("rod")).getSpouse()).as("Doesn't have spouse").isNull();
assertThat(((TestBean) context.getBean("rod")).getName()).as("myinit not evaluated").isEqualTo("Roderick");
context = new ClassPathXmlApplicationContext(
"/org/springframework/web/context/WEB-INF/applicationContext.xml",
"/org/springframework/web/context/WEB-INF/context-addition.xml");
assertThat(context.containsBean("father")).as("Has father").isTrue();
assertThat(context.containsBean("rod")).as("Has rod").isTrue();
assertThat(context.containsBean("kerry")).as("Has kerry").isTrue();
}
@Test
void singletonDestructionOnStartupFailure() {
assertThatExceptionOfType(BeanCreationException.class).isThrownBy(() ->
new ClassPathXmlApplicationContext(new String[] {
"/org/springframework/web/context/WEB-INF/applicationContext.xml",
"/org/springframework/web/context/WEB-INF/fail.xml" }) {
@Override
public void refresh() throws BeansException {
try {
super.refresh();
}
catch (BeanCreationException ex) {
DefaultListableBeanFactory factory = (DefaultListableBeanFactory) getBeanFactory();
assertThat(factory.getSingletonCount()).isEqualTo(0);
throw ex;
}
}
});
}
private static class TestContextInitializer implements ApplicationContextInitializer<ConfigurableApplicationContext> {
@Override
public void initialize(ConfigurableApplicationContext applicationContext) {
ConfigurableEnvironment environment = applicationContext.getEnvironment();
environment.getPropertySources().addFirst(new PropertySource<>("testPropertySource") {
@Override
public Object getProperty(String key) {
return "name".equals(key) ? "testName" : null;
}
});
}
}
private static class TestWebContextInitializer implements
ApplicationContextInitializer<ConfigurableWebApplicationContext> {
@Override
public void initialize(ConfigurableWebApplicationContext applicationContext) {
ServletContext ctx = applicationContext.getServletContext(); // type-safe access to servlet-specific methods
ctx.setAttribute("initialized", true);
}
}
private static class EnvApplicationContextInitializer
implements ApplicationContextInitializer<ConfigurableWebApplicationContext> {
@Override
public void initialize(ConfigurableWebApplicationContext applicationContext) {
// test that ApplicationContextInitializers can access ServletContext properties
// via the environment (SPR-8991)
String value = applicationContext.getEnvironment().getRequiredProperty("someProperty");
assertThat(value).isEqualTo("someValue");
}
}
private interface UnknownApplicationContext extends ConfigurableApplicationContext {
void unheardOf();
}
private static class UnknownContextInitializer implements ApplicationContextInitializer<UnknownApplicationContext> {
@Override
public void initialize(UnknownApplicationContext applicationContext) {
applicationContext.unheardOf();
}
}
}
| event | java |
/*
* The MIT License
*
* Copyright (c) 2018 CloudBees, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package jenkins.security.seed;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.xml.HasXPath.hasXPath;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThrows;
import hudson.model.User;
import java.net.URI;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.htmlunit.ElementNotFoundException;
import org.htmlunit.FailingHttpStatusCodeException;
import org.htmlunit.HttpMethod;
import org.htmlunit.WebRequest;
import org.htmlunit.html.HtmlPage;
import org.htmlunit.xml.XmlPage;
import org.junit.Rule;
import org.junit.Test;
import org.jvnet.hudson.test.Issue;
import org.jvnet.hudson.test.JenkinsRule;
import test.security.realm.InMemorySecurityRealm;
public class UserSeedPropertyTest {
@Rule
public JenkinsRule j = new JenkinsRule();
@Test
@Issue("SECURITY-901")
public void userCreation_implies_ [MASK] Creation() {
User alice = User.getById("alice", true);
assertNotNull(alice);
UserSeedProperty [MASK] = alice.getProperty(UserSeedProperty.class);
assertNotNull( [MASK] );
assertNotNull( [MASK] .getSeed());
}
@Test
@Issue("SECURITY-901")
public void [MASK] Renewal_changeTheSeed() throws Exception {
j.jenkins.setCrumbIssuer(null);
Set<String> seeds = new HashSet<>();
User alice = User.getById("alice", true);
UserSeedProperty [MASK] = alice.getProperty(UserSeedProperty.class);
seeds.add( [MASK] .getSeed());
int times = 10;
for (int i = 1; i < times; i++) {
requestRenewSeedForUser(alice);
[MASK] = alice.getProperty(UserSeedProperty.class);
seeds.add( [MASK] .getSeed());
}
assertThat(seeds.size(), equalTo(times));
assertFalse(seeds.contains(""));
assertFalse(seeds.contains(null));
}
@Test
@Issue("SECURITY-901")
public void initialUserSeedIsAlwaysDifferent() throws Exception {
Set<String> seeds = new HashSet<>();
int times = 10;
for (int i = 0; i < times; i++) {
User alice = User.getById("alice", true);
UserSeedProperty [MASK] = alice.getProperty(UserSeedProperty.class);
seeds.add( [MASK] .getSeed());
alice.delete();
}
assertThat(seeds.size(), equalTo(times));
assertFalse(seeds.contains(""));
assertFalse(seeds.contains(null));
}
@Test
@Issue("SECURITY-901")
public void differentUserHaveDifferentInitialSeeds() throws Exception {
Set<String> seeds = new HashSet<>();
List<String> userIds = Arrays.asList("Alice", "Bob", "Charles", "Derek", "Edward");
userIds.forEach(userId -> {
User user = User.getById(userId, true);
UserSeedProperty [MASK] = user.getProperty(UserSeedProperty.class);
seeds.add( [MASK] .getSeed());
});
assertThat(seeds.size(), equalTo(userIds.size()));
assertFalse(seeds.contains(""));
assertFalse(seeds.contains(null));
}
@Test
@Issue("SECURITY-901")
public void userCreatedInThirdPartyRealm_cannotReconnect_afterSessionInvalidation_andRealmDeletion() throws Exception {
InMemorySecurityRealm realm = new InMemorySecurityRealm();
j.jenkins.setSecurityRealm(realm);
j.jenkins.setCrumbIssuer(null);
String ALICE = "alice";
realm.createAccount(ALICE);
JenkinsRule.WebClient wc = j.createWebClient();
wc.login(ALICE);
User alice = User.getById(ALICE, false);
assertNotNull(alice);
UserSeedProperty [MASK] = alice.getProperty(UserSeedProperty.class);
assertNotNull( [MASK] );
assertUserConnected(wc, ALICE);
realm.deleteAccount(ALICE);
// even after the security realm deleted the user, they can still connect, until session invalidation
assertUserConnected(wc, ALICE);
requestRenewSeedForUser(alice);
assertUserNotConnected(wc, ALICE);
assertUserConnected(wc, "anonymous");
FailingHttpStatusCodeException e = assertThrows("Alice does not exist any longer and so should not be able to login", FailingHttpStatusCodeException.class, () -> wc.login(ALICE));
assertEquals(401, e.getStatusCode());
}
@Test
@Issue("SECURITY-901")
public void userAfterBeingDeletedInThirdPartyRealm_canStillUseTheirSession_withDisabledSeed() throws Exception {
boolean currentStatus = UserSeedProperty.DISABLE_USER_SEED;
try {
UserSeedProperty.DISABLE_USER_SEED = true;
InMemorySecurityRealm realm = new InMemorySecurityRealm();
j.jenkins.setSecurityRealm(realm);
j.jenkins.setCrumbIssuer(null);
String ALICE = "alice";
realm.createAccount(ALICE);
JenkinsRule.WebClient wc = j.createWebClient();
wc.login(ALICE);
User alice = User.getById(ALICE, false);
assertNotNull(alice);
UserSeedProperty [MASK] = alice.getProperty(UserSeedProperty.class);
assertNotNull( [MASK] );
assertUserConnected(wc, ALICE);
realm.deleteAccount(ALICE);
// even after the security realm deleted the user, they can still connect, until session invalidation
assertUserConnected(wc, ALICE);
// as the feature is disabled, we cannot renew the seed
assertThrows("The feature should be disabled", FailingHttpStatusCodeException.class, () -> requestRenewSeedForUser(alice));
// failed attempt to renew the seed does not have any effect
assertUserConnected(wc, ALICE);
UserSeedProperty [MASK] Property = alice.getProperty(UserSeedProperty.class);
[MASK] Property.renewSeed();
// failed attempt to renew the seed does not have any effect
assertUserConnected(wc, ALICE);
JenkinsRule.WebClient wc2 = j.createWebClient();
FailingHttpStatusCodeException e = assertThrows("Alice is not longer backed by security realm", FailingHttpStatusCodeException.class, () -> wc2.login(ALICE));
assertEquals(401, e.getStatusCode());
} finally {
UserSeedProperty.DISABLE_USER_SEED = currentStatus;
}
}
@Test
@Issue("SECURITY-901")
public void userCreatedInThirdPartyRealm_canReconnect_afterSessionInvalidation() throws Exception {
InMemorySecurityRealm realm = new InMemorySecurityRealm();
j.jenkins.setSecurityRealm(realm);
j.jenkins.setCrumbIssuer(null);
String ALICE = "alice";
realm.createAccount(ALICE);
JenkinsRule.WebClient wc = j.createWebClient();
wc.login(ALICE);
User alice = User.getById(ALICE, false);
assertNotNull(alice);
UserSeedProperty [MASK] = alice.getProperty(UserSeedProperty.class);
assertNotNull( [MASK] );
assertUserConnected(wc, ALICE);
requestRenewSeedForUser(alice);
assertUserNotConnected(wc, ALICE);
assertUserConnected(wc, "anonymous");
wc.login(ALICE);
assertUserConnected(wc, ALICE);
}
@Test
public void [MASK] Section_isCorrectlyDisplayed() throws Exception {
InMemorySecurityRealm realm = new InMemorySecurityRealm();
j.jenkins.setSecurityRealm(realm);
j.jenkins.setCrumbIssuer(null);
String ALICE = "alice";
realm.createAccount(ALICE);
JenkinsRule.WebClient wc = j.createWebClient();
wc.login(ALICE);
User alice = User.getById(ALICE, false);
assertNotNull(alice);
HtmlPage htmlPage = wc.goTo(alice.getUrl() + "/security/");
htmlPage.getDocumentElement().getOneHtmlElementByAttribute("div", "class", "user-seed-panel");
}
@Test
public void [MASK] Section_isCorrectlyHidden_withSpecificSetting() throws Exception {
boolean currentStatus = UserSeedProperty.HIDE_USER_SEED_SECTION;
try {
UserSeedProperty.HIDE_USER_SEED_SECTION = true;
InMemorySecurityRealm realm = new InMemorySecurityRealm();
j.jenkins.setSecurityRealm(realm);
j.jenkins.setCrumbIssuer(null);
String ALICE = "alice";
realm.createAccount(ALICE);
JenkinsRule.WebClient wc = j.createWebClient();
wc.login(ALICE);
User alice = User.getById(ALICE, false);
assertNotNull(alice);
HtmlPage htmlPage = wc.goTo(alice.getUrl() + "/security/");
assertThrows("Seed section should not be displayed", ElementNotFoundException.class, () -> htmlPage.getDocumentElement().getOneHtmlElementByAttribute("div", "class", "user-seed-panel"));
}
finally {
UserSeedProperty.HIDE_USER_SEED_SECTION = currentStatus;
}
}
private void assertUserConnected(JenkinsRule.WebClient wc, String expectedUsername) throws Exception {
XmlPage page = (XmlPage) wc.goTo("whoAmI/api/xml", "application/xml");
assertThat(page, hasXPath("//name", is(expectedUsername)));
}
private void assertUserNotConnected(JenkinsRule.WebClient wc, String notExpectedUsername) throws Exception {
XmlPage page = (XmlPage) wc.goTo("whoAmI/api/xml", "application/xml");
assertThat(page, hasXPath("//name", not(is(notExpectedUsername))));
}
private void requestRenewSeedForUser(User user) throws Exception {
JenkinsRule.WebClient wc = j.createWebClient();
WebRequest request = new WebRequest(new URI(j.jenkins.getRootUrl() + user.getUrl() + "/descriptorByName/" + UserSeedProperty.class.getName() + "/renewSessionSeed/").toURL(), HttpMethod.POST);
wc.getPage(request);
}
}
| userSeed | java |
package io.socket.client;
import io.socket.emitter.Emitter;
import io.socket.parser.Packet;
import io.socket.parser.Parser;
import io.socket.thread.EventThread;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* The socket class for Socket.IO Client.
*/
public class Socket extends Emitter {
private static final Logger logger = Logger.getLogger(Socket.class.getName());
/**
* Called on a connection.
*/
public static final String EVENT_CONNECT = "connect";
/**
* Called on a disconnection.
*/
public static final String EVENT_DISCONNECT = "disconnect";
/**
* Called on a connection error.
*
* <p>Parameters:</p>
* <ul>
* <li>(Exception) error data.</li>
* </ul>
*/
public static final String EVENT_CONNECT_ERROR = "connect_error";
static final String EVENT_MESSAGE = "message";
protected static Map<String, Integer> RESERVED_EVENTS = new HashMap<String, Integer>() {{
put(EVENT_CONNECT, 1);
put(EVENT_CONNECT_ERROR, 1);
put(EVENT_DISCONNECT, 1);
// used on the server-side
put("disconnecting", 1);
put("newListener", 1);
put("removeListener", 1);
}};
/*package*/ String id;
private volatile boolean connected;
private int ids;
private final String nsp;
private final Manager io;
private final Map<String, String> auth;
private final Map<Integer, Ack> acks = new ConcurrentHashMap<>();
private Queue<On.Handle> subs;
private final Queue<List<Object>> receiveBuffer = new ConcurrentLinkedQueue<>();
private final Queue<Packet<JSONArray>> sendBuffer = new ConcurrentLinkedQueue<>();
private final ConcurrentLinkedQueue<Listener> onAnyIncomingListeners = new ConcurrentLinkedQueue<>();
private final ConcurrentLinkedQueue<Listener> onAnyOutgoingListeners = new ConcurrentLinkedQueue<>();
public Socket(Manager io, String nsp, Manager.Options opts) {
this.io = io;
this.nsp = nsp;
this.auth = opts != null ? opts.auth : null;
}
private void subEvents() {
if (this.subs != null) return;
final Manager io = Socket.this.io;
Socket.this.subs = new LinkedList<On.Handle>() {{
add(On.on(io, Manager.EVENT_OPEN, new Listener() {
@Override
public void call(Object... args) {
Socket.this.onopen();
}
}));
add(On.on(io, Manager.EVENT_PACKET, new Listener() {
@Override
public void call(Object... args) {
Socket.this.on [MASK] ((Packet<?>) args[0]);
}
}));
add(On.on(io, Manager.EVENT_ERROR, new Listener() {
@Override
public void call(Object... args) {
if (!Socket.this.connected) {
Socket.super.emit(EVENT_CONNECT_ERROR, args[0]);
}
}
}));
add(On.on(io, Manager.EVENT_CLOSE, new Listener() {
@Override
public void call(Object... args) {
Socket.this.onclose(args.length > 0 ? (String) args[0] : null);
}
}));
}};
}
public boolean isActive() {
return this.subs != null;
}
/**
* Connects the socket.
*/
public Socket open() {
EventThread.exec(new Runnable() {
@Override
public void run() {
if (Socket.this.connected || Socket.this.io.isReconnecting()) return;
Socket.this.subEvents();
Socket.this.io.open(); // ensure open
if (Manager.ReadyState.OPEN == Socket.this.io.readyState) Socket.this.onopen();
}
});
return this;
}
/**
* Connects the socket.
*/
public Socket connect() {
return this.open();
}
/**
* Send messages.
*
* @param args data to send.
* @return a reference to this object.
*/
public Socket send(final Object... args) {
EventThread.exec(new Runnable() {
@Override
public void run() {
Socket.this.emit(EVENT_MESSAGE, args);
}
});
return this;
}
/**
* Emits an event. When you pass an {@link Ack} as the last argument, the acknowledgement is handled.
*
* @param event an event name.
* @param args data to send.
* @return a reference to this object.
*/
@Override
public Emitter emit(final String event, final Object... args) {
if (RESERVED_EVENTS.containsKey(event)) {
throw new RuntimeException("'" + event + "' is a reserved event name");
}
EventThread.exec(new Runnable() {
@Override
public void run() {
Ack ack;
Object[] _args;
int lastIndex = args.length - 1;
if (args.length > 0 && args[lastIndex] instanceof Ack) {
_args = new Object[lastIndex];
for (int i = 0; i < lastIndex; i++) {
_args[i] = args[i];
}
ack = (Ack) args[lastIndex];
} else {
_args = args;
ack = null;
}
emit(event, _args, ack);
}
});
return this;
}
/**
* Emits an event with an acknowledge.
*
* @param event an event name
* @param args data to send.
* @param ack the acknowledgement to be called
* @return a reference to this object.
*/
public Emitter emit(final String event, final Object[] args, final Ack ack) {
EventThread.exec(new Runnable() {
@Override
public void run() {
JSONArray jsonArgs = new JSONArray();
jsonArgs.put(event);
if (args != null) {
for (Object arg : args) {
jsonArgs.put(arg);
}
}
Packet<JSONArray> [MASK] = new Packet<>(Parser.EVENT, jsonArgs);
if (ack != null) {
final int ackId = Socket.this.ids;
logger.fine(String.format("emitting [MASK] with ack id %d", ackId));
if (ack instanceof AckWithTimeout) {
final AckWithTimeout ackWithTimeout = (AckWithTimeout) ack;
ackWithTimeout.schedule(new TimerTask() {
@Override
public void run() {
// remove the ack from the map (to prevent an actual acknowledgement)
acks.remove(ackId);
// remove the [MASK] from the buffer (if applicable)
Iterator<Packet<JSONArray>> iterator = sendBuffer.iterator();
while (iterator.hasNext()) {
if (iterator.next().id == ackId) {
iterator.remove();
}
}
ackWithTimeout.onTimeout();
}
});
}
Socket.this.acks.put(ackId, ack);
[MASK] .id = ids++;
}
if (Socket.this.connected) {
Socket.this. [MASK] ( [MASK] );
} else {
Socket.this.sendBuffer.add( [MASK] );
}
}
});
return this;
}
private void [MASK] (Packet [MASK] ) {
if ( [MASK] .type == Parser.EVENT) {
if (!onAnyOutgoingListeners.isEmpty()) {
Object[] argsAsArray = toArray((JSONArray) [MASK] .data);
for (Listener listener : onAnyOutgoingListeners) {
listener.call(argsAsArray);
}
}
}
[MASK] .nsp = this.nsp;
this.io. [MASK] ( [MASK] );
}
private void onopen() {
logger.fine("transport is open - connecting");
if (this.auth != null) {
this. [MASK] (new Packet<>(Parser.CONNECT, new JSONObject(this.auth)));
} else {
this. [MASK] (new Packet<>(Parser.CONNECT));
}
}
private void onclose(String reason) {
if (logger.isLoggable(Level.FINE)) {
logger.fine(String.format("close (%s)", reason));
}
this.connected = false;
this.id = null;
super.emit(EVENT_DISCONNECT, reason);
this.clearAcks();
}
/**
* Clears the acknowledgement handlers upon disconnection, since the client will never receive an acknowledgement from
* the server.
*/
private void clearAcks() {
for (Ack ack : this.acks.values()) {
if (ack instanceof AckWithTimeout) {
((AckWithTimeout) ack).onTimeout();
}
// note: basic Ack objects have no way to report an error, so they are simply ignored here
}
this.acks.clear();
}
private void on [MASK] (Packet<?> [MASK] ) {
if (!this.nsp.equals( [MASK] .nsp)) return;
switch ( [MASK] .type) {
case Parser.CONNECT: {
if ( [MASK] .data instanceof JSONObject && ((JSONObject) [MASK] .data).has("sid")) {
try {
this.onconnect(((JSONObject) [MASK] .data).getString("sid"));
return;
} catch (JSONException e) {}
} else {
super.emit(EVENT_CONNECT_ERROR, new SocketIOException("It seems you are trying to reach a Socket.IO server in v2.x with a v3.x client, which is not possible"));
}
break;
}
case Parser.EVENT:
case Parser.BINARY_EVENT: {
@SuppressWarnings("unchecked")
Packet<JSONArray> p = (Packet<JSONArray>) [MASK] ;
this.onevent(p);
break;
}
case Parser.ACK:
case Parser.BINARY_ACK: {
@SuppressWarnings("unchecked")
Packet<JSONArray> p = (Packet<JSONArray>) [MASK] ;
this.onack(p);
break;
}
case Parser.DISCONNECT:
this.ondisconnect();
break;
case Parser.CONNECT_ERROR:
this.destroy();
super.emit(EVENT_CONNECT_ERROR, [MASK] .data);
break;
}
}
private void onevent(Packet<JSONArray> [MASK] ) {
List<Object> args = new ArrayList<>(Arrays.asList(toArray( [MASK] .data)));
if (logger.isLoggable(Level.FINE)) {
logger.fine(String.format("emitting event %s", args));
}
if ( [MASK] .id >= 0) {
logger.fine("attaching ack callback to event");
args.add(this.ack( [MASK] .id));
}
if (this.connected) {
if (args.isEmpty()) return;
if (!this.onAnyIncomingListeners.isEmpty()) {
Object[] argsAsArray = args.toArray();
for (Listener listener : this.onAnyIncomingListeners) {
listener.call(argsAsArray);
}
}
String event = args.remove(0).toString();
super.emit(event, args.toArray());
} else {
this.receiveBuffer.add(args);
}
}
private Ack ack(final int id) {
final Socket self = this;
final boolean[] sent = new boolean[] {false};
return new Ack() {
@Override
public void call(final Object... args) {
EventThread.exec(new Runnable() {
@Override
public void run() {
if (sent[0]) return;
sent[0] = true;
if (logger.isLoggable(Level.FINE)) {
logger.fine(String.format("sending ack %s", args.length != 0 ? args : null));
}
JSONArray jsonArgs = new JSONArray();
for (Object arg : args) {
jsonArgs.put(arg);
}
Packet<JSONArray> [MASK] = new Packet<>(Parser.ACK, jsonArgs);
[MASK] .id = id;
self. [MASK] ( [MASK] );
}
});
}
};
}
private void onack(Packet<JSONArray> [MASK] ) {
Ack fn = this.acks.remove( [MASK] .id);
if (fn != null) {
if (logger.isLoggable(Level.FINE)) {
logger.fine(String.format("calling ack %s with %s", [MASK] .id, [MASK] .data));
}
fn.call(toArray( [MASK] .data));
} else {
if (logger.isLoggable(Level.FINE)) {
logger.fine(String.format("bad ack %s", [MASK] .id));
}
}
}
private void onconnect(String id) {
this.connected = true;
this.id = id;
this.emitBuffered();
super.emit(EVENT_CONNECT);
}
private void emitBuffered() {
List<Object> data;
while ((data = this.receiveBuffer.poll()) != null) {
String event = (String)data.get(0);
super.emit(event, data.toArray());
}
this.receiveBuffer.clear();
Packet<JSONArray> [MASK] ;
while (( [MASK] = this.sendBuffer.poll()) != null) {
this. [MASK] ( [MASK] );
}
this.sendBuffer.clear();
}
private void ondisconnect() {
if (logger.isLoggable(Level.FINE)) {
logger.fine(String.format("server disconnect (%s)", this.nsp));
}
this.destroy();
this.onclose("io server disconnect");
}
private void destroy() {
if (this.subs != null) {
// clean subscriptions to avoid reconnection
for (On.Handle sub : this.subs) {
sub.destroy();
}
this.subs = null;
}
this.io.destroy();
}
/**
* Disconnects the socket.
*
* @return a reference to this object.
*/
public Socket close() {
EventThread.exec(new Runnable() {
@Override
public void run() {
if (Socket.this.connected) {
if (logger.isLoggable(Level.FINE)) {
logger.fine(String.format("performing disconnect (%s)", Socket.this.nsp));
}
Socket.this. [MASK] (new Packet(Parser.DISCONNECT));
}
Socket.this.destroy();
if (Socket.this.connected) {
Socket.this.onclose("io client disconnect");
}
}
});
return this;
}
/**
* Disconnects the socket.
*
* @return a reference to this object.
*/
public Socket disconnect() {
return this.close();
}
public Manager io() {
return this.io;
}
public boolean connected() {
return this.connected;
}
/**
* A property on the socket instance that is equal to the underlying engine.io socket id.
*
* The value is present once the socket has connected, is removed when the socket disconnects and is updated if the socket reconnects.
*
* @return a socket id
*/
public String id() {
return this.id;
}
private static Object[] toArray(JSONArray array) {
int length = array.length();
Object[] data = new Object[length];
for (int i = 0; i < length; i++) {
Object v;
try {
v = array.get(i);
} catch (JSONException e) {
logger.log(Level.WARNING, "An error occurred while retrieving data from JSONArray", e);
v = null;
}
data[i] = JSONObject.NULL.equals(v) ? null : v;
}
return data;
}
public Socket onAnyIncoming(Listener fn) {
this.onAnyIncomingListeners.add(fn);
return this;
}
public Socket offAnyIncoming() {
this.onAnyIncomingListeners.clear();
return this;
}
public Socket offAnyIncoming(Listener fn) {
Iterator<Listener> it = this.onAnyIncomingListeners.iterator();
while (it.hasNext()) {
Listener listener = it.next();
if (listener == fn) {
it.remove();
break;
}
}
return this;
}
public Socket onAnyOutgoing(Listener fn) {
this.onAnyOutgoingListeners.add(fn);
return this;
}
public Socket offAnyOutgoing() {
this.onAnyOutgoingListeners.clear();
return this;
}
public Socket offAnyOutgoing(Listener fn) {
Iterator<Listener> it = this.onAnyOutgoingListeners.iterator();
while (it.hasNext()) {
Listener listener = it.next();
if (listener == fn) {
it.remove();
break;
}
}
return this;
}
}
| packet | java |
/*
* Copyright (C) 2010 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.collect;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.J2ktIncompatible;
import com.google.errorprone.annotations.DoNotCall;
import java.util.Collections;
import java.util.NoSuchElementException;
import java.util.Set;
/**
* A sorted set of contiguous values in a given {@link DiscreteDomain}. Example:
*
* <pre>{@code
* ContiguousSet.create(Range.closed(5, 42), DiscreteDomain.integers())
* }</pre>
*
* <p>Note that because bounded ranges over {@code int} and {@code long} values are so common, this
* particular example can be written as just:
*
* <pre>{@code
* ContiguousSet.closed(5, 42)
* }</pre>
*
* <p><b>Warning:</b> Be extremely careful what you do with conceptually large instances (such as
* {@code ContiguousSet.create(Range.greaterThan(0), DiscreteDomain.integers())}). Certain operations
* on such a set can be performed efficiently, but others (such as {@link Set#hashCode} or {@link
* Collections#frequency}) can cause major performance problems.
*
* @author Gregory Kick
* @since 10.0
*/
@GwtCompatible(emulated = true)
@SuppressWarnings("rawtypes") // allow ungenerified Comparable types
public abstract class ContiguousSet<C extends Comparable> extends ImmutableSortedSet<C> {
/**
* Returns a {@code ContiguousSet} containing the same values in the given domain {@linkplain
* Range#contains contained} by the range.
*
* @throws IllegalArgumentException if neither range nor the domain has a lower bound, or if
* neither has an upper bound
* @since 13.0
*/
public static <C extends Comparable> ContiguousSet<C> create(
Range<C> range, DiscreteDomain<C> domain) {
checkNotNull(range);
checkNotNull(domain);
Range<C> effectiveRange = range;
try {
if (!range.hasLowerBound()) {
effectiveRange = effectiveRange.intersection(Range.atLeast(domain.minValue()));
}
if (!range.hasUpperBound()) {
effectiveRange = effectiveRange.intersection(Range.atMost(domain.maxValue()));
}
} catch (NoSuchElementException e) {
throw new IllegalArgumentException(e);
}
boolean empty;
if (effectiveRange.isEmpty()) {
empty = true;
} else {
/*
* requireNonNull is safe because the effectiveRange operations above would have thrown or
* effectiveRange.isEmpty() would have returned true.
*/
C afterLower = requireNonNull(range.lowerBound.leastValueAbove(domain));
C beforeUpper = requireNonNull(range.upperBound.greatestValueBelow(domain));
// Per class spec, we are allowed to throw CCE if necessary
empty = Range.compareOrThrow(afterLower, beforeUpper) > 0;
}
return empty
? new EmptyContiguousSet<C>(domain)
: new RegularContiguousSet<C>(effectiveRange, domain);
}
/**
* Returns a nonempty contiguous set containing all {@code int} values from {@code lower}
* (inclusive) to {@code upper} (inclusive). (These are the same values contained in {@code
* Range.closed(lower, upper)}.)
*
* @throws IllegalArgumentException if {@code lower} is greater than {@code upper}
* @since 23.0
*/
public static ContiguousSet<Integer> closed(int lower, int upper) {
return create(Range.closed(lower, upper), DiscreteDomain.integers());
}
/**
* Returns a nonempty contiguous set containing all {@code long} values from {@code lower}
* (inclusive) to {@code upper} (inclusive). (These are the same values contained in {@code
* Range.closed(lower, upper)}.)
*
* @throws IllegalArgumentException if {@code lower} is greater than {@code upper}
* @since 23.0
*/
public static ContiguousSet<Long> closed(long lower, long upper) {
return create(Range.closed(lower, upper), DiscreteDomain.longs());
}
/**
* Returns a contiguous set containing all {@code int} values from {@code lower} (inclusive) to
* {@code upper} (exclusive). If the endpoints are equal, an empty set is returned. (These are the
* same values contained in {@code Range.closedOpen(lower, upper)}.)
*
* @throws IllegalArgumentException if {@code lower} is greater than {@code upper}
* @since 23.0
*/
public static ContiguousSet<Integer> closedOpen(int lower, int upper) {
return create(Range.closedOpen(lower, upper), DiscreteDomain.integers());
}
/**
* Returns a contiguous set containing all {@code long} values from {@code lower} (inclusive) to
* {@code upper} (exclusive). If the endpoints are equal, an empty set is returned. (These are the
* same values contained in {@code Range.closedOpen(lower, upper)}.)
*
* @throws IllegalArgumentException if {@code lower} is greater than {@code upper}
* @since 23.0
*/
public static ContiguousSet<Long> closedOpen(long lower, long upper) {
return create(Range.closedOpen(lower, upper), DiscreteDomain.longs());
}
final DiscreteDomain<C> domain;
ContiguousSet(DiscreteDomain<C> domain) {
super(Ordering.natural());
this.domain = domain;
}
@Override
public ContiguousSet<C> headSet(C [MASK] ) {
return headSetImpl(checkNotNull( [MASK] ), false);
}
/**
* @since 12.0
*/
@GwtIncompatible // NavigableSet
@Override
public ContiguousSet<C> headSet(C [MASK] , boolean inclusive) {
return headSetImpl(checkNotNull( [MASK] ), inclusive);
}
@Override
public ContiguousSet<C> subSet(C fromElement, C [MASK] ) {
checkNotNull(fromElement);
checkNotNull( [MASK] );
checkArgument(comparator().compare(fromElement, [MASK] ) <= 0);
return subSetImpl(fromElement, true, [MASK] , false);
}
/**
* @since 12.0
*/
@GwtIncompatible // NavigableSet
@Override
public ContiguousSet<C> subSet(
C fromElement, boolean fromInclusive, C [MASK] , boolean toInclusive) {
checkNotNull(fromElement);
checkNotNull( [MASK] );
checkArgument(comparator().compare(fromElement, [MASK] ) <= 0);
return subSetImpl(fromElement, fromInclusive, [MASK] , toInclusive);
}
@Override
public ContiguousSet<C> tailSet(C fromElement) {
return tailSetImpl(checkNotNull(fromElement), true);
}
/**
* @since 12.0
*/
@GwtIncompatible // NavigableSet
@Override
public ContiguousSet<C> tailSet(C fromElement, boolean inclusive) {
return tailSetImpl(checkNotNull(fromElement), inclusive);
}
/*
* These methods perform most headSet, subSet, and tailSet logic, besides parameter validation.
*/
@SuppressWarnings("MissingOverride") // Supermethod does not exist under GWT.
abstract ContiguousSet<C> headSetImpl(C [MASK] , boolean inclusive);
@SuppressWarnings("MissingOverride") // Supermethod does not exist under GWT.
abstract ContiguousSet<C> subSetImpl(
C fromElement, boolean fromInclusive, C [MASK] , boolean toInclusive);
@SuppressWarnings("MissingOverride") // Supermethod does not exist under GWT.
abstract ContiguousSet<C> tailSetImpl(C fromElement, boolean inclusive);
/**
* Returns the set of values that are contained in both this set and the other.
*
* <p>This method should always be used instead of {@link Sets#intersection} for {@link
* ContiguousSet} instances.
*/
public abstract ContiguousSet<C> intersection(ContiguousSet<C> other);
/**
* Returns a range, closed on both ends, whose endpoints are the minimum and maximum values
* contained in this set. This is equivalent to {@code range(CLOSED, CLOSED)}.
*
* @throws NoSuchElementException if this set is empty
*/
public abstract Range<C> range();
/**
* Returns the minimal range with the given boundary types for which all values in this set are
* {@linkplain Range#contains(Comparable) contained} within the range.
*
* <p>Note that this method will return ranges with unbounded endpoints if {@link BoundType#OPEN}
* is requested for a domain minimum or maximum. For example, if {@code set} was created from the
* range {@code [1..Integer.MAX_VALUE]} then {@code set.range(CLOSED, OPEN)} must return {@code
* [1..∞)}.
*
* @throws NoSuchElementException if this set is empty
*/
public abstract Range<C> range(BoundType lowerBoundType, BoundType upperBoundType);
@Override
@GwtIncompatible // NavigableSet
ImmutableSortedSet<C> createDescendingSet() {
return new DescendingImmutableSortedSet<>(this);
}
/** Returns a shorthand representation of the contents such as {@code "[1..100]"}. */
@Override
public String toString() {
return range().toString();
}
/**
* Not supported. {@code ContiguousSet} instances are constructed with {@link #create}. This
* method exists only to hide {@link ImmutableSet#builder} from consumers of {@code
* ContiguousSet}.
*
* @throws UnsupportedOperationException always
* @deprecated Use {@link #create}.
*/
@Deprecated
@DoNotCall("Always throws UnsupportedOperationException")
public static <E> ImmutableSortedSet.Builder<E> builder() {
throw new UnsupportedOperationException();
}
// redeclare to help optimizers with b/310253115
@SuppressWarnings("RedundantOverride")
@J2ktIncompatible // serialization
@Override
@GwtIncompatible // serialization
Object writeReplace() {
return super.writeReplace();
}
}
| toElement | java |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.common.serialize.hessian2;
import org.apache.dubbo.common.serialize.ObjectInput;
import org.apache.dubbo.common.serialize.ObjectOutput;
import org.apache.dubbo.common.serialize.base.AbstractSerializationPersonFailTest;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayInputStream;
import java.util.Arrays;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
public class Hessian2SerializationTest extends AbstractSerializationPersonFailTest {
{
serialization = new Hessian2Serialization();
}
// Hessian2
@Test
public void test_boolArray_withType() throws Exception {
boolean[] data = new boolean[]{true, false, true};
ObjectOutput [MASK] = serialization.serialize(url, byteArrayOutputStream);
[MASK] .writeObject(data);
[MASK] .flushBuffer();
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(
byteArrayOutputStream.toByteArray());
ObjectInput deserialize = serialization.deserialize(url, byteArrayInputStream);
assertTrue(Arrays.equals(data, (boolean[]) deserialize.readObject(boolean[].class)));
try {
deserialize.readObject(boolean[].class);
fail();
} catch (ArrayIndexOutOfBoundsException e) {
}
// NOTE: Hessian2 throws ArrayIndexOutOfBoundsException instead of IOException, let's live with this.
}
@Disabled("type missing, char[] -> String")
@Test
public void test_charArray() throws Exception {
}
@Test
public void test_shortArray_withType() throws Exception {
short[] data = new short[]{37, 39, 12};
ObjectOutput [MASK] = serialization.serialize(url, byteArrayOutputStream);
[MASK] .writeObject(data);
[MASK] .flushBuffer();
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(
byteArrayOutputStream.toByteArray());
ObjectInput deserialize = serialization.deserialize(url, byteArrayInputStream);
assertArrayEquals(data, (short[]) deserialize.readObject(short[].class));
try {
deserialize.readObject(short[].class);
fail();
} catch (ArrayIndexOutOfBoundsException e) {
}
// NOTE: Hessian2 throws ArrayIndexOutOfBoundsException instead of IOException, let's live with this.
}
@Test
public void test_intArray_withType() throws Exception {
int[] data = new int[]{234, 0, -1};
ObjectOutput [MASK] = serialization.serialize(url, byteArrayOutputStream);
[MASK] .writeObject(data);
[MASK] .flushBuffer();
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(
byteArrayOutputStream.toByteArray());
ObjectInput deserialize = serialization.deserialize(url, byteArrayInputStream);
assertArrayEquals(data, (int[]) deserialize.readObject());
try {
deserialize.readObject(int[].class);
fail();
} catch (ArrayIndexOutOfBoundsException e) {
}
// NOTE: Hessian2 throws ArrayIndexOutOfBoundsException instead of IOException, let's live with this.
}
@Test
public void test_longArray_withType() throws Exception {
long[] data = new long[]{234, 0, -1};
ObjectOutput [MASK] = serialization.serialize(url, byteArrayOutputStream);
[MASK] .writeObject(data);
[MASK] .flushBuffer();
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(
byteArrayOutputStream.toByteArray());
ObjectInput deserialize = serialization.deserialize(url, byteArrayInputStream);
assertArrayEquals(data, (long[]) deserialize.readObject());
try {
deserialize.readObject(long[].class);
fail();
} catch (ArrayIndexOutOfBoundsException e) {
}
// NOTE: Hessian2 throws ArrayIndexOutOfBoundsException instead of IOException, let's live with this.
}
@Test
public void test_floatArray_withType() throws Exception {
float[] data = new float[]{37F, -3.14F, 123456.7F};
ObjectOutput [MASK] = serialization.serialize(url, byteArrayOutputStream);
[MASK] .writeObject(data);
[MASK] .flushBuffer();
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(
byteArrayOutputStream.toByteArray());
ObjectInput deserialize = serialization.deserialize(url, byteArrayInputStream);
assertArrayEquals(data, (float[]) deserialize.readObject(), 0.0001F);
try {
deserialize.readObject(float[].class);
fail();
} catch (ArrayIndexOutOfBoundsException e) {
}
// NOTE: Hessian2 throws ArrayIndexOutOfBoundsException instead of IOException, let's live with this.
}
@Test
public void test_doubleArray_withType() throws Exception {
double[] data = new double[]{37D, -3.14D, 123456.7D};
ObjectOutput [MASK] = serialization.serialize(url, byteArrayOutputStream);
[MASK] .writeObject(data);
[MASK] .flushBuffer();
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(
byteArrayOutputStream.toByteArray());
ObjectInput deserialize = serialization.deserialize(url, byteArrayInputStream);
assertArrayEquals(data, (double[]) deserialize.readObject(double[].class), 0.0001);
try {
deserialize.readObject(double[].class);
fail();
} catch (ArrayIndexOutOfBoundsException e) {
}
// NOTE: Hessian2 throws ArrayIndexOutOfBoundsException instead of IOException, let's live with this.
}
@Test
public void test_StringArray_withType() throws Exception {
String[] data = new String[]{"1", "b"};
ObjectOutput [MASK] = serialization.serialize(url, byteArrayOutputStream);
[MASK] .writeObject(data);
[MASK] .flushBuffer();
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(
byteArrayOutputStream.toByteArray());
ObjectInput deserialize = serialization.deserialize(url, byteArrayInputStream);
assertArrayEquals(data, deserialize.readObject(String[].class));
try {
deserialize.readObject(String[].class);
fail();
} catch (ArrayIndexOutOfBoundsException e) {
}
// NOTE: Hessian2 throws ArrayIndexOutOfBoundsException instead of IOException, let's live with this.
}
@Disabled("type missing, Byte -> Integer")
@Test
public void test_ByteWrap() throws Exception {
}
// FIXME
@Disabled("Bad Stream read other type data")
@Test
public void test_MediaContent_badStream() throws Exception {
}
} | objectOutput | java |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider;
import org.elasticsearch.action.support.SubscribableListener;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.util.Maps;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.SearchContextMissingException;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.builder.PointInTimeBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchContextId;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.tasks.TaskCancelledException;
import org.elasticsearch.transport.Transport;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executor;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import static org.elasticsearch.core.Strings.format;
/**
 * This is an abstract base class that encapsulates the logic to fan out to all shards in the provided {@link List<SearchShardIterator>}
 * and collect the results. If a shard request returns a failure, this class handles the advance to the next replica of the shard until
 * the shard's replica iterator is exhausted. Each shard is referenced by its position in the {@link List<SearchShardIterator>}, which is
 * later referred to as the {@code shardIndex}.
 * The fan-out and collect algorithm is traditionally used as the initial phase, which can either be a query execution or a collection of
 * distributed frequencies.
 */
abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> extends SearchPhase {
private static final float DEFAULT_INDEX_BOOST = 1.0f;
private final Logger logger;
private final NamedWriteableRegistry namedWriteableRegistry;
private final SearchTransportService searchTransportService;
private final Executor executor;
private final ActionListener<SearchResponse> listener;
private final SearchRequest request;
/**
* Used by subclasses to resolve node ids to DiscoveryNodes.
*/
private final BiFunction<String, String, Transport.Connection> nodeIdToConnection;
private final SearchTask task;
protected final SearchPhaseResults<Result> results;
private final long clusterStateVersion;
private final TransportVersion minTransportVersion;
private final Map<String, AliasFilter> aliasFilter;
private final Map<String, Float> concreteIndexBoosts;
private final SetOnce<AtomicArray<ShardSearchFailure>> shardFailures = new SetOnce<>();
private final Object shardFailuresMutex = new Object();
private final AtomicBoolean hasShardResponse = new AtomicBoolean(false);
private final AtomicInteger successfulOps = new AtomicInteger();
private final SearchTimeProvider timeProvider;
private final SearchResponse.Clusters clusters;
protected final List<SearchShardIterator> toSkipShardsIts;
protected final List<SearchShardIterator> shardsIts;
private final SearchShardIterator[] [MASK] erators;
private final AtomicInteger outstandingShards;
private final int maxConcurrentRequestsPerNode;
private final Map<String, PendingExecutions> pendingExecutionsPerNode = new ConcurrentHashMap<>();
private final boolean throttleConcurrentRequests;
private final AtomicBoolean requestCancelled = new AtomicBoolean();
// protected for tests
protected final List<Releasable> releasables = new ArrayList<>();
AbstractSearchAsyncAction(
String name,
Logger logger,
NamedWriteableRegistry namedWriteableRegistry,
SearchTransportService searchTransportService,
BiFunction<String, String, Transport.Connection> nodeIdToConnection,
Map<String, AliasFilter> aliasFilter,
Map<String, Float> concreteIndexBoosts,
Executor executor,
SearchRequest request,
ActionListener<SearchResponse> listener,
List<SearchShardIterator> shardsIts,
SearchTimeProvider timeProvider,
ClusterState clusterState,
SearchTask task,
SearchPhaseResults<Result> resultConsumer,
int maxConcurrentRequestsPerNode,
SearchResponse.Clusters clusters
) {
super(name);
this.namedWriteableRegistry = namedWriteableRegistry;
final List<SearchShardIterator> toSkipIterators = new ArrayList<>();
final List<SearchShardIterator> iterators = new ArrayList<>();
for (final SearchShardIterator iterator : shardsIts) {
if (iterator.skip()) {
toSkipIterators.add(iterator);
} else {
iterators.add(iterator);
}
}
this.toSkipShardsIts = toSkipIterators;
this.shardsIts = iterators;
outstandingShards = new AtomicInteger(shardsIts.size());
this. [MASK] erators = iterators.toArray(new SearchShardIterator[0]);
// we later compute the shard index based on the natural order of the shards
// that participate in the search request. This means that this number is
// consistent between two requests that target the same shards.
Arrays.sort( [MASK] erators);
this.maxConcurrentRequestsPerNode = maxConcurrentRequestsPerNode;
// in the case where we have fewer shards than maxConcurrentRequestsPerNode, we don't need to throttle
this.throttleConcurrentRequests = maxConcurrentRequestsPerNode < shardsIts.size();
this.timeProvider = timeProvider;
this.logger = logger;
this.searchTransportService = searchTransportService;
this.executor = executor;
this.request = request;
this.task = task;
this.listener = ActionListener.runAfter(listener, () -> Releasables.close(releasables));
this.nodeIdToConnection = nodeIdToConnection;
this.concreteIndexBoosts = concreteIndexBoosts;
this.clusterStateVersion = clusterState.version();
this.minTransportVersion = clusterState.getMinTransportVersion();
this.aliasFilter = aliasFilter;
this.results = resultConsumer;
// register the release of the query consumer to free up the circuit breaker memory
// at the end of the search
addReleasable(resultConsumer);
this.clusters = clusters;
}
protected void notifyListShards(
SearchProgressListener progressListener,
SearchResponse.Clusters clusters,
SearchSourceBuilder sourceBuilder
) {
progressListener.notifyListShards(
SearchProgressListener.buildSearchShardsFromIter(this.shardsIts),
SearchProgressListener.buildSearchShardsFromIter(toSkipShardsIts),
clusters,
sourceBuilder == null || sourceBuilder.size() > 0,
timeProvider
);
}
/**
* Registers a {@link Releasable} that will be closed when the search request finishes or fails.
*/
public void addReleasable(Releasable releasable) {
releasables.add(releasable);
}
/**
 * Computes how long the search took to execute, in milliseconds.
 */
long buildTookInMillis() {
return timeProvider.buildTookInMillis();
}
/**
* This is the main entry point for a search. This method starts the search execution of the initial phase.
*/
public final void start() {
if (getNumShards() == 0) {
// no search shards to search on, bail with empty response
// (this happens when searching across _all with no indices around, and is consistent with broadcast operations)
int trackTotalHitsUpTo = request.source() == null ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO
: request.source().trackTotalHitsUpTo() == null ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO
: request.source().trackTotalHitsUpTo();
// total hits is null in the response if the tracking of total hits is disabled
boolean withTotalHits = trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED;
sendSearchResponse(
withTotalHits ? SearchResponseSections.EMPTY_WITH_TOTAL_HITS : SearchResponseSections.EMPTY_WITHOUT_TOTAL_HITS,
new AtomicArray<>(0)
);
return;
}
executePhase(this);
}
@Override
protected final void run() {
for (final SearchShardIterator iterator : toSkipShardsIts) {
assert iterator.skip();
skipShard(iterator);
}
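// map each shard iterator to its shardIndex; the backing array was sorted at construction time, so the index is stable across identical requests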
final Map<SearchShardIterator, Integer> shardIndexMap = Maps.newHashMapWithExpectedSize( [MASK] erators.length);
for (int i = 0; i < [MASK] erators.length; i++) {
shardIndexMap.put( [MASK] erators[i], i);
}
if (shardsIts.size() > 0) {
doCheckNoMissingShards(getName(), request, shardsIts);
for (int i = 0; i < shardsIts.size(); i++) {
final SearchShardIterator shardRoutings = shardsIts.get(i);
assert shardRoutings.skip() == false;
assert shardIndexMap.containsKey(shardRoutings);
int shardIndex = shardIndexMap.get(shardRoutings);
final SearchShardTarget routing = shardRoutings.nextOrNull();
if (routing == null) {
failOnUnavailable(shardIndex, shardRoutings);
} else {
performPhaseOnShard(shardIndex, shardRoutings, routing);
}
}
}
}
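// a skipped shard counts as a successful (empty) operation and immediately releases its outstanding-shard slot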
void skipShard(SearchShardIterator iterator) {
successfulOps.incrementAndGet();
assert iterator.skip();
successfulShardExecution();
}
private void performPhaseOnShard(final int shardIndex, final SearchShardIterator [MASK] , final SearchShardTarget shard) {
if (throttleConcurrentRequests) {
var pendingExecutions = pendingExecutionsPerNode.computeIfAbsent(
shard.getNodeId(),
n -> new PendingExecutions(maxConcurrentRequestsPerNode)
);
pendingExecutions.submit(l -> doPerformPhaseOnShard(shardIndex, [MASK] , shard, l));
} else {
doPerformPhaseOnShard(shardIndex, [MASK] , shard, () -> {});
}
}
private void doPerformPhaseOnShard(int shardIndex, SearchShardIterator [MASK] , SearchShardTarget shard, Releasable releasable) {
var shardListener = new SearchActionListener<Result>(shard, shardIndex) {
@Override
public void innerOnResponse(Result result) {
try {
releasable.close();
onShardResult(result);
} catch (Exception exc) {
onShardFailure(shardIndex, shard, [MASK] , exc);
}
}
@Override
public void onFailure(Exception e) {
releasable.close();
onShardFailure(shardIndex, shard, [MASK] , e);
}
};
final Transport.Connection connection;
try {
connection = getConnection(shard.getClusterAlias(), shard.getNodeId());
} catch (Exception e) {
shardListener.onFailure(e);
return;
}
executePhaseOnShard( [MASK] , connection, shardListener);
}
private void failOnUnavailable(int shardIndex, SearchShardIterator [MASK] ) {
SearchShardTarget unassignedShard = new SearchShardTarget(null, [MASK] .shardId(), [MASK] .getClusterAlias());
onShardFailure(shardIndex, unassignedShard, [MASK] , new NoShardAvailableActionException( [MASK] .shardId()));
}
/**
* Sends the request to the actual shard.
* @param [MASK] the shards iterator
* @param connection to node that the shard is located on
* @param listener the listener to notify on response
*/
protected abstract void executePhaseOnShard(
SearchShardIterator [MASK] ,
Transport.Connection connection,
SearchActionListener<Result> listener
);
/**
 * Processes the phase transition from one phase to another. This method handles all errors that happen during the initial run execution
 * of the next phase. If there are no successful operations in the context when this method is executed, the search is aborted and
 * a response is returned to the user indicating that all shards have failed.
 */
protected void executeNextPhase(String currentPhase, Supplier<SearchPhase> nextPhaseSupplier) {
/* This is the main search phase transition where we move to the next phase. If all shards
* failed or if there was a failure and partial results are not allowed, then we immediately
* fail. Otherwise we continue to the next phase.
*/
ShardOperationFailedException[] shardSearchFailures = buildShardFailures();
if (shardSearchFailures.length == getNumShards()) {
shardSearchFailures = ExceptionsHelper.groupBy(shardSearchFailures);
Throwable cause = shardSearchFailures.length == 0
? null
: ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
logger.debug(() -> "All shards failed for phase: [" + currentPhase + "]", cause);
onPhaseFailure(currentPhase, "all shards failed", cause);
} else {
Boolean allowPartialResults = request.allowPartialSearchResults();
assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults";
if (allowPartialResults == false && successfulOps.get() != getNumShards()) {
// check if there are actual failures in the atomic array since
// successful retries can reset the failures to null
if (shardSearchFailures.length > 0) {
if (logger.isDebugEnabled()) {
int numShardFailures = shardSearchFailures.length;
shardSearchFailures = ExceptionsHelper.groupBy(shardSearchFailures);
Throwable cause = ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
logger.debug(() -> format("%s shards failed for phase: [%s]", numShardFailures, currentPhase), cause);
}
onPhaseFailure(currentPhase, "Partial shards failure", null);
} else {
int discrepancy = getNumShards() - successfulOps.get();
assert discrepancy > 0 : "discrepancy: " + discrepancy;
if (logger.isDebugEnabled()) {
logger.debug(
"Partial shards failure (unavailable: {}, successful: {}, skipped: {}, num-shards: {}, phase: {})",
discrepancy,
successfulOps.get(),
toSkipShardsIts.size(),
getNumShards(),
currentPhase
);
}
onPhaseFailure(currentPhase, "Partial shards failure (" + discrepancy + " shards unavailable)", null);
}
return;
}
var nextPhase = nextPhaseSupplier.get();
if (logger.isTraceEnabled()) {
final String resultsFrom = results.getSuccessfulResults()
.map(r -> r.getSearchShardTarget().toString())
.collect(Collectors.joining(","));
logger.trace(
"[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})",
currentPhase,
nextPhase.getName(),
resultsFrom,
clusterStateVersion
);
}
executePhase(nextPhase);
}
}
private void executePhase(SearchPhase phase) {
try {
phase.run();
} catch (RuntimeException e) {
if (logger.isDebugEnabled()) {
logger.debug(() -> format("Failed to execute [%s] while moving to [%s] phase", request, phase.getName()), e);
}
onPhaseFailure(phase.getName(), "", e);
}
}
private ShardSearchFailure[] buildShardFailures() {
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
if (shardFailures == null) {
return ShardSearchFailure.EMPTY_ARRAY;
}
List<ShardSearchFailure> entries = shardFailures.asList();
ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
for (int i = 0; i < failures.length; i++) {
failures[i] = entries.get(i);
}
return failures;
}
private void onShardFailure(final int shardIndex, SearchShardTarget shard, final SearchShardIterator [MASK] , Exception e) {
// we always add the shard failure for a specific shard instance
// we do make sure to clean it on a successful response from a shard
onShardFailure(shardIndex, shard, e);
final SearchShardTarget nextShard = [MASK] .nextOrNull();
final boolean lastShard = nextShard == null;
logger.debug(() -> format("%s: Failed to execute [%s] lastShard [%s]", shard, request, lastShard), e);
if (lastShard) {
if (request.allowPartialSearchResults() == false) {
if (requestCancelled.compareAndSet(false, true)) {
try {
searchTransportService.cancelSearchTask(task, "partial results are not allowed and at least one shard has failed");
} catch (Exception cancelFailure) {
logger.debug("Failed to cancel search request", cancelFailure);
}
}
}
onShardGroupFailure(shardIndex, shard, e);
}
if (lastShard == false) {
performPhaseOnShard(shardIndex, [MASK] , nextShard);
} else {
// count down outstanding shards, we're done with this shard as there's no more copies to try
final int outstanding = outstandingShards.decrementAndGet();
assert outstanding >= 0 : "outstanding: " + outstanding;
if (outstanding == 0) {
onPhaseDone();
}
}
}
/**
* Executed once for every {@link ShardId} that failed on all available shard routings.
*
* @param shardIndex the shard index that failed
* @param shardTarget the last shard target for this failure
* @param exc the last failure reason
*/
protected void onShardGroupFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {}
/**
* Executed once for every failed shard level request. This method is invoked before the next replica is tried for the given
* shard target.
* @param shardIndex the internal index for this shard. Each shard has an index / ordinal assigned that is used to reference
* its results
* @param shardTarget the shard target for this failure
* @param e the failure reason
*/
void onShardFailure(final int shardIndex, SearchShardTarget shardTarget, Exception e) {
if (TransportActions.isShardNotAvailableException(e)) {
// Groups shard not available exceptions under a generic exception that returns a SERVICE_UNAVAILABLE(503)
// temporary error.
e = NoShardAvailableActionException.forOnShardFailureWrapper(e.getMessage());
}
// we don't aggregate shard failures due to the internal cancellation,
// but we do keep the header counts right
if ((requestCancelled.get() && isTaskCancelledException(e)) == false) {
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
// lazily create shard failures, so we can cheaply return the empty shard failure list in the common case (no failures)
if (shardFailures == null) { // this is double checked locking but it's fine since SetOnce uses a volatile read internally
synchronized (shardFailuresMutex) {
shardFailures = this.shardFailures.get(); // read again in case another thread created it in the meantime
if (shardFailures == null) { // still null so we are the first and create a new instance
shardFailures = new AtomicArray<>(getNumShards());
this.shardFailures.set(shardFailures);
}
}
}
ShardSearchFailure failure = shardFailures.get(shardIndex);
if (failure == null) {
shardFailures.set(shardIndex, new ShardSearchFailure(e, shardTarget));
} else {
// the failure is already present; try not to override it with an exception that is less meaningful,
// for example an illegal shard state
if (TransportActions.isReadOverrideException(e) && (e instanceof SearchContextMissingException == false)) {
shardFailures.set(shardIndex, new ShardSearchFailure(e, shardTarget));
}
}
if (results.hasResult(shardIndex)) {
assert failure == null : "shard failed before but shouldn't: " + failure;
successfulOps.decrementAndGet(); // if this shard was successful before (initial phase) we have to adjust the counter
}
}
}
private static boolean isTaskCancelledException(Exception e) {
return ExceptionsHelper.unwrapCausesAndSuppressed(e, ex -> ex instanceof TaskCancelledException).isPresent();
}
/**
* Executed once for every successful shard level request.
* @param result the result returned from the shard
*/
protected void onShardResult(Result result) {
assert result.getShardIndex() != -1 : "shard index is not set";
assert result.getSearchShardTarget() != null : "search shard target must not be null";
hasShardResponse.set(true);
if (logger.isTraceEnabled()) {
logger.trace("got first-phase result from {}", result != null ? result.getSearchShardTarget() : null);
}
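// hand the result to the phase's result consumer; success bookkeeping runs only once the consumer has accepted it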
results.consumeResult(result, () -> onShardResultConsumed(result));
}
private void onShardResultConsumed(Result result) {
successfulOps.incrementAndGet();
// clean a previous error on this shard group (note: this code is serialized per shardIndex value,
// so concurrency-wise it is OK to potentially miss shard failures being created by another failure
// in #addShardFailure, because by definition that will happen on *another* shardIndex)
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
if (shardFailures != null) {
shardFailures.set(result.getShardIndex(), null);
}
// we need to increment successful ops first, before we compare against the exit condition; otherwise if we
// are fast we could concurrently update totalOps but then preempt one of the threads, which can
// cause the successor to read a wrong value from successfulOps if the second phase is very fast (i.e. count etc.)
// increment all the "future" shards to update the total ops, since some may work and some may not...
// and when that happens, we break on total ops, so we must maintain them
successfulShardExecution();
}
private void successfulShardExecution() {
final int outstanding = outstandingShards.decrementAndGet();
assert outstanding >= 0 : "outstanding: " + outstanding;
if (outstanding == 0) {
onPhaseDone();
}
}
/**
* Returns the total number of shards for the current search across all indices
*/
public final int getNumShards() {
return results.getNumShards();
}
/**
* Returns a logger for this context to prevent each individual phase from creating its own logger.
*/
public final Logger getLogger() {
return logger;
}
/**
* Returns the currently executing search task
*/
public final SearchTask getTask() {
return task;
}
/**
* Returns the currently executing search request
*/
public final SearchRequest getRequest() {
return request;
}
/**
* Returns the targeted {@link OriginalIndices} for the provided {@code shardIndex}.
*/
public OriginalIndices getOriginalIndices(int shardIndex) {
return [MASK] erators[shardIndex].getOriginalIndices();
}
/**
* Checks if the given context id is part of the point in time of this search (if one exists).
* We should not release search contexts that belong to the point in time during or after searches.
*/
public boolean isPartOfPointInTime(ShardSearchContextId contextId) {
final PointInTimeBuilder pointInTimeBuilder = request.pointInTimeBuilder();
if (pointInTimeBuilder != null) {
return request.pointInTimeBuilder().getSearchContextId(namedWriteableRegistry).contains(contextId);
} else {
return false;
}
}
private SearchResponse buildSearchResponse(
SearchResponseSections internalSearchResponse,
ShardSearchFailure[] failures,
String scrollId,
BytesReference searchContextId
) {
int numSuccess = successfulOps.get();
int numFailures = failures.length;
assert numSuccess + numFailures == getNumShards()
: "numSuccess(" + numSuccess + ") + numFailures(" + numFailures + ") != totalShards(" + getNumShards() + ")";
return new SearchResponse(
internalSearchResponse,
scrollId,
getNumShards(),
numSuccess,
toSkipShardsIts.size(),
buildTookInMillis(),
failures,
clusters,
searchContextId
);
}
boolean buildPointInTimeFromSearchResults() {
return false;
}
/**
* Builds and sends the final search response back to the user.
*
* @param internalSearchResponse the internal search response
* @param queryResults the results of the query phase
*/
public void sendSearchResponse(SearchResponseSections internalSearchResponse, AtomicArray<SearchPhaseResult> queryResults) {
ShardSearchFailure[] failures = buildShardFailures();
Boolean allowPartialResults = request.allowPartialSearchResults();
assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults";
if (allowPartialResults == false && failures.length > 0) {
raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures));
} else {
final String scrollId = request.scroll() != null ? TransportSearchHelper.buildScrollId(queryResults) : null;
final BytesReference searchContextId;
if (buildPointInTimeFromSearchResults()) {
searchContextId = SearchContextId.encode(queryResults.asList(), aliasFilter, minTransportVersion, failures);
} else {
if (request.source() != null
&& request.source().pointInTimeBuilder() != null
&& request.source().pointInTimeBuilder().singleSession() == false) {
searchContextId = request.source().pointInTimeBuilder().getEncodedId();
} else {
searchContextId = null;
}
}
ActionListener.respondAndRelease(listener, buildSearchResponse(internalSearchResponse, failures, scrollId, searchContextId));
}
}
/**
* This method will communicate a fatal phase failure back to the user. In contrast to a shard failure,
* this method will immediately fail the search request and return the failure to the issuer of the request
* @param phase the phase that failed
* @param msg an optional message
* @param cause the cause of the phase failure
*/
public void onPhaseFailure(String phase, String msg, Throwable cause) {
raisePhaseFailure(new SearchPhaseExecutionException(phase, msg, cause, buildShardFailures()));
}
/**
* This method should be called if a search phase failed to ensure all relevant reader contexts are released.
* This method will also notify the listener and sends back a failure to the user.
*
* @param exception the exception explaining or causing the phase failure
*/
private void raisePhaseFailure(SearchPhaseExecutionException exception) {
results.getSuccessfulResults().forEach((entry) -> {
// Do not release search contexts that are part of the point in time
if (entry.getContextId() != null && isPartOfPointInTime(entry.getContextId()) == false) {
try {
SearchShardTarget searchShardTarget = entry.getSearchShardTarget();
Transport.Connection connection = getConnection(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId());
sendReleaseSearchContext(entry.getContextId(), connection);
} catch (Exception inner) {
inner.addSuppressed(exception);
logger.trace("failed to release context", inner);
}
}
});
listener.onFailure(exception);
}
/**
* Releases a search context with the given context ID on the node the given connection is connected to.
* @see org.elasticsearch.search.query.QuerySearchResult#getContextId()
* @see org.elasticsearch.search.fetch.FetchSearchResult#getContextId()
*/
void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connection connection) {
assert isPartOfPointInTime(contextId) == false : "Must not release point in time context [" + contextId + "]";
if (connection != null) {
searchTransportService.sendFreeContext(connection, contextId, ActionListener.noop());
}
}
/**
* Executed once all shard results have been received and processed
* @see #onShardFailure(int, SearchShardTarget, Exception)
* @see #onShardResult(SearchPhaseResult)
*/
private void onPhaseDone() { // as a tribute to @kimchy aka. finishHim()
executeNextPhase(getName(), this::getNextPhase);
}
/**
* Returns a connection to the node if connected; otherwise a {@link org.elasticsearch.transport.ConnectTransportException} will be
* thrown.
*/
public final Transport.Connection getConnection(String clusterAlias, String nodeId) {
return nodeIdToConnection.apply(clusterAlias, nodeId);
}
/**
* Returns the {@link SearchTransportService} to send shard request to other nodes
*/
public SearchTransportService getSearchTransport() {
return searchTransportService;
}
public final void execute(Runnable command) {
executor.execute(command);
}
/**
* Builds a request for the initial search phase.
*
* @param [MASK] the target {@link SearchShardIterator}
* @param shardIndex the index of the shard that is used in the coordinator node to
* tiebreak results with identical sort values
*/
protected final ShardSearchRequest buildShardSearchRequest(SearchShardIterator [MASK] , int shardIndex) {
AliasFilter filter = aliasFilter.get( [MASK] .shardId().getIndex().getUUID());
assert filter != null;
float indexBoost = concreteIndexBoosts.getOrDefault( [MASK] .shardId().getIndex().getUUID(), DEFAULT_INDEX_BOOST);
ShardSearchRequest shardRequest = new ShardSearchRequest(
[MASK] .getOriginalIndices(),
request,
[MASK] .shardId(),
shardIndex,
getNumShards(),
filter,
indexBoost,
timeProvider.absoluteStartMillis(),
[MASK] .getClusterAlias(),
[MASK] .getSearchContextId(),
[MASK] .getSearchContextKeepAlive()
);
// if we already received a search result we can inform the shard that it
// can return a null response if the request rewrites to match none rather
// than creating an empty response in the search thread pool.
// Note that we have to disable this shortcut for queries that create a context (scroll and search context).
shardRequest.canReturnNullResponseIfMatchNoDocs(hasShardResponse.get() && shardRequest.scroll() == null);
return shardRequest;
}
/**
* Returns the next phase based on the results of the initial search phase
*/
protected abstract SearchPhase getNextPhase();
private static final class PendingExecutions {
private final Semaphore semaphore;
private final ConcurrentLinkedQueue<Consumer<Releasable>> queue = new ConcurrentLinkedQueue<>();
PendingExecutions(int permits) {
assert permits > 0 : "not enough permits: " + permits;
semaphore = new Semaphore(permits);
}
void submit(Consumer<Releasable> task) {
if (semaphore.tryAcquire()) {
executeAndRelease(task);
} else {
queue.add(task);
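// re-check after enqueueing: a permit may have been released between the failed tryAcquire and the add,
// which would otherwise leave the queued task stranded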
if (semaphore.tryAcquire()) {
task = pollNextTaskOrReleasePermit();
if (task != null) {
executeAndRelease(task);
}
}
}
}
private void executeAndRelease(Consumer<Releasable> task) {
do {
final SubscribableListener<Void> onDone = new SubscribableListener<>();
task.accept(() -> onDone.onResponse(null));
if (onDone.isDone()) {
// keep going on the current thread, no need to fork
task = pollNextTaskOrReleasePermit();
} else {
onDone.addListener(new ActionListener<>() {
@Override
public void onResponse(Void unused) {
final Consumer<Releasable> nextTask = pollNextTaskOrReleasePermit();
if (nextTask != null) {
executeAndRelease(nextTask);
}
}
@Override
public void onFailure(Exception e) {
assert false : e;
}
});
return;
}
} while (task != null);
}
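// Polls the next queued task; if the queue is empty, the permit is released. The queue is then
// re-checked, since a task may have been enqueued between the poll and the release, in which case
// we try to re-acquire a permit and return that task instead of stranding it.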
private Consumer<Releasable> pollNextTaskOrReleasePermit() {
var task = queue.poll();
if (task == null) {
semaphore.release();
while (queue.peek() != null && semaphore.tryAcquire()) {
task = queue.poll();
if (task == null) {
semaphore.release();
} else {
return task;
}
}
}
return task;
}
}
}
| shardIt | java |
/*
* Copyright 2013 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty5.util;
import org.junit.jupiter.api.Test;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
public class HashedWheelTimerTest {
@Test
public void testScheduleTimeoutShouldNotRunBeforeDelay() throws InterruptedException {
final Timer [MASK] = new HashedWheelTimer();
final CountDownLatch barrier = new CountDownLatch(1);
final Timeout timeout = [MASK] .newTimeout(timeout1 -> {
fail("This should not have run");
barrier.countDown();
}, 10, TimeUnit.SECONDS);
assertFalse(barrier.await(3, TimeUnit.SECONDS));
assertFalse(timeout.isExpired(), " [MASK] should not expire");
[MASK] .stop();
}
@Test
public void testScheduleTimeoutShouldRunAfterDelay() throws InterruptedException {
final Timer [MASK] = new HashedWheelTimer();
final CountDownLatch barrier = new CountDownLatch(1);
final Timeout timeout = [MASK] .newTimeout(timeout1 -> barrier.countDown(), 2, TimeUnit.SECONDS);
assertTrue(barrier.await(3, TimeUnit.SECONDS));
assertTrue(timeout.isExpired(), " [MASK] should expire");
[MASK] .stop();
}
@Test
@org.junit.jupiter.api.Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testStopTimer() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(3);
final Timer [MASK] Processed = new HashedWheelTimer();
for (int i = 0; i < 3; i ++) {
[MASK] Processed.newTimeout(timeout -> latch.countDown(), 1, TimeUnit.MILLISECONDS);
}
latch.await();
assertEquals(0, [MASK] Processed.stop().size(), "Number of unprocessed timeouts should be 0");
final Timer [MASK] Unprocessed = new HashedWheelTimer();
for (int i = 0; i < 5; i ++) {
[MASK] Unprocessed.newTimeout(timeout -> {
}, 5, TimeUnit.SECONDS);
}
Thread.sleep(1000L); // sleep for a second
assertFalse( [MASK] Unprocessed.stop().isEmpty(), "Number of unprocessed timeouts should be greater than 0");
}
@Test
@org.junit.jupiter.api.Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testTimerShouldThrowExceptionAfterShutdownForNewTimeouts() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(3);
final Timer [MASK] = new HashedWheelTimer();
for (int i = 0; i < 3; i ++) {
[MASK] .newTimeout(timeout -> latch.countDown(), 1, TimeUnit.MILLISECONDS);
}
latch.await();
[MASK] .stop();
try {
[MASK] .newTimeout(createNoOpTimerTask(), 1, TimeUnit.MILLISECONDS);
fail("Expected exception didn't occur.");
} catch (IllegalStateException ignored) {
// expected
}
}
@Test
@org.junit.jupiter.api.Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testTimerOverflowWheelLength() throws InterruptedException {
final HashedWheelTimer [MASK] = new HashedWheelTimer(
Executors.defaultThreadFactory(), 100, TimeUnit.MILLISECONDS, 32);
final CountDownLatch latch = new CountDownLatch(3);
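// the task re-arms itself on every run, so it must survive multiple rotations of the 32-slot wheel;
// stop() is then guaranteed to report at least one pending timeout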
[MASK] .newTimeout(new TimerTask() {
@Override
public void run(final Timeout timeout) throws Exception {
[MASK] .newTimeout(this, 100, TimeUnit.MILLISECONDS);
latch.countDown();
}
}, 100, TimeUnit.MILLISECONDS);
latch.await();
assertFalse( [MASK] .stop().isEmpty());
}
@Test
public void testExecutionOnTime() throws InterruptedException {
int tickDuration = 200;
int timeout = 125;
int maxTimeout = 2 * (tickDuration + timeout);
final HashedWheelTimer [MASK] = new HashedWheelTimer(tickDuration, TimeUnit.MILLISECONDS);
final BlockingQueue<Long> queue = new LinkedBlockingQueue<>();
int scheduledTasks = 100000;
for (int i = 0; i < scheduledTasks; i++) {
final long start = System.nanoTime();
[MASK] .newTimeout(timeout1 -> queue.add(
TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start)), timeout, TimeUnit.MILLISECONDS);
}
for (int i = 0; i < scheduledTasks; i++) {
long delay = queue.take();
assertTrue(delay >= timeout && delay < maxTimeout,
"Timeout + " + scheduledTasks + " delay " + delay + " must be " + timeout + " < " + maxTimeout);
}
[MASK] .stop();
}
@Test
public void testExecutionOnTaskExecutor() throws InterruptedException {
int timeout = 10;
final CountDownLatch latch = new CountDownLatch(1);
final CountDownLatch timeoutLatch = new CountDownLatch(1);
Executor executor = new Executor() {
@Override
public void execute(Runnable command) {
try {
command.run();
} finally {
latch.countDown();
}
}
};
final HashedWheelTimer [MASK] = new HashedWheelTimer(Executors.defaultThreadFactory(), 100,
TimeUnit.MILLISECONDS, 32, true, 2, executor);
[MASK] .newTimeout(new TimerTask() {
@Override
public void run(final Timeout timeout) throws Exception {
timeoutLatch.countDown();
}
}, timeout, TimeUnit.MILLISECONDS);
latch.await();
timeoutLatch.await();
[MASK] .stop();
}
@Test
public void testRejectedExecutionExceptionWhenTooManyTimeoutsAreAddedBackToBack() {
HashedWheelTimer [MASK] = new HashedWheelTimer(Executors.defaultThreadFactory(), 100,
TimeUnit.MILLISECONDS, 32, true, 2);
[MASK] .newTimeout(createNoOpTimerTask(), 5, TimeUnit.SECONDS);
[MASK] .newTimeout(createNoOpTimerTask(), 5, TimeUnit.SECONDS);
try {
[MASK] .newTimeout(createNoOpTimerTask(), 1, TimeUnit.MILLISECONDS);
fail("Timer allowed adding 3 timeouts when maxPendingTimeouts was 2");
} catch (RejectedExecutionException e) {
// Expected
} finally {
[MASK] .stop();
}
}
@Test
public void testNewTimeoutShouldStopThrowingRejectedExecutionExceptionWhenExistingTimeoutIsCancelled()
throws InterruptedException {
final int tickDurationMs = 100;
final HashedWheelTimer [MASK] = new HashedWheelTimer(Executors.defaultThreadFactory(), tickDurationMs,
TimeUnit.MILLISECONDS, 32, true, 2);
[MASK] .newTimeout(createNoOpTimerTask(), 5, TimeUnit.SECONDS);
Timeout timeoutToCancel = [MASK] .newTimeout(createNoOpTimerTask(), 5, TimeUnit.SECONDS);
assertTrue(timeoutToCancel.cancel());
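// give the worker thread a few ticks to process the cancellation so the pending-timeout count drops below the limit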
Thread.sleep(tickDurationMs * 5);
final CountDownLatch secondLatch = new CountDownLatch(1);
[MASK] .newTimeout(createCountDownLatchTimerTask(secondLatch), 90, TimeUnit.MILLISECONDS);
secondLatch.await();
[MASK] .stop();
}
@Test
@org.junit.jupiter.api.Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testNewTimeoutShouldStopThrowingRejectedExecutionExceptionWhenExistingTimeoutIsExecuted()
throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
final HashedWheelTimer [MASK] = new HashedWheelTimer(Executors.defaultThreadFactory(), 25,
TimeUnit.MILLISECONDS, 4, true, 2);
[MASK] .newTimeout(createNoOpTimerTask(), 5, TimeUnit.SECONDS);
[MASK] .newTimeout(createCountDownLatchTimerTask(latch), 90, TimeUnit.MILLISECONDS);
latch.await();
final CountDownLatch secondLatch = new CountDownLatch(1);
[MASK] .newTimeout(createCountDownLatchTimerTask(secondLatch), 90, TimeUnit.MILLISECONDS);
secondLatch.await();
[MASK] .stop();
}
@Test
public void reportPendingTimeouts() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
final HashedWheelTimer [MASK] = new HashedWheelTimer();
final Timeout t1 = [MASK] .newTimeout(createNoOpTimerTask(), 100, TimeUnit.MINUTES);
final Timeout t2 = [MASK] .newTimeout(createNoOpTimerTask(), 100, TimeUnit.MINUTES);
[MASK] .newTimeout(createCountDownLatchTimerTask(latch), 90, TimeUnit.MILLISECONDS);
assertEquals(3, [MASK] .pendingTimeouts());
t1.cancel();
t2.cancel();
latch.await();
assertEquals(0, [MASK] .pendingTimeouts());
[MASK] .stop();
}
@Test
public void testOverflow() throws InterruptedException {
final HashedWheelTimer [MASK] = new HashedWheelTimer();
final CountDownLatch latch = new CountDownLatch(1);
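// a delay of Long.MAX_VALUE would overflow the deadline arithmetic; the timer guards against this,
// so the timeout must not fire within the one-second wait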
Timeout timeout = [MASK] .newTimeout(timeout1 -> latch.countDown(), Long.MAX_VALUE, TimeUnit.MILLISECONDS);
assertFalse(latch.await(1, TimeUnit.SECONDS));
timeout.cancel();
[MASK] .stop();
}
@Test
@org.junit.jupiter.api.Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testStopTimerCancelsPendingTasks() throws InterruptedException {
final Timer [MASK] Unprocessed = new HashedWheelTimer();
for (int i = 0; i < 5; i ++) {
[MASK] Unprocessed.newTimeout(new TimerTask() {
@Override
public void run(Timeout timeout) throws Exception {
}
}, 5, TimeUnit.SECONDS);
}
Thread.sleep(1000L); // sleep for a second
for (Timeout timeout : [MASK] Unprocessed.stop()) {
assertTrue(timeout.isCancelled(), "All unprocessed tasks should be canceled");
}
}
private static TimerTask createNoOpTimerTask() {
return timeout -> {
};
}
private static TimerTask createCountDownLatchTimerTask(final CountDownLatch latch) {
return timeout -> latch.countDown();
}
}
| timer | java |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.xpack.autoscaling.capacity.nodeinfo;
import org.elasticsearch.Build;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.cluster.node.DiscoveryNodeUtils;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.version.CompatibilityVersions;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Processors;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.monitor.os.OsInfo;
import org.elasticsearch.monitor.os.OsStats;
import org.elasticsearch.test.client.NoOpClient;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.autoscaling.AutoscalingMetadata;
import org.elasticsearch.xpack.autoscaling.AutoscalingTestCase;
import org.elasticsearch.xpack.autoscaling.policy.AutoscalingPolicy;
import org.elasticsearch.xpack.autoscaling.policy.AutoscalingPolicyMetadata;
import org.hamcrest.Matchers;
import org.junit.After;
import org.junit.Before;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.RejectedExecutionException;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty;
import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent;
import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith;
import static org.elasticsearch.xpack.autoscaling.capacity.nodeinfo.AutoscalingNodeInfoService.FETCH_TIMEOUT;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class AutoscalingNodesInfoServiceTests extends AutoscalingTestCase {
private TestThreadPool threadPool;
private NodeStatsClient client;
private AutoscalingNodeInfoService service;
private TimeValue fetchTimeout;
private AutoscalingMetadata autoscalingMetadata;
private Metadata metadata;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
threadPool = createThreadPool();
client = new NodeStatsClient(threadPool);
final ClusterService clusterService = mock(ClusterService.class);
Settings settings;
if (randomBoolean()) {
fetchTimeout = TimeValue.timeValueSeconds(15);
settings = Settings.EMPTY;
} else {
fetchTimeout = TimeValue.timeValueMillis(randomLongBetween(1, 10000));
settings = Settings.builder().put(FETCH_TIMEOUT.getKey(), fetchTimeout).build();
}
when(clusterService.getSettings()).thenReturn(settings);
Set<Setting<?>> settingsSet = Sets.union(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS, Set.of(FETCH_TIMEOUT));
ClusterSettings clusterSettings = new ClusterSettings(settings, settingsSet);
when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
service = new AutoscalingNodeInfoService(clusterService, client);
autoscalingMetadata = randomAutoscalingMetadataOfPolicyCount(between(1, 8));
metadata = Metadata.builder().putCustom(AutoscalingMetadata.NAME, autoscalingMetadata).build();
}
@After
@Override
public void tearDown() throws Exception {
threadPool.close();
super.tearDown();
}
public void testAddRemoveNode() {
if (randomBoolean()) {
service.onClusterChanged(new ClusterChangedEvent("test", ClusterState.EMPTY_STATE, ClusterState.EMPTY_STATE));
}
ClusterState previousState = ClusterState.EMPTY_STATE;
Set<DiscoveryNode> previousNodes = new HashSet<>();
Set<DiscoveryNode> previousSucceededNodes = new HashSet<>();
for (int i = 0; i < 5; ++i) {
Set<DiscoveryNode> newNodes = IntStream.range(0, between(1, 10))
.mapToObj(n -> newNode("test_" + n))
.collect(Collectors.toSet());
Set<DiscoveryNode> nodes = Sets.union(newNodes, new HashSet<>(randomSubsetOf(previousNodes)));
ClusterState [MASK] = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.nodes(discoveryNodesBuilder(nodes, true))
.build();
Set<DiscoveryNode> missingNodes = Sets.difference(nodes, previousSucceededNodes);
Set<DiscoveryNode> failingNodes = new HashSet<>(randomSubsetOf(missingNodes));
Set<DiscoveryNode> succeedingNodes = Sets.difference(missingNodes, failingNodes);
List<FailedNodeException> failures = failingNodes.stream()
.map(node -> new FailedNodeException(node.getId(), randomAlphaOfLength(10), new Exception()))
.collect(Collectors.toList());
NodesStatsResponse response = new NodesStatsResponse(
ClusterName.DEFAULT,
succeedingNodes.stream()
.map(n -> statsForNode(n, randomLongBetween(0, Long.MAX_VALUE / 1000)))
.collect(Collectors.toList()),
failures
);
NodesInfoResponse responseInfo = new NodesInfoResponse(
ClusterName.DEFAULT,
succeedingNodes.stream().map(n -> infoForNode(n, randomIntBetween(1, 64))).collect(Collectors.toList()),
List.of()
);
client.respondStats(response, () -> {
Sets.union(missingNodes, Sets.difference(previousNodes, nodes))
.forEach(n -> assertThat(service.snapshot().get(n), isEmpty()));
Sets.intersection(previousSucceededNodes, nodes).forEach(n -> assertThat(service.snapshot().get(n), isPresent()));
});
client.respondInfo(responseInfo, () -> {
});
service.onClusterChanged(new ClusterChangedEvent("test", [MASK] , previousState));
client.assertNoResponder();
assertMatchesResponse(succeedingNodes, response, responseInfo);
failingNodes.forEach(n -> assertThat(service.snapshot().get(n), isEmpty()));
previousNodes.clear();
previousNodes.addAll(nodes);
previousSucceededNodes.retainAll(nodes);
previousSucceededNodes.addAll(succeedingNodes);
previousState = [MASK] ;
}
}
public void testNotMaster() {
Set<DiscoveryNode> nodes = IntStream.range(0, between(1, 10)).mapToObj(n -> newNode("test_" + n)).collect(Collectors.toSet());
DiscoveryNodes.Builder nodesBuilder = discoveryNodesBuilder(nodes, false);
ClusterState [MASK] = ClusterState.builder(ClusterName.DEFAULT).nodes(nodesBuilder).metadata(metadata).build();
// client throws if called.
service.onClusterChanged(new ClusterChangedEvent("test", [MASK] , ClusterState.EMPTY_STATE));
nodes.forEach(n -> assertThat(service.snapshot().get(n), isEmpty()));
}
public void testNoLongerMaster() {
Set<DiscoveryNode> nodes = IntStream.range(0, between(1, 10)).mapToObj(n -> newNode("test_" + n)).collect(Collectors.toSet());
ClusterState masterState = ClusterState.builder(ClusterName.DEFAULT)
.nodes(discoveryNodesBuilder(nodes, true))
.metadata(metadata)
.build();
NodesStatsResponse response = new NodesStatsResponse(
ClusterName.DEFAULT,
nodes.stream().map(n -> statsForNode(n, randomLongBetween(0, Long.MAX_VALUE / 1000))).collect(Collectors.toList()),
List.of()
);
NodesInfoResponse responseInfo = new NodesInfoResponse(
ClusterName.DEFAULT,
nodes.stream().map(n -> infoForNode(n, randomIntBetween(1, 64))).collect(Collectors.toList()),
List.of()
);
client.respondStats(response, () -> {});
client.respondInfo(responseInfo, () -> {});
service.onClusterChanged(new ClusterChangedEvent("test", masterState, ClusterState.EMPTY_STATE));
client.assertNoResponder();
assertMatchesResponse(nodes, response, responseInfo);
ClusterState notMasterState = ClusterState.builder(masterState).nodes(masterState.nodes().withMasterNodeId(null)).build();
// client throws if called.
service.onClusterChanged(new ClusterChangedEvent("test", notMasterState, masterState));
nodes.forEach(n -> assertThat(service.snapshot().get(n), isEmpty()));
}
public void testStatsFails() {
Set<DiscoveryNode> nodes = IntStream.range(0, between(1, 10)).mapToObj(n -> newNode("test_" + n)).collect(Collectors.toSet());
ClusterState [MASK] = ClusterState.builder(ClusterName.DEFAULT).nodes(discoveryNodesBuilder(nodes, true)).metadata(metadata).build();
client.respondStats((r, listener) -> listener.onFailure(randomFrom(new IllegalStateException(), new RejectedExecutionException())));
service.onClusterChanged(new ClusterChangedEvent("test", [MASK] , ClusterState.EMPTY_STATE));
nodes.forEach(n -> assertThat(service.snapshot().get(n), isEmpty()));
NodesStatsResponse response = new NodesStatsResponse(
ClusterName.DEFAULT,
nodes.stream().map(n -> statsForNode(n, randomLongBetween(0, Long.MAX_VALUE / 1000))).collect(Collectors.toList()),
List.of()
);
NodesInfoResponse responseInfo = new NodesInfoResponse(
ClusterName.DEFAULT,
nodes.stream().map(n -> infoForNode(n, randomIntBetween(1, 64))).collect(Collectors.toList()),
List.of()
);
// implicit retry on cluster [MASK] update.
client.respondStats(response, () -> {});
client.respondInfo(responseInfo, () -> {});
service.onClusterChanged(new ClusterChangedEvent("test", [MASK] , [MASK] ));
client.assertNoResponder();
}
public void testInfoFails() {
Set<DiscoveryNode> nodes = IntStream.range(0, between(1, 10)).mapToObj(n -> newNode("test_" + n)).collect(Collectors.toSet());
ClusterState [MASK] = ClusterState.builder(ClusterName.DEFAULT).nodes(discoveryNodesBuilder(nodes, true)).metadata(metadata).build();
NodesStatsResponse response = new NodesStatsResponse(
ClusterName.DEFAULT,
nodes.stream().map(n -> statsForNode(n, randomLongBetween(0, Long.MAX_VALUE / 1000))).collect(Collectors.toList()),
List.of()
);
client.respondStats(response, () -> {});
client.respondInfo((r, listener) -> listener.onFailure(randomFrom(new IllegalStateException(), new RejectedExecutionException())));
service.onClusterChanged(new ClusterChangedEvent("test", [MASK] , ClusterState.EMPTY_STATE));
nodes.forEach(n -> assertThat(service.snapshot().get(n), isEmpty()));
NodesInfoResponse responseInfo = new NodesInfoResponse(
ClusterName.DEFAULT,
nodes.stream().map(n -> infoForNode(n, randomIntBetween(1, 64))).collect(Collectors.toList()),
List.of()
);
// implicit retry on cluster [MASK] update.
client.respondStats(response, () -> {});
client.respondInfo(responseInfo, () -> {});
service.onClusterChanged(new ClusterChangedEvent("test", [MASK] , [MASK] ));
client.assertNoResponder();
}
public void testRestartNode() {
Set<DiscoveryNode> nodes = IntStream.range(0, between(1, 10)).mapToObj(n -> newNode("test_" + n)).collect(Collectors.toSet());
ClusterState [MASK] = ClusterState.builder(ClusterName.DEFAULT).nodes(discoveryNodesBuilder(nodes, true)).metadata(metadata).build();
NodesStatsResponse response = new NodesStatsResponse(
ClusterName.DEFAULT,
nodes.stream().map(n -> statsForNode(n, randomLongBetween(0, Long.MAX_VALUE / 1000))).collect(Collectors.toList()),
List.of()
);
NodesInfoResponse responseInfo = new NodesInfoResponse(
ClusterName.DEFAULT,
nodes.stream().map(n -> infoForNode(n, randomIntBetween(1, 64))).collect(Collectors.toList()),
List.of()
);
client.respondStats(response, () -> {});
client.respondInfo(responseInfo, () -> {});
service.onClusterChanged(new ClusterChangedEvent("test", [MASK] , ClusterState.EMPTY_STATE));
client.assertNoResponder();
assertMatchesResponse(nodes, response, responseInfo);
Set<DiscoveryNode> restartedNodes = randomValueOtherThan(
nodes,
() -> nodes.stream().map(n -> randomBoolean() ? restartNode(n) : n).collect(Collectors.toSet())
);
ClusterState restartedState = ClusterState.builder( [MASK] ).nodes(discoveryNodesBuilder(restartedNodes, true)).build();
NodesStatsResponse restartedStatsResponse = new NodesStatsResponse(
ClusterName.DEFAULT,
Sets.difference(restartedNodes, nodes)
.stream()
.map(n -> statsForNode(n, randomLongBetween(0, Long.MAX_VALUE / 1000)))
.collect(Collectors.toList()),
List.of()
);
NodesInfoResponse restartedInfoResponse = new NodesInfoResponse(
ClusterName.DEFAULT,
Sets.difference(restartedNodes, nodes).stream().map(n -> infoForNode(n, randomIntBetween(1, 64))).collect(Collectors.toList()),
List.of()
);
client.respondStats(restartedStatsResponse, () -> {});
client.respondInfo(restartedInfoResponse, () -> {});
service.onClusterChanged(new ClusterChangedEvent("test", restartedState, [MASK] ));
client.assertNoResponder();
assertMatchesResponse(Sets.intersection(restartedNodes, nodes), response, responseInfo);
assertMatchesResponse(Sets.difference(restartedNodes, nodes), restartedStatsResponse, restartedInfoResponse);
Sets.difference(nodes, restartedNodes).forEach(n -> assertThat(service.snapshot().get(n), isEmpty()));
}
public void testConcurrentStateUpdate() throws Exception {
Set<DiscoveryNode> nodes = IntStream.range(0, between(1, 10)).mapToObj(n -> newNode("test_" + n)).collect(Collectors.toSet());
ClusterState [MASK] = ClusterState.builder(ClusterName.DEFAULT).nodes(discoveryNodesBuilder(nodes, true)).metadata(metadata).build();
NodesStatsResponse response = new NodesStatsResponse(
ClusterName.DEFAULT,
nodes.stream().map(n -> statsForNode(n, randomLongBetween(0, Long.MAX_VALUE / 1000))).collect(Collectors.toList()),
List.of()
);
NodesInfoResponse nodesInfoResponse = new NodesInfoResponse(
ClusterName.DEFAULT,
nodes.stream().map(n -> infoForNode(n, randomIntBetween(1, 64))).collect(Collectors.toList()),
List.of()
);
List<Thread> threads = new ArrayList<>();
client.respondStats((request, listener) -> {
CountDownLatch latch = new CountDownLatch(1);
threads.add(startThread(() -> {
safeAwait(latch);
listener.onResponse(response);
}));
threads.add(startThread(() -> {
// We do not register a new responder, so this will fail if anything is called on the client.
service.onClusterChanged(new ClusterChangedEvent("test_concurrent", [MASK] , [MASK] ));
latch.countDown();
}));
});
client.respondInfo((r, l) -> l.onResponse(nodesInfoResponse));
service.onClusterChanged(new ClusterChangedEvent("test", [MASK] , ClusterState.EMPTY_STATE));
for (Thread thread : threads) {
thread.join(10000);
}
client.assertNoResponder();
threads.forEach(t -> assertThat(t.isAlive(), is(false)));
}
public void testRelevantNodes() {
Set<DiscoveryNode> nodes = IntStream.range(0, between(1, 10)).mapToObj(n -> newNode("test_" + n)).collect(Collectors.toSet());
ClusterState [MASK] = ClusterState.builder(ClusterName.DEFAULT).nodes(discoveryNodesBuilder(nodes, true)).metadata(metadata).build();
Set<DiscoveryNode> relevantNodes = service.relevantNodes( [MASK] );
assertThat(relevantNodes, equalTo(nodes));
}
private DiscoveryNodes.Builder discoveryNodesBuilder(Set<DiscoveryNode> nodes, boolean master) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
final String localNodeId = nodes.isEmpty() ? null : randomFrom(nodes).getId();
nodesBuilder.localNodeId(localNodeId);
nodesBuilder.masterNodeId(master ? localNodeId : null);
nodes.forEach(nodesBuilder::add);
addIrrelevantNodes(nodesBuilder);
return nodesBuilder;
}
/**
* Add irrelevant nodes. NodeStatsClient will validate that they are not asked for.
*/
private void addIrrelevantNodes(DiscoveryNodes.Builder nodesBuilder) {
Set<Set<String>> relevantRoleSets = autoscalingMetadata.policies()
.values()
.stream()
.map(AutoscalingPolicyMetadata::policy)
.map(AutoscalingPolicy::roles)
.collect(Collectors.toSet());
IntStream.range(0, 5).mapToObj(i -> newNode("irrelevant_" + i, randomIrrelevantRoles(relevantRoleSets))).forEach(nodesBuilder::add);
}
private Set<DiscoveryNodeRole> randomIrrelevantRoles(Set<Set<String>> relevantRoleSets) {
return randomValueOtherThanMany(relevantRoleSets::contains, AutoscalingTestCase::randomRoles).stream()
.map(DiscoveryNodeRole::getRoleFromRoleName)
.collect(Collectors.toSet());
}
public void assertMatchesResponse(Set<DiscoveryNode> nodes, NodesStatsResponse response, NodesInfoResponse infoResponse) {
nodes.forEach(n -> {
assertThat(
service.snapshot().get(n),
isPresentWith(
new AutoscalingNodeInfo(
response.getNodesMap().get(n.getId()).getOs().getMem().getAdjustedTotal().getBytes(),
Processors.of(infoResponse.getNodesMap().get(n.getId()).getInfo(OsInfo.class).getFractionalAllocatedProcessors())
)
)
);
});
}
private Thread startThread(Runnable runnable) {
Thread thread = new Thread(runnable);
thread.start();
return thread;
}
private static NodeStats statsForNode(DiscoveryNode node, long memory) {
OsStats osStats = new OsStats(
randomNonNegativeLong(),
new OsStats.Cpu(randomShort(), null),
new OsStats.Mem(memory, randomLongBetween(0, memory), randomLongBetween(0, memory)),
new OsStats.Swap(randomNonNegativeLong(), randomNonNegativeLong()),
null
);
return new NodeStats(
node,
randomNonNegativeLong(),
null,
osStats,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null
);
}
private static org.elasticsearch.action.admin.cluster.node.info.NodeInfo infoForNode(DiscoveryNode node, int processors) {
OsInfo osInfo = new OsInfo(randomLong(), processors, Processors.of((double) processors), null, null, null, null);
return new org.elasticsearch.action.admin.cluster.node.info.NodeInfo(
Build.current().version(),
new CompatibilityVersions(TransportVersion.current(), Map.of()),
IndexVersion.current(),
Map.of(),
Build.current(),
node,
null,
osInfo,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null
);
}
private class NodeStatsClient extends NoOpClient {
private BiConsumer<NodesStatsRequest, ActionListener<NodesStatsResponse>> responderStats;
private BiConsumer<NodesInfoRequest, ActionListener<NodesInfoResponse>> responderInfo;
private NodeStatsClient(ThreadPool threadPool) {
super(threadPool);
}
public void respondInfo(NodesInfoResponse response, Runnable whileFetching) {
respondInfo((request, listener) -> {
assertThat(
Set.of(request.nodesIds()),
Matchers.equalTo(
Stream.concat(
response.getNodesMap().keySet().stream(),
response.failures().stream().map(FailedNodeException::nodeId)
).collect(Collectors.toSet())
)
);
whileFetching.run();
listener.onResponse(response);
});
}
public void respondStats(NodesStatsResponse response, Runnable whileFetching) {
respondStats((request, listener) -> {
assertThat(
Set.of(request.nodesIds()),
Matchers.equalTo(
Stream.concat(
response.getNodesMap().keySet().stream(),
response.failures().stream().map(FailedNodeException::nodeId)
).collect(Collectors.toSet())
)
);
whileFetching.run();
listener.onResponse(response);
});
}
public void respondStats(BiConsumer<NodesStatsRequest, ActionListener<NodesStatsResponse>> responderValue) {
assertThat(responderValue, notNullValue());
this.responderStats = responderValue;
}
public void respondInfo(BiConsumer<NodesInfoRequest, ActionListener<NodesInfoResponse>> responderValue) {
assertThat(responderValue, notNullValue());
this.responderInfo = responderValue;
}
@Override
protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
ActionType<Response> action,
Request request,
ActionListener<Response> listener
) {
assertThat(
action,
anyOf(Matchers.sameInstance(TransportNodesStatsAction.TYPE), Matchers.sameInstance(TransportNodesInfoAction.TYPE))
);
if (action == TransportNodesStatsAction.TYPE) {
NodesStatsRequest nodesStatsRequest = (NodesStatsRequest) request;
assertThat(nodesStatsRequest.timeout(), equalTo(fetchTimeout));
assertThat(responderStats, notNullValue());
BiConsumer<NodesStatsRequest, ActionListener<NodesStatsResponse>> responderValue = this.responderStats;
this.responderStats = null;
@SuppressWarnings("unchecked")
ActionListener<NodesStatsResponse> statsListener = (ActionListener<NodesStatsResponse>) listener;
responderValue.accept(nodesStatsRequest, statsListener);
} else {
NodesInfoRequest nodesInfoRequest = (NodesInfoRequest) request;
assertThat(nodesInfoRequest.timeout(), equalTo(fetchTimeout));
assertThat(responderInfo, notNullValue());
BiConsumer<NodesInfoRequest, ActionListener<NodesInfoResponse>> responderValue = this.responderInfo;
this.responderInfo = null;
@SuppressWarnings("unchecked")
ActionListener<NodesInfoResponse> infoListener = (ActionListener<NodesInfoResponse>) listener;
responderValue.accept(nodesInfoRequest, infoListener);
}
}
public void assertNoResponder() {
assertThat(responderInfo, nullValue());
assertThat(responderStats, nullValue());
}
}
private DiscoveryNode newNode(String nodeName) {
return newNode(
nodeName,
randomFrom(autoscalingMetadata.policies().values()).policy()
.roles()
.stream()
.map(DiscoveryNodeRole::getRoleFromRoleName)
.collect(Collectors.toSet())
);
}
private DiscoveryNode newNode(String nodeName, Set<DiscoveryNodeRole> roles) {
return DiscoveryNodeUtils.builder(UUIDs.randomBase64UUID()).name(nodeName).roles(roles).build();
}
private DiscoveryNode restartNode(DiscoveryNode node) {
return DiscoveryNodeUtils.builder(node.getId())
.name(node.getName())
.address(node.getAddress())
.attributes(node.getAttributes())
.roles(node.getRoles())
.version(node.getVersionInformation())
.build();
}
}
| state | java |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.remoting.exchange.codec;
import org.apache.dubbo.common.Version;
import org.apache.dubbo.common.config.ConfigurationUtils;
import org.apache.dubbo.common.io.Bytes;
import org.apache.dubbo.common.io.StreamUtils;
import org.apache.dubbo.common.logger.Logger;
import org.apache.dubbo.common.logger.LoggerFactory;
import org.apache.dubbo.common.serialize.Cleanable;
import org.apache.dubbo.common.serialize.ObjectInput;
import org.apache.dubbo.common.serialize.ObjectOutput;
import org.apache.dubbo.common.serialize.Serialization;
import org.apache.dubbo.common.utils.StringUtils;
import org.apache.dubbo.remoting.Channel;
import org.apache.dubbo.remoting.RemotingException;
import org.apache.dubbo.remoting. [MASK] .ChannelBuffer;
import org.apache.dubbo.remoting. [MASK] .ChannelBufferInputStream;
import org.apache.dubbo.remoting. [MASK] .ChannelBufferOutputStream;
import org.apache.dubbo.remoting.exchange.Request;
import org.apache.dubbo.remoting.exchange.Response;
import org.apache.dubbo.remoting.exchange.support.DefaultFuture;
import org.apache.dubbo.remoting.telnet.codec.TelnetCodec;
import org.apache.dubbo.remoting.transport.CodecSupport;
import org.apache.dubbo.remoting.transport.ExceedPayloadLimitException;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.text.SimpleDateFormat;
import java.util.Date;
/**
* ExchangeCodec.
*/
public class ExchangeCodec extends TelnetCodec {
// header length.
protected static final int HEADER_LENGTH = 16;
// magic header.
protected static final short MAGIC = (short) 0xdabb;
protected static final byte MAGIC_HIGH = Bytes.short2bytes(MAGIC)[0];
protected static final byte MAGIC_LOW = Bytes.short2bytes(MAGIC)[1];
// message flag.
protected static final byte FLAG_REQUEST = (byte) 0x80;
protected static final byte FLAG_TWOWAY = (byte) 0x40;
protected static final byte FLAG_EVENT = (byte) 0x20;
protected static final int SERIALIZATION_MASK = 0x1f;
private static final Logger logger = LoggerFactory.getLogger(ExchangeCodec.class);
public Short getMagicCode() {
return MAGIC;
}
@Override
public void encode(Channel channel, ChannelBuffer [MASK] , Object msg) throws IOException {
if (msg instanceof Request) {
encodeRequest(channel, [MASK] , (Request) msg);
} else if (msg instanceof Response) {
encodeResponse(channel, [MASK] , (Response) msg);
} else {
super.encode(channel, [MASK] , msg);
}
}
@Override
public Object decode(Channel channel, ChannelBuffer [MASK] ) throws IOException {
int readable = [MASK] .readableBytes();
byte[] header = new byte[Math.min(readable, HEADER_LENGTH)];
[MASK] .readBytes(header);
return decode(channel, [MASK] , readable, header);
}
@Override
protected Object decode(Channel channel, ChannelBuffer [MASK] , int readable, byte[] header) throws IOException {
// check magic number.
if (readable > 0 && header[0] != MAGIC_HIGH
|| readable > 1 && header[1] != MAGIC_LOW) {
int length = header.length;
if (header.length < readable) {
header = Bytes.copyOf(header, readable);
[MASK] .readBytes(header, length, readable - length);
}
for (int i = 1; i < header.length - 1; i++) {
if (header[i] == MAGIC_HIGH && header[i + 1] == MAGIC_LOW) {
[MASK] .readerIndex( [MASK] .readerIndex() - header.length + i);
header = Bytes.copyOf(header, i);
break;
}
}
return super.decode(channel, [MASK] , readable, header);
}
// check length.
if (readable < HEADER_LENGTH) {
return DecodeResult.NEED_MORE_INPUT;
}
// get data length.
int len = Bytes.bytes2int(header, 12);
// If the received response exceeds the payload limit, directly construct an error response for the client.
// See https://github.com/apache/dubbo/issues/7021 for details.
Object obj = finishRespWhenOverPayload(channel, len, header);
if (null != obj) {
return obj;
}
checkPayload(channel, len);
int tt = len + HEADER_LENGTH;
if (readable < tt) {
return DecodeResult.NEED_MORE_INPUT;
}
// limit input stream.
ChannelBufferInputStream is = new ChannelBufferInputStream( [MASK] , len);
try {
return decodeBody(channel, is, header);
} finally {
if (is.available() > 0) {
try {
if (logger.isWarnEnabled()) {
logger.warn("Skip input stream " + is.available());
}
StreamUtils.skipUnusedStream(is);
} catch (IOException e) {
logger.warn(e.getMessage(), e);
}
}
}
}
protected Object decodeBody(Channel channel, InputStream is, byte[] header) throws IOException {
byte flag = header[2], proto = (byte) (flag & SERIALIZATION_MASK);
// get request id.
long id = Bytes.bytes2long(header, 4);
if ((flag & FLAG_REQUEST) == 0) {
// decode response.
Response res = new Response(id);
if ((flag & FLAG_EVENT) != 0) {
res.setEvent(true);
}
// get status.
byte status = header[3];
res.setStatus(status);
try {
if (status == Response.OK) {
Object data;
if (res.isEvent()) {
byte[] eventPayload = CodecSupport.getPayload(is);
if (CodecSupport.isHeartBeat(eventPayload, proto)) {
// heartbeat response data is always null
data = null;
} else {
data = decodeEventData(channel, CodecSupport.deserialize(channel.getUrl(), new ByteArrayInputStream(eventPayload), proto), eventPayload);
}
} else {
data = decodeResponseData(channel, CodecSupport.deserialize(channel.getUrl(), is, proto), getRequestData(channel, res, id));
}
res.setResult(data);
} else {
res.setErrorMessage(CodecSupport.deserialize(channel.getUrl(), is, proto).readUTF());
}
} catch (Throwable t) {
res.setStatus(Response.CLIENT_ERROR);
res.setErrorMessage(StringUtils.toString(t));
}
return res;
} else {
// decode request.
Request req = new Request(id);
req.setVersion(Version.getProtocolVersion());
req.setTwoWay((flag & FLAG_TWOWAY) != 0);
if ((flag & FLAG_EVENT) != 0) {
req.setEvent(true);
}
try {
Object data;
if (req.isEvent()) {
byte[] eventPayload = CodecSupport.getPayload(is);
if (CodecSupport.isHeartBeat(eventPayload, proto)) {
// heartbeat request data is always null
data = null;
} else {
data = decodeEventData(channel, CodecSupport.deserialize(channel.getUrl(), new ByteArrayInputStream(eventPayload), proto), eventPayload);
}
} else {
data = decodeRequestData(channel, CodecSupport.deserialize(channel.getUrl(), is, proto));
}
req.setData(data);
} catch (Throwable t) {
// bad request
req.setBroken(true);
req.setData(t);
}
return req;
}
}
protected Object getRequestData(Channel channel, Response response, long id) {
DefaultFuture future = DefaultFuture.getFuture(id);
if (future != null) {
Request req = future.getRequest();
if (req != null) {
return req.getData();
}
}
logger.warn("The timeout response finally returned at "
+ (new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS").format(new Date()))
+ ", response status is " + response.getStatus() + ", response id is " + response.getId()
+ (channel == null ? "" : ", channel: " + channel.getLocalAddress()
+ " -> " + channel.getRemoteAddress()) + ", please check provider side for detailed result.");
throw new IllegalArgumentException("Failed to find any request match the response, response id: " + id);
}
protected void encodeRequest(Channel channel, ChannelBuffer [MASK] , Request req) throws IOException {
Serialization serialization = getSerialization(channel, req);
// header.
byte[] header = new byte[HEADER_LENGTH];
// set magic number.
Bytes.short2bytes(MAGIC, header);
// set request and serialization flag.
header[2] = (byte) (FLAG_REQUEST | serialization.getContentTypeId());
if (req.isTwoWay()) {
header[2] |= FLAG_TWOWAY;
}
if (req.isEvent()) {
header[2] |= FLAG_EVENT;
}
// set request id.
Bytes.long2bytes(req.getId(), header, 4);
// encode request data.
int savedWriteIndex = [MASK] .writerIndex();
[MASK] .writerIndex(savedWriteIndex + HEADER_LENGTH);
ChannelBufferOutputStream bos = new ChannelBufferOutputStream( [MASK] );
if (req.isHeartbeat()) {
// heartbeat request data is always null
bos.write(CodecSupport.getNullBytesOf(serialization));
} else {
ObjectOutput out = serialization.serialize(channel.getUrl(), bos);
if (req.isEvent()) {
encodeEventData(channel, out, req.getData());
} else {
encodeRequestData(channel, out, req.getData(), req.getVersion());
}
out.flushBuffer();
if (out instanceof Cleanable) {
((Cleanable) out).cleanup();
}
}
bos.flush();
bos.close();
int len = bos.writtenBytes();
checkPayload(channel, len);
Bytes.int2bytes(len, header, 12);
// write
[MASK] .writerIndex(savedWriteIndex);
[MASK] .writeBytes(header); // write header.
[MASK] .writerIndex(savedWriteIndex + HEADER_LENGTH + len);
}
protected void encodeResponse(Channel channel, ChannelBuffer [MASK] , Response res) throws IOException {
int savedWriteIndex = [MASK] .writerIndex();
try {
Serialization serialization = getSerialization(channel, res);
// header.
byte[] header = new byte[HEADER_LENGTH];
// set magic number.
Bytes.short2bytes(MAGIC, header);
// set request and serialization flag.
header[2] = serialization.getContentTypeId();
if (res.isHeartbeat()) {
header[2] |= FLAG_EVENT;
}
// set response status.
byte status = res.getStatus();
header[3] = status;
// set request id.
Bytes.long2bytes(res.getId(), header, 4);
[MASK] .writerIndex(savedWriteIndex + HEADER_LENGTH);
ChannelBufferOutputStream bos = new ChannelBufferOutputStream( [MASK] );
// encode response data or error message.
if (status == Response.OK) {
if (res.isHeartbeat()) {
// heartbeat response data is always null
bos.write(CodecSupport.getNullBytesOf(serialization));
} else {
ObjectOutput out = serialization.serialize(channel.getUrl(), bos);
if (res.isEvent()) {
encodeEventData(channel, out, res.getResult());
} else {
encodeResponseData(channel, out, res.getResult(), res.getVersion());
}
out.flushBuffer();
if (out instanceof Cleanable) {
((Cleanable) out).cleanup();
}
}
} else {
ObjectOutput out = serialization.serialize(channel.getUrl(), bos);
out.writeUTF(res.getErrorMessage());
out.flushBuffer();
if (out instanceof Cleanable) {
((Cleanable) out).cleanup();
}
}
bos.flush();
bos.close();
int len = bos.writtenBytes();
checkPayload(channel, len);
Bytes.int2bytes(len, header, 12);
// write
[MASK] .writerIndex(savedWriteIndex);
[MASK] .writeBytes(header); // write header.
[MASK] .writerIndex(savedWriteIndex + HEADER_LENGTH + len);
} catch (Throwable t) {
// clear [MASK]
[MASK] .writerIndex(savedWriteIndex);
// Send an error message back to the consumer; otherwise the consumer will wait until the request times out.
if (!res.isEvent() && res.getStatus() != Response.BAD_RESPONSE) {
Response r = new Response(res.getId(), res.getVersion());
r.setStatus(Response.SERIALIZATION_ERROR);
if (t instanceof ExceedPayloadLimitException) {
logger.warn(t.getMessage(), t);
try {
r.setErrorMessage(t.getMessage());
channel.send(r);
return;
} catch (RemotingException e) {
logger.warn("Failed to send bad_response info back: " + t.getMessage() + ", cause: " + e.getMessage(), e);
}
} else {
// FIXME log error message in Codec and handle in caught() of IoHandler?
logger.warn("Failed to encode response: " + res + ", send bad_response info instead, cause: " + t.getMessage(), t);
try {
r.setErrorMessage("Failed to send response: " + res + ", cause: " + StringUtils.toString(t));
channel.send(r);
return;
} catch (RemotingException e) {
logger.warn("Failed to send bad_response info back: " + res + ", cause: " + e.getMessage(), e);
}
}
}
// Rethrow exception
if (t instanceof IOException) {
throw (IOException) t;
} else if (t instanceof RuntimeException) {
throw (RuntimeException) t;
} else if (t instanceof Error) {
throw (Error) t;
} else {
throw new RuntimeException(t.getMessage(), t);
}
}
}
@Override
protected Object decodeData(ObjectInput in) throws IOException {
return decodeRequestData(in);
}
protected Object decodeRequestData(ObjectInput in) throws IOException {
try {
return in.readObject();
} catch (ClassNotFoundException e) {
throw new IOException(StringUtils.toString("Read object failed.", e));
}
}
protected Object decodeResponseData(ObjectInput in) throws IOException {
try {
return in.readObject();
} catch (ClassNotFoundException e) {
throw new IOException(StringUtils.toString("Read object failed.", e));
}
}
@Override
protected void encodeData(ObjectOutput out, Object data) throws IOException {
encodeRequestData(out, data);
}
private void encodeEventData(ObjectOutput out, Object data) throws IOException {
out.writeEvent(data);
}
@Deprecated
protected void encodeHeartbeatData(ObjectOutput out, Object data) throws IOException {
encodeEventData(out, data);
}
protected void encodeRequestData(ObjectOutput out, Object data) throws IOException {
out.writeObject(data);
}
protected void encodeResponseData(ObjectOutput out, Object data) throws IOException {
out.writeObject(data);
}
@Override
protected Object decodeData(Channel channel, ObjectInput in) throws IOException {
return decodeRequestData(channel, in);
}
protected Object decodeEventData(Channel channel, ObjectInput in, byte[] eventBytes) throws IOException {
try {
if (eventBytes != null) {
int dataLen = eventBytes.length;
int threshold = ConfigurationUtils.getSystemConfiguration().getInt("deserialization.event.size", 15);
if (dataLen > threshold) {
throw new IllegalArgumentException("Event data too long, actual size " + dataLen + ", threshold " + threshold + ", rejected for security considerations.");
}
}
return in.readEvent();
} catch (IOException | ClassNotFoundException e) {
throw new IOException(StringUtils.toString("Decode dubbo protocol event failed.", e));
}
}
protected Object decodeRequestData(Channel channel, ObjectInput in) throws IOException {
return decodeRequestData(in);
}
protected Object decodeResponseData(Channel channel, ObjectInput in) throws IOException {
return decodeResponseData(in);
}
protected Object decodeResponseData(Channel channel, ObjectInput in, Object requestData) throws IOException {
return decodeResponseData(channel, in);
}
@Override
protected void encodeData(Channel channel, ObjectOutput out, Object data) throws IOException {
encodeRequestData(channel, out, data);
}
private void encodeEventData(Channel channel, ObjectOutput out, Object data) throws IOException {
encodeEventData(out, data);
}
@Deprecated
protected void encodeHeartbeatData(Channel channel, ObjectOutput out, Object data) throws IOException {
encodeHeartbeatData(out, data);
}
protected void encodeRequestData(Channel channel, ObjectOutput out, Object data) throws IOException {
encodeRequestData(out, data);
}
protected void encodeResponseData(Channel channel, ObjectOutput out, Object data) throws IOException {
encodeResponseData(out, data);
}
protected void encodeRequestData(Channel channel, ObjectOutput out, Object data, String version) throws IOException {
encodeRequestData(out, data);
}
protected void encodeResponseData(Channel channel, ObjectOutput out, Object data, String version) throws IOException {
encodeResponseData(out, data);
}
private Object finishRespWhenOverPayload(Channel channel, long size, byte[] header) {
int payload = getPayload(channel);
boolean overPayload = isOverPayload(payload, size);
if (overPayload) {
long reqId = Bytes.bytes2long(header, 4);
byte flag = header[2];
if ((flag & FLAG_REQUEST) == 0) {
Response res = new Response(reqId);
if ((flag & FLAG_EVENT) != 0) {
res.setEvent(true);
}
res.setStatus(Response.CLIENT_ERROR);
String errorMsg = "Data length too large: " + size + ", max payload: " + payload + ", channel: " + channel;
logger.error(errorMsg);
res.setErrorMessage(errorMsg);
return res;
}
}
return null;
}
}
| buffer | java |
/*******************************************************************************
* Copyright 2011 See AUTHORS file.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.badlogic.gdx.tests;
import com.badlogic.gdx.Gdx;
import com.badlogic.gdx.graphics.Color;
import com.badlogic.gdx.graphics.OrthographicCamera;
import com.badlogic.gdx.graphics.Pixmap;
import com.badlogic.gdx.graphics.Texture;
import com.badlogic.gdx.graphics.g2d.NinePatch;
import com.badlogic.gdx.graphics.g2d.SpriteBatch;
import com.badlogic.gdx.graphics.g2d.TextureRegion;
import com.badlogic.gdx.math.Interpolation;
import com.badlogic.gdx.math.MathUtils;
import com.badlogic.gdx.tests.utils.GdxTest;
import com.badlogic.gdx.utils.Array;
import com.badlogic.gdx.utils.ScreenUtils;
public class NinePatchTest extends GdxTest {
/** A string name for the type of test, and the NinePatch being tested. */
private static class TestPatch {
public final String name;
public final NinePatch ninePatch;
TestPatch (String n) {
this.name = n;
this.ninePatch = NinePatchTest.newNinePatch();
}
TestPatch (String n, NinePatch np) {
this.name = n;
this.ninePatch = np;
}
}
private OrthographicCamera camera;
private SpriteBatch b;
private Array<TestPatch> ninePatches = new Array<TestPatch>(10);
private final long start = System.currentTimeMillis();
@Override
public void create () {
TestPatch tp;
// Create all the NinePatches to test
ninePatches.add(new TestPatch("default"));
tp = new TestPatch("20px width");
int bWidth = 20;
tp.ninePatch.setLeftWidth(bWidth);
tp.ninePatch.setRightWidth(bWidth);
tp.ninePatch.setTopHeight(bWidth);
tp.ninePatch.setBottomHeight(bWidth);
ninePatches.add(tp);
tp = new TestPatch("fat left");
tp.ninePatch.setLeftWidth(3 * tp.ninePatch.getRightWidth());
ninePatches.add(tp);
tp = new TestPatch("fat top");
tp.ninePatch.setTopHeight(3 * tp.ninePatch.getBottomHeight());
ninePatches.add(tp);
tp = new TestPatch("degenerate", newDegenerateNinePatch());
ninePatches.add(tp);
tp = new TestPatch("upper-left quad", newULQuadPatch());
ninePatches.add(tp);
tp = new TestPatch("no middle row", newMidlessPatch());
ninePatches.add(tp);
b = new SpriteBatch();
}
// Make a new 'pixmapSize' square texture region with ' [MASK] ' patches in it. Each patch is a different color.
static TextureRegion newPatchPix (int [MASK] , int pixmapSize) {
final int pixmapDim = MathUtils.nextPowerOfTwo(pixmapSize);
Pixmap p = new Pixmap(pixmapDim, pixmapDim, Pixmap.Format.RGBA8888);
p.setColor(1, 1, 1, 0);
p.fill();
for (int x = 0; x < pixmapSize; x += [MASK] ) {
for (int y = 0; y < pixmapSize; y += [MASK] ) {
p.setColor(x / (float)pixmapSize, y / (float)pixmapSize, 1.0f, 1.0f);
p.fillRectangle(x, y, [MASK] , [MASK] );
}
}
return new TextureRegion(new Texture(p), pixmapSize, pixmapSize);
}
// Make a degenerate NinePatch
static NinePatch newDegenerateNinePatch () {
final int [MASK] = 8;
final int pixmapSize = [MASK] * 3;
TextureRegion tr = newPatchPix( [MASK] , pixmapSize);
return new NinePatch(tr);
}
// Make a basic NinePatch with different colors in each of the nine patches
static NinePatch newNinePatch () {
final int [MASK] = 8;
final int pixmapSize = [MASK] * 3;
TextureRegion tr = newPatchPix( [MASK] , pixmapSize);
return new NinePatch(tr, [MASK] , [MASK] , [MASK] , [MASK] );
}
// Make an upper-left "quad" patch (only 4 patches defined, in the top-left corner of the NinePatch)
static NinePatch newULQuadPatch () {
final int [MASK] = 8;
final int pixmapSize = [MASK] * 2;
TextureRegion tr = newPatchPix( [MASK] , pixmapSize);
return new NinePatch(tr, [MASK] , 0, [MASK] , 0);
}
// Make a NinePatch with no middle band: just the top three and the bottom three patches.
static NinePatch newMidlessPatch () {
final int [MASK] = 8;
final int fullPatchHeight = [MASK] * 2;
final int fullPatchWidth = [MASK] * 3;
final int pixmapDim = MathUtils.nextPowerOfTwo(Math.max(fullPatchWidth, fullPatchHeight));
Pixmap testPatch = new Pixmap(pixmapDim, pixmapDim, Pixmap.Format.RGBA8888);
testPatch.setColor(1, 1, 1, 0);
testPatch.fill();
for (int x = 0; x < fullPatchWidth; x += [MASK] ) {
for (int y = 0; y < fullPatchHeight; y += [MASK] ) {
testPatch.setColor(x / (float)fullPatchWidth, y / (float)fullPatchHeight, 1.0f, 1.0f);
testPatch.fillRectangle(x, y, [MASK] , [MASK] );
}
}
return new NinePatch(new TextureRegion(new Texture(testPatch), fullPatchWidth, fullPatchHeight), [MASK] , [MASK] ,
[MASK] , [MASK] );
}
private float timePassed = 0;
private final Color filterColor = new Color();
private final Color oldColor = new Color();
@Override
public void render () {
final int screenWidth = Gdx.graphics.getWidth();
final int screenHeight = Gdx.graphics.getHeight();
ScreenUtils.clear(0, 0, 0, 0);
timePassed += Gdx.graphics.getDeltaTime();
b.begin();
final int sz = ninePatches.size;
final int XGAP = 10;
final int pheight = (int)((screenHeight * 0.5f) / ((sz + 1) / 2));
int x = XGAP;
int y = 10;
// Test that batch color is applied to NinePatch
if (timePassed < 2) {
b.setColor(1, 1, 1, Interpolation.sine.apply(timePassed / 2f));
}
// Test that the various nine patches render
for (int i = 0; i < sz; i += 2) {
int pwidth = (int)(0.44f * screenWidth);
final NinePatch np1 = ninePatches.get(i).ninePatch;
np1.draw(b, x, y, pwidth, pheight);
if (i + 1 < sz) {
final NinePatch np2 = ninePatches.get(i + 1).ninePatch;
final int x2 = x + pwidth + XGAP;
final int pwidth2 = screenWidth - XGAP - x2;
np2.draw(b, x2, y, pwidth2, pheight);
}
y += pheight + 2;
}
// Dim a np by setting its color. Also test sending same np to batch twice
NinePatch np = ninePatches.get(0).ninePatch;
oldColor.set(np.getColor());
filterColor.set(0.3f, 0.3f, 0.3f, 1.0f);
np.setColor(filterColor);
np.draw(b, x, y, 100, 30);
np.setColor(oldColor);
b.end();
}
@Override
public void resize (int width, int height) {
float ratio = ((float)Gdx.graphics.getWidth() / (float)Gdx.graphics.getHeight());
int h = 10;
int w = (int)(h * ratio);
camera = new OrthographicCamera(w, h);
}
}
| patchSize | java |
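The trailing cells on each row above ("| state | java |", "| buffer | java |", "| patchSize | java |") hold that row's "indentifier" (the dataset's own spelling) and "lang" columns, i.e. the identifier that every [MASK] token in the row's code replaces. Under that reading, a minimal Python sketch for restoring a row's code — the helper name and the substitution rule are assumptions for illustration, not part of the dataset:

# Sketch: restore the masked identifier in one preview row.
# Assumption (no dataset card confirms this): every "[MASK]" token in the
# "code" column stands for the single identifier stored in that row's
# "indentifier" column.
def unmask(code: str, identifier: str) -> str:
    return code.replace("[MASK]", identifier)

# Example using the last row shown above:
print(unmask("TextureRegion tr = newPatchPix( [MASK] , pixmapSize);", "patchSize"))
# -> TextureRegion tr = newPatchPix( patchSize , pixmapSize);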
End of preview.
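Until the CSV files are given matching columns or split into separate configurations, one workaround is to load a single file directly, so the builder never tries to merge mismatched schemas. A minimal sketch, assuming a recent datasets release that accepts hf:// paths in data_files; only csi_train.csv is known from the error above, and no other file names are assumed:

from datasets import load_dataset

# Load one CSV as its own dataset instead of the whole repo, so the extra
# num_mask_tokens column in csi_train.csv cannot clash with other files.
ds = load_dataset(
    "csv",
    data_files="hf://datasets/anshulsc/REID2.0/csi_train.csv",
    split="train",
)
print(ds.column_names)  # expect: ['code', 'indentifier', 'num_mask_tokens', 'lang']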