index (int64, 0-0) | repo_id (string, length 9-205) | file_path (string, length 31-246) | content (string, length 1-12.2M) | __index_level_0__ (int64, 0-10k)
---|---|---|---|---|
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkReconnect.java | package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.helix.HelixException;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.SystemPropertyKeys;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkTestHelper;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.zookeeper.zkclient.ZkServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestZkReconnect {
private static final Logger LOG = LoggerFactory.getLogger(TestZkReconnect.class);
@Test
public void testHelixManagerStateListenerCallback() throws Exception {
final int zkPort = TestHelper.getRandomPort();
final String zkAddr = String.format("localhost:%d", zkPort);
final ZkServer zkServer = TestHelper.startZkServer(zkAddr);
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
final String clusterName = className + "_" + methodName;
// Init onDisconnectedFlag to check if callback is triggered
final AtomicReference<Boolean> onDisconnectedFlag = new AtomicReference<>(false);
final AtomicReference<Boolean> onConnectedFlag = new AtomicReference<>(false);
// Setup cluster
LOG.info("Setup clusters");
ClusterSetup clusterSetup = new ClusterSetup(zkAddr);
clusterSetup.addCluster(clusterName, true);
// For fast test, set short timeout
System.setProperty(SystemPropertyKeys.ZK_CONNECTION_TIMEOUT, "2000");
// Register and start the controller, and register a listener for disconnect handling
LOG.info("Starting controller");
final ZKHelixManager controller =
(ZKHelixManager) HelixManagerFactory.getZKHelixManager(clusterName, null, InstanceType.CONTROLLER, zkAddr,
new HelixManagerStateListener() {
@Override
public void onConnected(HelixManager helixManager) throws Exception {
Assert.assertEquals(helixManager.getClusterName(), clusterName);
onConnectedFlag.getAndSet(true);
}
@Override
public void onDisconnected(HelixManager helixManager, Throwable error) throws Exception {
Assert.assertEquals(helixManager.getClusterName(), clusterName);
onDisconnectedFlag.getAndSet(true);
}
});
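// The manager should report the ZK endpoint it was configured with.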
Assert.assertEquals(controller.getMetadataStoreConnectionString(), zkAddr);
try {
controller.connect();
// check onConnected() is triggered
Assert.assertTrue(onConnectedFlag.getAndSet(false));
// 1. Shut down zkServer and check whether the handler triggers the disconnect callback
zkServer.shutdown();
// Simulate a retry in ZkClient that will not succeed
ZkTestHelper.injectExpire(controller._zkclient);
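// With the server down, the retry loop cannot re-establish the session, so this wait is expected to time out.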
Assert.assertFalse(controller._zkclient.waitUntilConnected(5000, TimeUnit.MILLISECONDS));
// While retrying, onDisconnectedFlag = false
Assert.assertFalse(onDisconnectedFlag.get());
// 2. Restart zkServer and check that the handler recovers the connection
zkServer.start();
Assert.assertTrue(controller._zkclient
.waitUntilConnected(HelixZkClient.DEFAULT_CONNECTION_TIMEOUT, TimeUnit.MILLISECONDS));
Assert.assertTrue(controller.isConnected());
// New propertyStore should be in good state
ZkHelixPropertyStore propertyStore = controller.getHelixPropertyStore();
propertyStore.get("/", null, 0);
TestHelper.verify(new TestHelper.Verifier() {
@Override
public boolean verify() throws Exception {
return onConnectedFlag.getAndSet(false);
}
}, 1000);
// Inject a session establishment error to test the handler;
// onDisconnectedFlag should be set within the onDisconnected handler
controller.handleSessionEstablishmentError(new Exception("For testing"));
TestHelper.verify(new TestHelper.Verifier() {
@Override
public boolean verify() throws Exception {
return onDisconnectedFlag.get();
}
}, 1000);
Assert.assertFalse(onConnectedFlag.get());
Assert.assertFalse(controller.isConnected());
// Verify the manager is no longer usable: getHelixPropertyStore() should throw
try {
controller.getHelixPropertyStore();
} catch (HelixException e) {
// Expected exception
System.out.println(e.getMessage());
}
} finally {
controller.disconnect();
zkServer.shutdown();
System.clearProperty(SystemPropertyKeys.ZK_CONNECTION_TIMEOUT);
}
}
}
| 9,800 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkHelixAdmin.java | package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.PropertyType;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.api.exceptions.HelixConflictException;
import org.apache.helix.api.status.ClusterManagementMode;
import org.apache.helix.api.status.ClusterManagementModeRequest;
import org.apache.helix.api.topology.ClusterTopology;
import org.apache.helix.cloud.constants.CloudProvider;
import org.apache.helix.constants.InstanceConstants;
import org.apache.helix.controller.rebalancer.waged.WagedRebalancer;
import org.apache.helix.examples.MasterSlaveStateModelFactory;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.ClusterConstraints;
import org.apache.helix.model.ClusterConstraints.ConstraintAttribute;
import org.apache.helix.model.ClusterConstraints.ConstraintType;
import org.apache.helix.model.ConstraintItem;
import org.apache.helix.model.CustomizedStateConfig;
import org.apache.helix.model.CustomizedView;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.MasterSlaveSMD;
import org.apache.helix.model.PauseSignal;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.model.builder.ConstraintItemBuilder;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.tools.StateModelConfigGenerator;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.exception.ZkClientException;
import org.apache.helix.zookeeper.zkclient.NetworkUtil;
import org.apache.helix.zookeeper.zkclient.exception.ZkException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Stat;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.AssertJUnit;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestZkHelixAdmin extends ZkUnitTestBase {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
@BeforeClass
public void beforeClass() {
}
@Test()
public void testZkHelixAdmin() {
// TODO refactor this test into small test cases and use @before annotations
System.out.println("START testZkHelixAdmin at " + new Date(System.currentTimeMillis()));
final String clusterName = getShortClassName();
String rootPath = "/" + clusterName;
if (_gZkClient.exists(rootPath)) {
_gZkClient.deleteRecursively(rootPath);
}
HelixAdmin tool = new ZKHelixAdmin(_gZkClient);
tool.addCluster(clusterName, true);
Assert.assertTrue(ZKUtil.isClusterSetup(clusterName, _gZkClient));
Assert.assertTrue(_gZkClient.exists(PropertyPathBuilder.customizedStateConfig(clusterName)));
tool.addCluster(clusterName, true);
Assert.assertTrue(ZKUtil.isClusterSetup(clusterName, _gZkClient));
Assert.assertTrue(_gZkClient.exists(PropertyPathBuilder.customizedStateConfig(clusterName)));
List<String> list = tool.getClusters();
AssertJUnit.assertTrue(list.size() > 0);
try {
Stat oldstat = _gZkClient.getStat(rootPath);
Assert.assertNotNull(oldstat);
boolean success = tool.addCluster(clusterName, false);
// Even though the cluster already exists, addCluster should return true without making any changes in ZK
Assert.assertTrue(success);
Stat newstat = _gZkClient.getStat(rootPath);
Assert.assertEquals(oldstat, newstat);
} catch (HelixException e) {
// OK
}
String hostname = "host1";
String port = "9999";
String instanceName = hostname + "_" + port;
InstanceConfig config = new InstanceConfig(instanceName);
config.setHostName(hostname);
config.setPort(port);
List<String> dummyList = new ArrayList<>();
dummyList.add("foo");
dummyList.add("bar");
config.getRecord().setListField("dummy", dummyList);
tool.addInstance(clusterName, config);
tool.enableInstance(clusterName, instanceName, true);
String path = PropertyPathBuilder.getPath(PropertyType.INSTANCES, clusterName, instanceName);
AssertJUnit.assertTrue(_gZkClient.exists(path));
try {
tool.addInstance(clusterName, config);
Assert.fail("should fail if add an already-existing instance");
} catch (HelixException e) {
// OK
}
config = tool.getInstanceConfig(clusterName, instanceName);
AssertJUnit.assertEquals(config.getId(), instanceName);
// test setInstanceConfig()
config = tool.getInstanceConfig(clusterName, instanceName);
config.setHostName("host2");
try {
// different host
tool.setInstanceConfig(clusterName, instanceName, config);
Assert.fail("should fail if hostname is different from the current one");
} catch (HelixException e) {
// OK
}
config = tool.getInstanceConfig(clusterName, instanceName);
config.setPort("7777");
try {
// different port
tool.setInstanceConfig(clusterName, instanceName, config);
Assert.fail("should fail if port is different from the current one");
} catch (HelixException e) {
// OK
}
Assert.assertTrue(
tool.getInstanceConfig(clusterName, instanceName).getInstanceDisabledReason().isEmpty());
String disableReason = "Reason";
tool.enableInstance(clusterName, instanceName, false,
InstanceConstants.InstanceDisabledType.CLOUD_EVENT, disableReason);
Assert.assertTrue(tool.getInstanceConfig(clusterName, instanceName).getInstanceDisabledReason()
.equals(disableReason));
tool.enableInstance(clusterName, instanceName, true,
InstanceConstants.InstanceDisabledType.CLOUD_EVENT, disableReason);
Assert.assertTrue(
tool.getInstanceConfig(clusterName, instanceName).getInstanceDisabledReason().isEmpty());
Assert.assertEquals(tool.getInstanceConfig(clusterName, instanceName).getInstanceDisabledType(),
InstanceConstants.INSTANCE_NOT_DISABLED);
dummyList.remove("bar");
dummyList.add("baz");
config = tool.getInstanceConfig(clusterName, instanceName);
config.getRecord().setListField("dummy", dummyList);
AssertJUnit.assertTrue(tool.setInstanceConfig(clusterName, "host1_9999", config));
config = tool.getInstanceConfig(clusterName, "host1_9999");
dummyList = config.getRecord().getListField("dummy");
AssertJUnit.assertTrue(dummyList.contains("foo"));
AssertJUnit.assertTrue(dummyList.contains("baz"));
AssertJUnit.assertFalse(dummyList.contains("bar"));
AssertJUnit.assertEquals(2, dummyList.size());
// test: should not drop instance when it is still alive
HelixManager manager = initializeHelixManager(clusterName, config.getInstanceName());
try {
manager.connect();
} catch (Exception e) {
Assert.fail("HelixManager failed connecting");
}
try {
tool.dropInstance(clusterName, config);
Assert.fail("should fail if an instance is still alive");
} catch (HelixException e) {
// OK
}
try {
manager.disconnect();
} catch (Exception e) {
Assert.fail("HelixManager failed disconnecting");
}
// Tests that a ZkClientException thrown from ZkClient is caught
// and converted to a HelixException before being rethrown
String instancePath = PropertyPathBuilder.instance(clusterName, config.getInstanceName());
String instanceConfigPath = PropertyPathBuilder.instanceConfig(clusterName, instanceName);
String liveInstancePath = PropertyPathBuilder.liveInstance(clusterName, instanceName);
RealmAwareZkClient mockZkClient = Mockito.mock(RealmAwareZkClient.class);
// Mock the exists() method to let dropInstance() reach deleteRecursively().
Mockito.when(mockZkClient.exists(instanceConfigPath)).thenReturn(true);
Mockito.when(mockZkClient.exists(instancePath)).thenReturn(true);
Mockito.when(mockZkClient.exists(liveInstancePath)).thenReturn(false);
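// Chain ZkClientException -> ZkException -> KeeperException.NotEmptyException to mimic a recursive delete failing at the ZK layer.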
Mockito.doThrow(new ZkClientException("ZkClientException: failed to delete " + instancePath,
new ZkException("ZkException: failed to delete " + instancePath,
new KeeperException.NotEmptyException(
"NotEmptyException: directory" + instancePath + " is not empty"))))
.when(mockZkClient).deleteRecursively(instancePath);
HelixAdmin helixAdminMock = new ZKHelixAdmin(mockZkClient);
try {
helixAdminMock.dropInstance(clusterName, config);
Assert.fail("Should throw HelixException");
} catch (HelixException expected) {
// This exception is expected because it is converted from ZkClientException and rethrown.
Assert.assertEquals(expected.getMessage(),
"Failed to drop instance: " + config.getInstanceName() + ". Retry times: 3");
} catch (ZkClientException e) {
if (e.getMessage().equals("ZkClientException: failed to delete " + instancePath)) {
Assert.fail("Should not throw ZkClientException because it should be caught.");
}
}
tool.dropInstance(clusterName, config); // correctly drop the instance
try {
tool.getInstanceConfig(clusterName, "host1_9999");
Assert.fail("should fail if get a non-existent instance");
} catch (HelixException e) {
// OK
}
try {
tool.dropInstance(clusterName, config);
Assert.fail("should fail if drop on a non-existent instance");
} catch (HelixException e) {
// OK
}
try {
tool.enableInstance(clusterName, "host1_9999", false);
Assert.fail("should fail if enable a non-existent instance");
} catch (HelixException e) {
// OK
}
ZNRecord stateModelRecord = new ZNRecord("id1");
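// An empty ZNRecord does not describe a valid state model, so the add below is expected to fail before the assertions run.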
try {
tool.addStateModelDef(clusterName, "id1", new StateModelDefinition(stateModelRecord));
path = PropertyPathBuilder.stateModelDef(clusterName, "id1");
AssertJUnit.assertTrue(_gZkClient.exists(path));
Assert.fail("should fail");
} catch (HelixException | IllegalArgumentException e) {
// OK
}
tool.addStateModelDef(clusterName, "MasterSlave",
new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave()));
stateModelRecord = StateModelConfigGenerator.generateConfigForMasterSlave();
tool.addStateModelDef(clusterName, stateModelRecord.getId(),
new StateModelDefinition(stateModelRecord));
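// Both adds use the same id ("MasterSlave"), so only a single state model definition should be registered.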
list = tool.getStateModelDefs(clusterName);
AssertJUnit.assertEquals(list.size(), 1);
try {
tool.addResource(clusterName, "resource", 10, "nonexistStateModelDef");
Assert.fail("should fail if add a resource without an existing state model");
} catch (HelixException e) {
// OK
}
try {
tool.addResource(clusterName, "resource", 10, "id1");
Assert.fail("should fail");
} catch (HelixException e) {
// OK
}
list = tool.getResourcesInCluster(clusterName);
AssertJUnit.assertEquals(list.size(), 0);
try {
tool.addResource(clusterName, "resource", 10, "id1");
Assert.fail("should fail");
} catch (HelixException e) {
// OK
}
list = tool.getResourcesInCluster(clusterName);
AssertJUnit.assertEquals(list.size(), 0);
ExternalView resourceExternalView = tool.getResourceExternalView(clusterName, "resource");
AssertJUnit.assertNull(resourceExternalView);
CustomizedView resourceCustomizedView =
tool.getResourceCustomizedView(clusterName, "resource", "customizedStateType");
AssertJUnit.assertNull(resourceCustomizedView);
// test config support
// ConfigScope scope = new ConfigScopeBuilder().forCluster(clusterName)
// .forResource("testResource").forPartition("testPartition").build();
HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.PARTITION)
.forCluster(clusterName).forResource("testResource").forPartition("testPartition").build();
Map<String, String> properties = new HashMap<>();
properties.put("pKey1", "pValue1");
properties.put("pKey2", "pValue2");
// make sure calling set/getConfig() many times will not drain zkClient resources
// int nbOfZkClients = ZkClient.getNumberOfConnections();
for (int i = 0; i < 100; i++) {
tool.setConfig(scope, properties);
Map<String, String> newProperties =
tool.getConfig(scope, new ArrayList<>(properties.keySet()));
Assert.assertEquals(newProperties.size(), 2);
Assert.assertEquals(newProperties.get("pKey1"), "pValue1");
Assert.assertEquals(newProperties.get("pKey2"), "pValue2");
}
deleteCluster(clusterName);
System.out.println("END testZkHelixAdmin at " + new Date(System.currentTimeMillis()));
}
private HelixManager initializeHelixManager(String clusterName, String instanceName) {
HelixManager manager = HelixManagerFactory.getZKHelixManager(clusterName, instanceName,
InstanceType.PARTICIPANT, org.apache.helix.common.ZkTestBase.ZK_ADDR);
MasterSlaveStateModelFactory stateModelFactory = new MasterSlaveStateModelFactory(instanceName);
StateMachineEngine stateMach = manager.getStateMachineEngine();
stateMach.registerStateModelFactory("id1", stateModelFactory);
return manager;
}
// Dropping a resource should also drop the corresponding resource-level config
@Test
public void testDropResource() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixAdmin tool = new ZKHelixAdmin(_gZkClient);
tool.addCluster(clusterName, true);
Assert.assertTrue(ZKUtil.isClusterSetup(clusterName, _gZkClient), "Cluster should be setup");
tool.addStateModelDef(clusterName, "MasterSlave",
new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave()));
tool.addResource(clusterName, "test-db", 4, "MasterSlave");
Map<String, String> resourceConfig = new HashMap<>();
resourceConfig.put("key1", "value1");
tool.setConfig(new HelixConfigScopeBuilder(ConfigScopeProperty.RESOURCE).forCluster(clusterName)
.forResource("test-db").build(), resourceConfig);
PropertyKey.Builder keyBuilder = new PropertyKey.Builder(clusterName);
Assert.assertTrue(_gZkClient.exists(keyBuilder.idealStates("test-db").getPath()),
"test-db ideal-state should exist");
Assert.assertTrue(_gZkClient.exists(keyBuilder.resourceConfig("test-db").getPath()),
"test-db resource config should exist");
tool.dropResource(clusterName, "test-db");
Assert.assertFalse(_gZkClient.exists(keyBuilder.idealStates("test-db").getPath()),
"test-db ideal-state should be dropped");
Assert.assertFalse(_gZkClient.exists(keyBuilder.resourceConfig("test-db").getPath()),
"test-db resource config should be dropped");
tool.dropCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
// test add/remove message constraint
@Test
public void testAddRemoveMsgConstraint() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixAdmin tool = new ZKHelixAdmin(_gZkClient);
tool.addCluster(clusterName, true);
Assert.assertTrue(ZKUtil.isClusterSetup(clusterName, _gZkClient), "Cluster should be setup");
// test admin.getMessageConstraints()
ClusterConstraints constraints =
tool.getConstraints(clusterName, ConstraintType.MESSAGE_CONSTRAINT);
Assert.assertNull(constraints, "message-constraint should NOT exist for cluster: " + clusterName);
// remove a non-existent constraint
try {
tool.removeConstraint(clusterName, ConstraintType.MESSAGE_CONSTRAINT, "constraint1");
// will leave a null message-constraint znode on zk
} catch (Exception e) {
Assert.fail("Should not throw exception when remove a non-exist constraint.");
}
// add a message constraint
ConstraintItemBuilder builder = new ConstraintItemBuilder();
builder.addConstraintAttribute(ConstraintAttribute.RESOURCE.toString(), "MyDB")
.addConstraintAttribute(ConstraintAttribute.CONSTRAINT_VALUE.toString(), "1");
tool.setConstraint(clusterName, ConstraintType.MESSAGE_CONSTRAINT, "constraint1",
builder.build());
HelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
PropertyKey.Builder keyBuilder = new PropertyKey.Builder(clusterName);
constraints =
accessor.getProperty(keyBuilder.constraint(ConstraintType.MESSAGE_CONSTRAINT.toString()));
Assert.assertNotNull(constraints, "message-constraint should exist");
ConstraintItem item = constraints.getConstraintItem("constraint1");
Assert.assertNotNull(item, "message-constraint for constraint1 should exist");
Assert.assertEquals(item.getConstraintValue(), "1");
Assert.assertEquals(item.getAttributeValue(ConstraintAttribute.RESOURCE), "MyDB");
// test admin.getMessageConstraints()
constraints = tool.getConstraints(clusterName, ConstraintType.MESSAGE_CONSTRAINT);
Assert.assertNotNull(constraints, "message-constraint should exist");
item = constraints.getConstraintItem("constraint1");
Assert.assertNotNull(item, "message-constraint for constraint1 should exist");
Assert.assertEquals(item.getConstraintValue(), "1");
Assert.assertEquals(item.getAttributeValue(ConstraintAttribute.RESOURCE), "MyDB");
// remove an existing message-constraint
tool.removeConstraint(clusterName, ConstraintType.MESSAGE_CONSTRAINT, "constraint1");
constraints =
accessor.getProperty(keyBuilder.constraint(ConstraintType.MESSAGE_CONSTRAINT.toString()));
Assert.assertNotNull(constraints, "message-constraint should exist");
item = constraints.getConstraintItem("constraint1");
Assert.assertNull(item, "message-constraint for constraint1 should NOT exist");
tool.dropCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testDisableResource() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.addCluster(clusterName, true);
Assert.assertTrue(ZKUtil.isClusterSetup(clusterName, _gZkClient), "Cluster should be setup");
String resourceName = "TestDB";
admin.addStateModelDef(clusterName, "MasterSlave",
new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave()));
admin.addResource(clusterName, resourceName, 4, "MasterSlave");
admin.enableResource(clusterName, resourceName, false);
BaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, baseAccessor);
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
IdealState idealState = accessor.getProperty(keyBuilder.idealStates(resourceName));
Assert.assertFalse(idealState.isEnabled());
admin.enableResource(clusterName, resourceName, true);
idealState = accessor.getProperty(keyBuilder.idealStates(resourceName));
Assert.assertTrue(idealState.isEnabled());
admin.dropCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testGetResourcesWithTag() {
String TEST_TAG = "TestTAG";
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
HelixAdmin tool = new ZKHelixAdmin(_gZkClient);
tool.addCluster(clusterName, true);
Assert.assertTrue(ZKUtil.isClusterSetup(clusterName, _gZkClient));
tool.addStateModelDef(clusterName, "OnlineOffline",
new StateModelDefinition(StateModelConfigGenerator.generateConfigForOnlineOffline()));
for (int i = 0; i < 4; i++) {
String instanceName = "host" + i + "_9999";
InstanceConfig config = new InstanceConfig(instanceName);
config.setHostName("host" + i);
config.setPort("9999");
// set tag to two instances
if (i < 2) {
config.addTag(TEST_TAG);
}
tool.addInstance(clusterName, config);
tool.enableInstance(clusterName, instanceName, true);
String path = PropertyPathBuilder.instance(clusterName, instanceName);
AssertJUnit.assertTrue(_gZkClient.exists(path));
}
for (int i = 0; i < 4; i++) {
String resourceName = "database_" + i;
IdealState is = new IdealState(resourceName);
is.setStateModelDefRef("OnlineOffline");
is.setNumPartitions(2);
is.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
is.setReplicas("1");
is.enable(true);
if (i < 2) {
is.setInstanceGroupTag(TEST_TAG);
}
tool.addResource(clusterName, resourceName, is);
}
List<String> allResources = tool.getResourcesInCluster(clusterName);
List<String> resourcesWithTag = tool.getResourcesInClusterWithTag(clusterName, TEST_TAG);
AssertJUnit.assertEquals(allResources.size(), 4);
AssertJUnit.assertEquals(resourcesWithTag.size(), 2);
tool.dropCluster(clusterName);
}
@Test
public void testEnableDisablePartitions() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
String instanceName = "TestInstance";
String testResourcePrefix = "TestResource";
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.addCluster(clusterName, true);
admin.addInstance(clusterName, new InstanceConfig(instanceName));
// Test disable instances with resources
admin.enablePartition(false, clusterName, instanceName, testResourcePrefix + "0",
Arrays.asList("1", "2"));
admin.enablePartition(false, clusterName, instanceName, testResourcePrefix + "1",
Arrays.asList("2", "3", "4"));
InstanceConfig instanceConfig = admin.getInstanceConfig(clusterName, instanceName);
Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "0").size(), 2);
Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "1").size(), 3);
// Test disable partition across resources
// TODO : Remove this part once setInstanceEnabledForPartition(partition, enabled) is removed
instanceConfig.setInstanceEnabledForPartition("10", false);
Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "0").size(), 3);
Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "1").size(), 4);
admin.dropCluster(clusterName);
}
@Test
public void testLegacyEnableDisablePartition() {
String instanceName = "TestInstanceLegacy";
String testResourcePrefix = "TestResourceLegacy";
ZNRecord record = new ZNRecord(instanceName);
List<String> disabledPartitions = new ArrayList<>(Arrays.asList("1", "2", "3"));
record.setListField(InstanceConfig.InstanceConfigProperty.HELIX_DISABLED_PARTITION.name(),
disabledPartitions);
InstanceConfig instanceConfig = new InstanceConfig(record);
instanceConfig.setInstanceEnabledForPartition(testResourcePrefix, "2", false);
Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix).size(), 3);
Assert.assertEquals(instanceConfig.getRecord()
.getListField(InstanceConfig.InstanceConfigProperty.HELIX_DISABLED_PARTITION.name()).size(),
3);
instanceConfig.setInstanceEnabledForPartition(testResourcePrefix, "2", true);
Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix).size(), 2);
Assert.assertEquals(instanceConfig.getRecord()
.getListField(InstanceConfig.InstanceConfigProperty.HELIX_DISABLED_PARTITION.name()).size(),
2);
}
@Test
public void testResetPartition() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
String instanceName = "TestInstance";
String testResource = "TestResource";
String wrongTestInstance = "WrongTestInstance";
String wrongTestResource = "WrongTestResource";
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.addCluster(clusterName, true);
admin.addInstance(clusterName, new InstanceConfig(instanceName));
admin.enableInstance(clusterName, instanceName, true);
InstanceConfig instanceConfig = admin.getInstanceConfig(clusterName, instanceName);
IdealState idealState = new IdealState(testResource);
idealState.setNumPartitions(3);
admin.addStateModelDef(clusterName, "MasterSlave", new MasterSlaveSMD());
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
admin.addResource(clusterName, testResource, idealState);
admin.enableResource(clusterName, testResource, true);
/*
* This is a unit test for sanity check in resetPartition().
* There is no running controller in this test. We have end-to-end tests for resetPartition()
* under webapp/TestResetPartitionState and integration/TestResetPartitionState.
*/
// resetPartition is expected to throw an exception when provided with a nonexistent instance.
try {
admin.resetPartition(clusterName, wrongTestInstance, testResource, Arrays.asList("1", "2"));
Assert.fail("Should throw HelixException");
} catch (HelixException expected) {
// This exception is expected because the instance name is made up.
Assert.assertEquals(expected.getMessage(), String.format(
"Can't reset state for %s.[1, 2] on WrongTestInstance, because %s does not exist in cluster %s",
testResource, wrongTestInstance, clusterName));
}
// resetPartition is expected to throw an exception when provided with a non-live instance.
try {
admin.resetPartition(clusterName, instanceName, testResource, Arrays.asList("1", "2"));
Assert.fail("Should throw HelixException");
} catch (HelixException expected) {
// This exception is expected because the instance is not alive.
Assert.assertEquals(expected.getMessage(), String
.format("Can't reset state for %s.[1, 2] on %s, because %s is not alive in cluster %s",
testResource, instanceName, instanceName, clusterName));
}
HelixManager manager = initializeHelixManager(clusterName, instanceConfig.getInstanceName());
manager.connect();
// resetPartition is expected to throw an exception when provided with a nonexistent resource.
try {
admin.resetPartition(clusterName, instanceName, wrongTestResource, Arrays.asList("1", "2"));
Assert.fail("Should throw HelixException");
} catch (HelixException expected) {
// This exception is expected because the resource is not added.
Assert.assertEquals(expected.getMessage(), String.format(
"Can't reset state for %s.[1, 2] on %s, because resource %s is not added to cluster %s",
wrongTestResource, instanceName, wrongTestResource, clusterName));
}
try {
admin.resetPartition(clusterName, instanceName, testResource, Arrays.asList("1", "2"));
Assert.fail("Should throw HelixException");
} catch (HelixException expected) {
// This exception is expected because partitions do not exist.
Assert.assertEquals(expected.getMessage(), String.format(
"Can't reset state for %s.[1, 2] on %s, because not all [1, 2] exist in cluster %s",
testResource, instanceName, clusterName));
}
// clean up
manager.disconnect();
admin.dropCluster(clusterName);
// verify the cluster has been removed successfully
HelixDataAccessor dataAccessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
try {
Assert.assertTrue(TestHelper.verify(() -> dataAccessor.getChildNames(dataAccessor.keyBuilder().liveInstances()).isEmpty(), 1000));
} catch (Exception e) {
e.printStackTrace();
Assert.fail("There are live instances that have not been cleaned up yet");
}
try {
Assert.assertTrue(TestHelper.verify(() -> dataAccessor.getChildNames(dataAccessor.keyBuilder().clusterConfig()).isEmpty(), 1000));
} catch (Exception e) {
e.printStackTrace();
Assert.fail("The cluster has not been cleaned up yet");
}
}
/**
* Test addResourceWithWeight() and validateResourcesForWagedRebalance() by trying to add a resource with incomplete ResourceConfig.
*/
@Test
public void testAddResourceWithWeightAndValidation()
throws IOException {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
String mockInstance = "MockInstance";
String testResourcePrefix = "TestResource";
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.addCluster(clusterName, true);
admin.addStateModelDef(clusterName, "MasterSlave", new MasterSlaveSMD());
// Create a dummy instance
InstanceConfig instanceConfig = new InstanceConfig(mockInstance);
Map<String, Integer> mockInstanceCapacity =
ImmutableMap.of("WCU", 100, "RCU", 100, "STORAGE", 100);
instanceConfig.setInstanceCapacityMap(mockInstanceCapacity);
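// Instance capacity alone is not sufficient for WAGED validation; the resource's partition
// capacity map and the cluster-level capacity keys are also required, as exercised below.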
admin.addInstance(clusterName, instanceConfig);
MockParticipantManager mockParticipantManager =
new MockParticipantManager(ZK_ADDR, clusterName, mockInstance);
mockParticipantManager.syncStart();
IdealState idealState = new IdealState(testResourcePrefix);
idealState.setNumPartitions(3);
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
ResourceConfig resourceConfig = new ResourceConfig(testResourcePrefix);
// validate
Map<String, Boolean> validationResult = admin.validateResourcesForWagedRebalance(clusterName,
Collections.singletonList(testResourcePrefix));
Assert.assertEquals(validationResult.size(), 1);
Assert.assertFalse(validationResult.get(testResourcePrefix));
try {
admin.addResourceWithWeight(clusterName, idealState, resourceConfig);
Assert.fail();
} catch (HelixException e) {
// OK since resourceConfig is empty
}
// Set PARTITION_CAPACITY_MAP
Map<String, String> capacityDataMap =
ImmutableMap.of("WCU", "1", "RCU", "2", "STORAGE", "3");
resourceConfig.getRecord()
.setMapField(ResourceConfig.ResourceConfigProperty.PARTITION_CAPACITY_MAP.name(),
Collections.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY,
OBJECT_MAPPER.writeValueAsString(capacityDataMap)));
// validate
validationResult = admin.validateResourcesForWagedRebalance(clusterName,
Collections.singletonList(testResourcePrefix));
Assert.assertEquals(validationResult.size(), 1);
Assert.assertFalse(validationResult.get(testResourcePrefix));
// Add the capacity key to ClusterConfig
HelixDataAccessor dataAccessor = new ZKHelixDataAccessor(clusterName, _baseAccessor);
PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();
ClusterConfig clusterConfig = dataAccessor.getProperty(keyBuilder.clusterConfig());
clusterConfig.setInstanceCapacityKeys(Arrays.asList("WCU", "RCU", "STORAGE"));
dataAccessor.setProperty(keyBuilder.clusterConfig(), clusterConfig);
// Should succeed now
Assert.assertTrue(admin.addResourceWithWeight(clusterName, idealState, resourceConfig));
// validate
validationResult = admin.validateResourcesForWagedRebalance(clusterName,
Collections.singletonList(testResourcePrefix));
Assert.assertEquals(validationResult.size(), 1);
Assert.assertTrue(validationResult.get(testResourcePrefix));
}
/**
* Test enableWagedRebalance by checking that the rebalancer class name changed.
*/
@Test
public void testEnableWagedRebalance() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
String testResourcePrefix = "TestResource";
String unaffectedResource = "UnaffectedResource";
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.addCluster(clusterName, true);
admin.addStateModelDef(clusterName, "MasterSlave", new MasterSlaveSMD());
// Add an IdealState
IdealState idealState = new IdealState(testResourcePrefix);
idealState.setNumPartitions(3);
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
admin.addResource(clusterName, testResourcePrefix, idealState);
// Add an unaffected IdealState
IdealState unaffectedIdealState = new IdealState(unaffectedResource);
unaffectedIdealState.setNumPartitions(3);
unaffectedIdealState.setStateModelDefRef("MasterSlave");
unaffectedIdealState.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
admin.addResource(clusterName, unaffectedResource, unaffectedIdealState);
Assert.assertTrue(admin.enableWagedRebalance(clusterName, Collections.singletonList(testResourcePrefix)));
IdealState is = admin.getResourceIdealState(clusterName, testResourcePrefix);
Assert.assertEquals(is.getRebalancerClassName(), WagedRebalancer.class.getName());
is = admin.getResourceIdealState(clusterName, unaffectedResource);
Assert.assertNotSame(is.getRebalancerClassName(), WagedRebalancer.class.getName());
// Test the non-existent resource case
try {
admin.enableWagedRebalance(clusterName, Collections.singletonList("FakeResourceName"));
Assert.fail("Expected HelixException");
} catch (HelixException e) {
Assert.assertEquals(e.getMessage(),
"Some resources do not have IdealStates: [FakeResourceName]");
}
}
@Test
public void testAddCloudConfig() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.addCluster(clusterName, true);
CloudConfig.Builder builder = new CloudConfig.Builder();
builder.setCloudEnabled(true);
builder.setCloudID("TestID");
builder.addCloudInfoSource("TestURL");
builder.setCloudProvider(CloudProvider.CUSTOMIZED);
builder.setCloudInfoProcessorName("TestProcessor");
CloudConfig cloudConfig = builder.build();
admin.addCloudConfig(clusterName, cloudConfig);
// Read CloudConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(_gZkClient);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertTrue(cloudConfigFromZk.isCloudEnabled());
Assert.assertEquals(cloudConfigFromZk.getCloudID(), "TestID");
Assert.assertEquals(cloudConfigFromZk.getCloudProvider(), CloudProvider.CUSTOMIZED.name());
List<String> listUrlFromZk = cloudConfigFromZk.getCloudInfoSources();
Assert.assertEquals(listUrlFromZk.get(0), "TestURL");
Assert.assertEquals(cloudConfigFromZk.getCloudInfoProcessorName(), "TestProcessor");
}
@Test
public void testRemoveCloudConfig() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.addCluster(clusterName, true);
CloudConfig.Builder builder = new CloudConfig.Builder();
builder.setCloudEnabled(true);
builder.setCloudID("TestID");
builder.addCloudInfoSource("TestURL");
builder.setCloudProvider(CloudProvider.CUSTOMIZED);
builder.setCloudInfoProcessorName("TestProcessor");
CloudConfig cloudConfig = builder.build();
admin.addCloudConfig(clusterName, cloudConfig);
// Read CloudConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(_gZkClient);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertTrue(cloudConfigFromZk.isCloudEnabled());
Assert.assertEquals(cloudConfigFromZk.getCloudID(), "TestID");
Assert.assertEquals(cloudConfigFromZk.getCloudProvider(), CloudProvider.CUSTOMIZED.name());
List<String> listUrlFromZk = cloudConfigFromZk.getCloudInfoSources();
Assert.assertEquals(listUrlFromZk.get(0), "TestURL");
Assert.assertEquals(cloudConfigFromZk.getCloudInfoProcessorName(), "TestProcessor");
// Remove Cloud Config and make sure it has been removed from Zookeeper
admin.removeCloudConfig(clusterName);
cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertNull(cloudConfigFromZk);
}
@Test
public void testGetDomainInformation() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.addCluster(clusterName, true);
ClusterConfig clusterConfig = new ClusterConfig(clusterName);
clusterConfig.setTopologyAwareEnabled(true);
clusterConfig.setTopology("/group/zone/rack/host");
clusterConfig.setFaultZoneType("rack");
ConfigAccessor _configAccessor = new ConfigAccessor(_gZkClient);
_configAccessor.setClusterConfig(clusterName, clusterConfig);
HelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
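// Create 42 instances: indices 0-39 get well-formed domains, while 40 and 41 get
// malformed domain strings and should be flagged as invalid.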
for (int i = 0; i < 42; i++) {
String hostname = "myhost" + i;
String port = "9999";
String instanceName = hostname + "_" + port;
InstanceConfig instanceConfig = new InstanceConfig(instanceName);
instanceConfig.setHostName(hostname);
instanceConfig.setPort(port);
if (i == 40) {
instanceConfig.setDomain(String
.format("invaliddomain=%s,zone=%s,rack=%s,host=%s", "mygroup" + i % 2, "myzone" + i % 4,
"myrack" + i % 4, hostname));
} else if (i == 41) {
instanceConfig.setDomain("invaliddomain");
} else {
String domain = String
.format("group=%s,zone=%s,rack=%s,host=%s", "mygroup" + i % 2, "myzone" + i % 4,
"myrack" + i % 4, hostname);
instanceConfig.setDomain(domain);
}
LiveInstance liveInstance = new LiveInstance(instanceName);
liveInstance.setSessionId(UUID.randomUUID().toString());
liveInstance.setHelixVersion(UUID.randomUUID().toString());
accessor.setProperty(keyBuilder.liveInstance(instanceName), liveInstance);
admin.addInstance(clusterName, instanceConfig);
admin.enableInstance(clusterName, instanceName, true);
}
ClusterTopology clusterTopology = admin.getClusterTopology(clusterName);
Assert.assertNotNull(clusterTopology);
Map<String, List<String>> results = clusterTopology.getTopologyMap();
Assert.assertEquals(results.size(), 2);
Assert.assertTrue(results.containsKey("/group:mygroup0"));
Assert.assertTrue(results.containsKey("/group:mygroup1"));
Assert.assertEquals(results.get("/group:mygroup0").size(), 20);
Assert.assertEquals(results.get("/group:mygroup1").size(), 20);
results = clusterTopology.getFaultZoneMap();
Assert.assertEquals(results.size(), 4);
Assert.assertEquals(results.get("/group:mygroup0/zone:myzone0/rack:myrack0").size(), 10);
Assert.assertTrue(results.get("/group:mygroup0/zone:myzone0/rack:myrack0").contains("/host"
+ ":myhost0"));
Assert.assertEquals(clusterTopology.getInvalidInstances().size(), 2);
Assert.assertTrue(clusterTopology.getInvalidInstances()
.containsAll(new HashSet<>(Arrays.asList("myhost40_9999", "myhost41_9999"))));
}
@Test
public void testAddCustomizedStateConfig() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
HelixAdmin admin = new ZKHelixAdmin(ZK_ADDR);
admin.addCluster(clusterName, true);
CustomizedStateConfig.Builder builder =
new CustomizedStateConfig.Builder();
builder.addAggregationEnabledType("mockType1");
CustomizedStateConfig customizedStateConfig = builder.build();
admin.addCustomizedStateConfig(clusterName, customizedStateConfig);
// Read CustomizedStateConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CustomizedStateConfig configFromZk =
_configAccessor.getCustomizedStateConfig(clusterName);
List<String> listTypesFromZk = configFromZk.getAggregationEnabledTypes();
Assert.assertEquals(listTypesFromZk.get(0), "mockType1");
}
@Test
public void testRemoveCustomizedStateConfig() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
HelixAdmin admin = new ZKHelixAdmin(ZK_ADDR);
admin.addCluster(clusterName, true);
CustomizedStateConfig.Builder builder =
new CustomizedStateConfig.Builder();
builder.addAggregationEnabledType("mockType1");
CustomizedStateConfig customizedStateConfig = builder.build();
admin.addCustomizedStateConfig(clusterName, customizedStateConfig);
// Read CustomizedStateConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CustomizedStateConfig configFromZk =
_configAccessor.getCustomizedStateConfig(clusterName);
List<String> listTypesFromZk = configFromZk.getAggregationEnabledTypes();
Assert.assertEquals(listTypesFromZk.get(0), "mockType1");
// Remove the CustomizedStateConfig and make sure it has been removed from ZooKeeper
admin.removeCustomizedStateConfig(clusterName);
configFromZk = _configAccessor.getCustomizedStateConfig(clusterName);
Assert.assertNull(configFromZk);
}
@Test
public void testUpdateCustomizedStateConfig() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
HelixAdmin admin = new ZKHelixAdmin(ZK_ADDR);
admin.addCluster(clusterName, true);
CustomizedStateConfig.Builder builder =
new CustomizedStateConfig.Builder();
builder.addAggregationEnabledType("mockType1");
CustomizedStateConfig customizedStateConfig = builder.build();
admin.addCustomizedStateConfig(clusterName, customizedStateConfig);
// Read CustomizedStateConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CustomizedStateConfig configFromZk =
_configAccessor.getCustomizedStateConfig(clusterName);
List<String> listTypesFromZk = configFromZk.getAggregationEnabledTypes();
Assert.assertEquals(listTypesFromZk.get(0), "mockType1");
admin.addTypeToCustomizedStateConfig(clusterName, "mockType2");
admin.addTypeToCustomizedStateConfig(clusterName, "mockType3");
configFromZk =
_configAccessor.getCustomizedStateConfig(clusterName);
listTypesFromZk = configFromZk.getAggregationEnabledTypes();
Assert.assertEquals(listTypesFromZk.get(0), "mockType1");
Assert.assertEquals(listTypesFromZk.get(1), "mockType2");
Assert.assertEquals(listTypesFromZk.get(2), "mockType3");
admin.removeTypeFromCustomizedStateConfig(clusterName, "mockType1");
configFromZk =
_configAccessor.getCustomizedStateConfig(clusterName);
listTypesFromZk = configFromZk.getAggregationEnabledTypes();
Assert.assertEquals(listTypesFromZk.get(0), "mockType2");
Assert.assertEquals(listTypesFromZk.get(1), "mockType3");
}
@Test
public void testPurgeOfflineInstances() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
HelixAdmin tool = new ZKHelixAdmin(_gZkClient);
tool.addCluster(clusterName, true);
HelixDataAccessor dataAccessor = new ZKHelixDataAccessor(clusterName, _baseAccessor);
PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();
// set default offline duration for purge in cluster config
ClusterConfig clusterConfig = dataAccessor.getProperty(keyBuilder.clusterConfig());
clusterConfig.setOfflineDurationForPurge(100000L);
dataAccessor.setProperty(keyBuilder.clusterConfig(), clusterConfig);
String hostname = "host1";
String port = "9999";
String instanceName = hostname + "_" + port;
InstanceConfig config = new InstanceConfig(instanceName);
config.setHostName(hostname);
config.setPort(port);
tool.addInstance(clusterName, config);
tool.enableInstance(clusterName, instanceName, true);
LiveInstance liveInstance = new LiveInstance(instanceName);
liveInstance.setSessionId(UUID.randomUUID().toString());
liveInstance.setHelixVersion(UUID.randomUUID().toString());
dataAccessor.setProperty(keyBuilder.liveInstance(instanceName), liveInstance);
dataAccessor.removeProperty(keyBuilder.liveInstance(instanceName));
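// Record LAST_OFFLINE_TIME as 50 seconds ago in the instance history, so the purge
// thresholds used below (100s vs 10s) behave differently.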
ZNRecord znRecord = new ZNRecord(instanceName);
znRecord
.setSimpleField("LAST_OFFLINE_TIME", String.valueOf(System.currentTimeMillis() - 50000L));
_baseAccessor.set(PropertyPathBuilder.instanceHistory(clusterName, instanceName), znRecord, 1);
// This purge will not remove the instance since the default offline duration is not met yet.
tool.purgeOfflineInstances(clusterName, ClusterConfig.OFFLINE_DURATION_FOR_PURGE_NOT_SET);
Assert.assertTrue(_gZkClient.exists(keyBuilder.instanceConfig(instanceName).getPath()),
"Instance should still be there");
// This purge will not remove the instance since the offline duration is not met yet.
tool.purgeOfflineInstances(clusterName, 100000L);
Assert.assertTrue(_gZkClient.exists(keyBuilder.instanceConfig(instanceName).getPath()),
"Instance should still be there");
// This purge will remove the instance as the customized offline duration is met.
tool.purgeOfflineInstances(clusterName, 10000L);
assertInstanceDropped(keyBuilder, instanceName);
// Set a message without config or history, mimicking a race condition
_baseAccessor.set(PropertyPathBuilder.instanceMessage(clusterName, instanceName, "testId"),
new ZNRecord("testId"), 1);
tool.purgeOfflineInstances(clusterName, 10000L);
assertInstanceDropped(keyBuilder, instanceName);
// Set config again, without the instance path or history, mimicking a new instance joining
_baseAccessor.set(PropertyPathBuilder.instanceConfig(clusterName, instanceName),
new ZNRecord(instanceName), 1);
tool.purgeOfflineInstances(clusterName, 10000L);
Assert.assertTrue(_gZkClient.exists(keyBuilder.instanceConfig(instanceName).getPath()),
"Instance should still be there");
tool.dropCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
private void assertInstanceDropped(PropertyKey.Builder keyBuilder, String instanceName) {
Assert.assertFalse(_gZkClient.exists(keyBuilder.instanceConfig(instanceName).getPath()),
"Instance should already be dropped");
Assert.assertFalse(_gZkClient.exists(keyBuilder.instance(instanceName).getPath()),
"Instance should already be dropped");
}
/*
* Tests 2 APIs: enable and disable cluster pause mode.
*/
@Test
public void testEnableDisableClusterPauseMode() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
_gSetupTool.setupTestCluster(clusterName);
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
controller.syncStart();
_gSetupTool.activateCluster(clusterName, controller.getClusterName(), true);
try {
// Should not create pause with pending cancel ST enabled because cancellation is not enabled
try {
ClusterManagementModeRequest request = ClusterManagementModeRequest.newBuilder()
.withClusterName(clusterName)
.withMode(ClusterManagementMode.Type.CLUSTER_FREEZE)
.withCancelPendingST(true)
.withReason(methodName)
.build();
_gSetupTool.getClusterManagementTool().setClusterManagementMode(request);
Assert.fail("Should not create pause with pending cancel ST enabled because "
+ "cancellation is not enabled");
} catch (HelixConflictException e) {
Assert.assertTrue(e.getMessage().startsWith("State transition cancellation not enabled"));
}
ClusterManagementModeRequest request = ClusterManagementModeRequest.newBuilder()
.withClusterName(clusterName)
.withMode(ClusterManagementMode.Type.CLUSTER_FREEZE)
.withReason(methodName)
.build();
_gSetupTool.getClusterManagementTool().setClusterManagementMode(request);
HelixDataAccessor dataAccessor = new ZKHelixDataAccessor(clusterName, _baseAccessor);
PauseSignal pauseSignal = dataAccessor.getProperty(dataAccessor.keyBuilder().pause());
// Verify pause signal is correctly written
Assert.assertNotNull(pauseSignal);
Assert.assertTrue(pauseSignal.isClusterPause());
Assert.assertFalse(pauseSignal.getCancelPendingST());
Assert.assertEquals(pauseSignal.getFromHost(), NetworkUtil.getLocalhostName());
Assert.assertEquals(pauseSignal.getReason(), methodName);
// Disable pause mode
request = ClusterManagementModeRequest.newBuilder()
.withClusterName(clusterName)
.withMode(ClusterManagementMode.Type.NORMAL)
.build();
_gSetupTool.getClusterManagementTool().setClusterManagementMode(request);
pauseSignal = dataAccessor.getProperty(dataAccessor.keyBuilder().pause());
// Verify pause signal has been deleted.
Assert.assertNull(pauseSignal);
} finally {
_gSetupTool.activateCluster(clusterName, controller.getClusterName(), false);
controller.syncStop();
_gSetupTool.deleteCluster(clusterName);
}
}
}
| 9,801 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestLiveInstanceBounce.java | package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.integration.common.ZkStandAloneCMTestBase;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.tools.ClusterStateVerifier;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestLiveInstanceBounce extends ZkStandAloneCMTestBase {
@Test
public void testInstanceBounce() throws Exception {
int handlerSize = _controller.getHandlers().size();
for (int i = 0; i < 2; i++) {
String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
// kill 2 participants
_participants[i].syncStop();
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
// restart the participant
_participants[i] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
_participants[i].syncStart();
Thread.sleep(100);
}
Thread.sleep(4000);
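// Grace period before polling: let the controller process the restarted participants.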
boolean result =
ClusterStateVerifier.verifyByPolling(new ClusterStateVerifier.BestPossAndExtViewZkVerifier(
ZK_ADDR, CLUSTER_NAME), 50 * 1000);
Assert.assertTrue(result);
// When a new live instance is created, we add a current-state listener to it,
// and we remove the current-state listener on session expiry,
// so the number of callback handlers stays unchanged
for (int j = 0; j < 10; j++) {
if (_controller.getHandlers().size() == (handlerSize)) {
break;
}
Thread.sleep(400);
}
Assert.assertEquals(_controller.getHandlers().size(), handlerSize);
}
}
| 9,802 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkClusterManager.java | package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.AccessOption;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.InstanceType;
import org.apache.helix.LiveInstanceInfoProvider;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.ZkTestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.manager.MockListener;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import org.apache.zookeeper.data.Stat;
import org.testng.Assert;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
public class TestZkClusterManager extends ZkUnitTestBase {
final String className = getShortClassName();
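  /**
   * Verifies controller lifecycle basics: connecting before the cluster structure exists throws
   * a HelixException, connect/isConnected work once the cluster is set up, controller listeners
   * can be added and removed, and the HelixPropertyStore round-trips a ZNRecord.
   */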
@Test()
public void testController() throws Exception {
System.out.println("START " + className + ".testController() at "
+ new Date(System.currentTimeMillis()));
final String clusterName = CLUSTER_PREFIX + "_" + className + "_controller";
// basic test
if (_gZkClient.exists("/" + clusterName)) {
_gZkClient.deleteRecursively("/" + clusterName);
}
ZKHelixManager controller =
new ZKHelixManager(clusterName, null, InstanceType.CONTROLLER, ZK_ADDR);
try {
controller.connect();
Assert.fail("Should throw HelixException if initial cluster structure is not setup");
} catch (HelixException e) {
// OK
}
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
controller.connect();
AssertJUnit.assertTrue(controller.isConnected());
controller.connect();
AssertJUnit.assertTrue(controller.isConnected());
MockListener listener = new MockListener();
listener.reset();
try {
controller.addControllerListener(null);
Assert.fail("Should throw HelixException");
} catch (HelixException e) {
// OK
}
Builder keyBuilder = new Builder(controller.getClusterName());
controller.addControllerListener(listener);
AssertJUnit.assertTrue(listener.isControllerChangeListenerInvoked);
controller.removeListener(keyBuilder.controller(), listener);
ZkHelixPropertyStore<ZNRecord> store = controller.getHelixPropertyStore();
ZNRecord record = new ZNRecord("node_1");
int options = 0;
store.set("/node_1", record, AccessOption.PERSISTENT);
Stat stat = new Stat();
record = store.get("/node_1", stat, options);
AssertJUnit.assertEquals("node_1", record.getId());
controller.getMessagingService();
controller.getClusterManagmentTool();
controller.handleNewSession(controller.getSessionId());
controller.disconnect();
AssertJUnit.assertFalse(controller.isConnected());
deleteCluster(clusterName);
System.out.println("END " + className + ".testController() at "
+ new Date(System.currentTimeMillis()));
}
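  /**
   * Verifies that a LiveInstanceInfoProvider can attach extra simple/list/map fields to the
   * live instance znode, and that reserved fields such as SESSION_ID and LIVE_INSTANCE are not
   * overridden by the provider, including after a session expiry.
   */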
@Test
public void testLiveInstanceInfoProvider() throws Exception {
System.out.println("START " + className + ".testLiveInstanceInfoProvider() at "
+ new Date(System.currentTimeMillis()));
final String clusterName = CLUSTER_PREFIX + "_" + className + "_liveInstanceInfoProvider";
    class Provider implements LiveInstanceInfoProvider {
boolean _flag = false;
      public Provider(boolean genSessionId) {
_flag = genSessionId;
}
@Override
public ZNRecord getAdditionalLiveInstanceInfo() {
ZNRecord record = new ZNRecord("info");
record.setSimpleField("simple", "value");
List<String> listFieldVal = new ArrayList<String>();
listFieldVal.add("val1");
listFieldVal.add("val2");
listFieldVal.add("val3");
record.setListField("list", listFieldVal);
Map<String, String> mapFieldVal = new HashMap<String, String>();
mapFieldVal.put("k1", "val1");
mapFieldVal.put("k2", "val2");
mapFieldVal.put("k3", "val3");
record.setMapField("map", mapFieldVal);
if (_flag) {
record.setSimpleField("SESSION_ID", "value");
record.setSimpleField("LIVE_INSTANCE", "value");
record.setSimpleField("Others", "value");
}
return record;
}
}
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
int[] ids = {
0, 1, 2, 3, 4, 5
};
setupInstances(clusterName, ids);
// ///////////////////
ZKHelixManager manager =
new ZKHelixManager(clusterName, "localhost_0", InstanceType.PARTICIPANT, ZK_ADDR);
manager.connect();
HelixDataAccessor accessor = manager.getHelixDataAccessor();
LiveInstance liveInstance =
accessor.getProperty(accessor.keyBuilder().liveInstance("localhost_0"));
Assert.assertTrue(liveInstance.getRecord().getListFields().size() == 0);
Assert.assertTrue(liveInstance.getRecord().getMapFields().size() == 0);
Assert.assertTrue(liveInstance.getRecord().getSimpleFields().size() == 4);
manager.disconnect();
manager = new ZKHelixManager(clusterName, "localhost_1", InstanceType.PARTICIPANT, ZK_ADDR);
    manager.setLiveInstanceInfoProvider(new Provider(false));
manager.connect();
accessor = manager.getHelixDataAccessor();
liveInstance = accessor.getProperty(accessor.keyBuilder().liveInstance("localhost_1"));
Assert.assertTrue(liveInstance.getRecord().getListFields().size() == 1);
Assert.assertTrue(liveInstance.getRecord().getMapFields().size() == 1);
Assert.assertTrue(liveInstance.getRecord().getSimpleFields().size() == 5);
manager.disconnect();
manager = new ZKHelixManager(clusterName, "localhost_2", InstanceType.PARTICIPANT, ZK_ADDR);
    manager.setLiveInstanceInfoProvider(new Provider(true));
manager.connect();
accessor = manager.getHelixDataAccessor();
liveInstance = accessor.getProperty(accessor.keyBuilder().liveInstance("localhost_2"));
Assert.assertTrue(liveInstance.getRecord().getListFields().size() == 1);
Assert.assertTrue(liveInstance.getRecord().getMapFields().size() == 1);
Assert.assertTrue(liveInstance.getRecord().getSimpleFields().size() == 6);
Assert.assertFalse(liveInstance.getEphemeralOwner().equals("value"));
Assert.assertFalse(liveInstance.getLiveInstance().equals("value"));
MockParticipantManager manager2 =
new MockParticipantManager(ZK_ADDR, clusterName, "localhost_3");
    manager2.setLiveInstanceInfoProvider(new Provider(true));
manager2.connect();
accessor = manager2.getHelixDataAccessor();
liveInstance = accessor.getProperty(accessor.keyBuilder().liveInstance("localhost_3"));
Assert.assertTrue(liveInstance.getRecord().getListFields().size() == 1);
Assert.assertTrue(liveInstance.getRecord().getMapFields().size() == 1);
Assert.assertTrue(liveInstance.getRecord().getSimpleFields().size() == 6);
Assert.assertFalse(liveInstance.getEphemeralOwner().equals("value"));
Assert.assertFalse(liveInstance.getLiveInstance().equals("value"));
String sessionId = liveInstance.getEphemeralOwner();
ZkTestHelper.expireSession(manager2.getZkClient());
Thread.sleep(1000);
liveInstance = accessor.getProperty(accessor.keyBuilder().liveInstance("localhost_3"));
Assert.assertTrue(liveInstance.getRecord().getListFields().size() == 1);
Assert.assertTrue(liveInstance.getRecord().getMapFields().size() == 1);
Assert.assertTrue(liveInstance.getRecord().getSimpleFields().size() == 6);
Assert.assertFalse(liveInstance.getEphemeralOwner().equals("value"));
Assert.assertFalse(liveInstance.getLiveInstance().equals("value"));
Assert.assertFalse(sessionId.equals(liveInstance.getEphemeralOwner()));
manager.disconnect();
manager2.disconnect();
deleteCluster(clusterName);
System.out.println("END " + className + ".testLiveInstanceInfoProvider() at "
+ new Date(System.currentTimeMillis()));
}
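  /**
   * Verifies that an ADMINISTRATOR-type manager can connect and use HelixAdmin to set and read
   * back partition-scoped config properties.
   */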
@Test()
public void testAdministrator() throws Exception {
System.out.println("START " + className + ".testAdministrator() at "
+ new Date(System.currentTimeMillis()));
final String clusterName = CLUSTER_PREFIX + "_" + className + "_admin";
// basic test
if (_gZkClient.exists("/" + clusterName)) {
_gZkClient.deleteRecursively("/" + clusterName);
}
ZKHelixManager admin =
new ZKHelixManager(clusterName, null, InstanceType.ADMINISTRATOR, ZK_ADDR);
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
admin.connect();
AssertJUnit.assertTrue(admin.isConnected());
HelixAdmin adminTool = admin.getClusterManagmentTool();
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.PARTITION).forCluster(clusterName)
.forResource("testResource").forPartition("testPartition").build();
Map<String, String> properties = new HashMap<String, String>();
properties.put("pKey1", "pValue1");
properties.put("pKey2", "pValue2");
adminTool.setConfig(scope, properties);
properties = adminTool.getConfig(scope, Arrays.asList("pKey1", "pKey2"));
Assert.assertEquals(properties.size(), 2);
Assert.assertEquals(properties.get("pKey1"), "pValue1");
Assert.assertEquals(properties.get("pKey2"), "pValue2");
admin.disconnect();
AssertJUnit.assertFalse(admin.isConnected());
deleteCluster(clusterName);
System.out.println("END " + className + ".testAdministrator() at "
+ new Date(System.currentTimeMillis()));
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkCacheSyncOpSingleThread.java
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.helix.AccessOption;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.ZNRecordUpdater;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory;
import org.apache.helix.store.HelixPropertyListener;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestZkCacheSyncOpSingleThread extends ZkUnitTestBase {
class TestListener implements HelixPropertyListener {
ConcurrentLinkedQueue<String> _deletePathQueue = new ConcurrentLinkedQueue<>();
ConcurrentLinkedQueue<String> _createPathQueue = new ConcurrentLinkedQueue<>();
ConcurrentLinkedQueue<String> _changePathQueue = new ConcurrentLinkedQueue<>();
@Override
public void onDataDelete(String path) {
// System.out.println(Thread.currentThread().getName() + ", onDelete: " + path);
_deletePathQueue.add(path);
}
@Override
public void onDataCreate(String path) {
// System.out.println(Thread.currentThread().getName() + ", onCreate: " + path);
_createPathQueue.add(path);
}
@Override
public void onDataChange(String path) {
// System.out.println(Thread.currentThread().getName() + ", onChange: " + path);
_changePathQueue.add(path);
}
public void reset() {
_deletePathQueue.clear();
_createPathQueue.clear();
_changePathQueue.clear();
}
}
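  /**
   * Exercises ZkCacheBaseDataAccessor callbacks for externally issued writes (no chroot):
   * creates, updates, and removes current-state znodes and verifies both the cache contents
   * and the onDataCreate/onDataChange/onDataDelete callback paths.
   */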
@Test
public void testZkCacheCallbackExternalOpNoChroot() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
// init external base data accessor
HelixZkClient zkclient = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(ZK_ADDR));
zkclient.setZkSerializer(new ZNRecordSerializer());
ZkBaseDataAccessor<ZNRecord> extBaseAccessor = new ZkBaseDataAccessor<>(zkclient);
// init zkCacheDataAccessor
String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901");
String extViewPath = PropertyPathBuilder.externalView(clusterName);
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
extBaseAccessor.create(curStatePath, null, AccessOption.PERSISTENT);
List<String> cachePaths = Arrays.asList(curStatePath, extViewPath);
ZkCacheBaseDataAccessor<ZNRecord> accessor =
new ZkCacheBaseDataAccessor<>(baseAccessor, null, null, cachePaths);
TestListener listener = new TestListener();
accessor.subscribe(curStatePath, listener);
// create 10 current states
List<String> createPaths = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String path = curStatePath + "/session_0/TestDB" + i;
createPaths.add(path);
boolean success =
extBaseAccessor.create(path, new ZNRecord("TestDB" + i), AccessOption.PERSISTENT);
Assert.assertTrue(success, "Should succeed in create: " + path);
}
Thread.sleep(500);
// verify cache
// TestHelper.printCache(accessor._zkCache._cache);
boolean ret = TestHelper.verifyZkCache(cachePaths, accessor._zkCache._cache, _gZkClient, true);
// System.out.println("ret: " + ret);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
System.out.println("createCnt: " + listener._createPathQueue.size());
Assert.assertEquals(listener._createPathQueue.size(), 11, "Shall get 11 onCreate callbacks.");
// verify each callback path
createPaths.add(curStatePath + "/session_0");
List<String> createCallbackPaths = new ArrayList<>(listener._createPathQueue);
Collections.sort(createPaths);
Collections.sort(createCallbackPaths);
// System.out.println("createCallbackPaths: " + createCallbackPaths);
Assert.assertEquals(createCallbackPaths, createPaths,
"Should get create callbacks at " + createPaths + ", but was " + createCallbackPaths);
// update each current state, single thread
List<String> updatePaths = new ArrayList<>();
listener.reset();
for (int i = 0; i < 10; i++) {
String path = curStatePath + "/session_0/TestDB" + i;
for (int j = 0; j < 10; j++) {
updatePaths.add(path);
ZNRecord newRecord = new ZNRecord("TestDB" + i);
newRecord.setSimpleField("" + j, "" + j);
boolean success =
accessor.update(path, new ZNRecordUpdater(newRecord), AccessOption.PERSISTENT);
Assert.assertTrue(success, "Should succeed in update: " + path);
}
}
Thread.sleep(500);
// verify cache
// TestHelper.printCache(accessor._zkCache._cache);
ret = TestHelper.verifyZkCache(cachePaths, accessor._zkCache._cache, _gZkClient, true);
// System.out.println("ret: " + ret);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
System.out.println("changeCnt: " + listener._changePathQueue.size());
Assert.assertEquals(listener._changePathQueue.size(), 100, "Shall get 100 onChange callbacks.");
// verify each callback path
List<String> updateCallbackPaths = new ArrayList<>(listener._changePathQueue);
Collections.sort(updatePaths);
Collections.sort(updateCallbackPaths);
Assert.assertEquals(updateCallbackPaths, updatePaths,
"Should get change callbacks at " + updatePaths + ", but was " + updateCallbackPaths);
// remove 10 current states
TreeSet<String> removePaths = new TreeSet<>();
listener.reset();
for (int i = 0; i < 10; i++) {
String path = curStatePath + "/session_0/TestDB" + i;
removePaths.add(path);
boolean success = accessor.remove(path, AccessOption.PERSISTENT);
Assert.assertTrue(success, "Should succeed in remove: " + path);
}
Thread.sleep(500);
// verify cache
// TestHelper.printCache(accessor._zkCache._cache);
ret = TestHelper.verifyZkCache(cachePaths, accessor._zkCache._cache, _gZkClient, true);
// System.out.println("ret: " + ret);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
System.out.println("deleteCnt: " + listener._deletePathQueue.size());
Assert.assertTrue(listener._deletePathQueue.size() >= 10,
"Shall get at least 10 onDelete callbacks.");
// verify each callback path
Set<String> removeCallbackPaths = new TreeSet<>(listener._deletePathQueue);
Assert.assertEquals(removeCallbackPaths, removePaths,
"Should get remove callbacks at " + removePaths + ", but was " + removeCallbackPaths);
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestHandleSession.java
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import org.apache.helix.AccessOption;
import org.apache.helix.HelixException;
import org.apache.helix.HelixManager;
import org.apache.helix.InstanceType;
import org.apache.helix.NotificationContext;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkTestHelper;
import org.apache.helix.api.listeners.CurrentStateChangeListener;
import org.apache.helix.api.listeners.LiveInstanceChangeListener;
import org.apache.helix.common.ZkTestBase;
import org.apache.helix.controller.GenericHelixController;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestHandleSession extends ZkTestBase {
private static final String _className = TestHelper.getTestClassName();
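  /*
   * Expires a participant's zk session several times and verifies that each new session id is
   * strictly greater than the previous one and never the uninitialized value "0".
   */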
@Test
public void testHandleNewSession() throws Exception {
String methodName = TestHelper.getTestMethodName();
String clusterName = _className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
10, // partitions per resource
5, // number of nodes
3, // replicas
"MasterSlave", true); // do rebalance
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, "localhost_12918");
participant.syncStart();
// Logger.getRootLogger().setLevel(Level.INFO);
String lastSessionId = participant.getSessionId();
for (int i = 0; i < 3; i++) {
// System.err.println("curSessionId: " + lastSessionId);
ZkTestHelper.expireSession(participant.getZkClient());
String sessionId = participant.getSessionId();
Assert.assertTrue(sessionId.compareTo(lastSessionId) > 0,
"Session id should be increased after expiry");
lastSessionId = sessionId;
// make sure session id is not 0
Assert.assertFalse(sessionId.equals("0"),
"Hit race condition in zhclient.handleNewSession(). sessionId is not returned yet.");
// TODO: need to test session expiry during handleNewSession()
}
// Logger.getRootLogger().setLevel(Level.INFO);
System.out.println("Disconnecting ...");
participant.syncStop();
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test(dependsOnMethods = "testHandleNewSession")
public void testAcquireLeadershipOnNewSession() throws Exception {
String className = getShortClassName();
final String clusterName =
CLUSTER_PREFIX + "_" + className + "_" + "testAcquireLeadershipOnNewSession";
final ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient));
final PropertyKey.Builder keyBuilder = accessor.keyBuilder();
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
// Create controller leader
final String controllerName = "controller_0";
final BlockingHandleNewSessionZkHelixManager manager =
new BlockingHandleNewSessionZkHelixManager(clusterName, controllerName,
InstanceType.CONTROLLER, ZK_ADDR);
GenericHelixController controller0 = new GenericHelixController();
DistributedLeaderElection election =
new DistributedLeaderElection(manager, controller0, Collections.EMPTY_LIST);
manager.connect();
// Ensure the controller successfully acquired leadership.
Assert.assertTrue(TestHelper.verify(() -> {
LiveInstance liveInstance = accessor.getProperty(keyBuilder.controllerLeader());
return liveInstance != null && controllerName.equals(liveInstance.getInstanceName())
&& manager.getSessionId().equals(liveInstance.getEphemeralOwner());
}, 1000));
// Record the original connection info.
final String originalSessionId = manager.getSessionId();
final long originalCreationTime =
accessor.getProperty(keyBuilder.controllerLeader()).getStat().getCreationTime();
int handlerCount = manager.getHandlers().size();
// 1. lock the zk event processing to simulate long backlog queue.
((ZkClient) manager._zkclient).getEventLock().lockInterruptibly();
// 2. add a controller leader node change event to the queue, that will not be processed.
accessor.removeProperty(keyBuilder.controllerLeader());
// 3. expire the session and create a new session
ZkTestHelper.asyncExpireSession(manager._zkclient);
Assert.assertTrue(TestHelper
.verify(() -> !((ZkClient) manager._zkclient).getConnection().getZookeeperState().isAlive(),
3000));
// 4. start processing event again
((ZkClient) manager._zkclient).getEventLock().unlock();
    // Wait until the ZkClient has got a new session and the original leader node is gone.
Assert.assertTrue(TestHelper.verify(() -> {
try {
return !Long.toHexString(manager._zkclient.getSessionId()).equals(originalSessionId);
} catch (HelixException hex) {
return false;
}
}, 2000));
    // Ensure that the manager has not processed the new session event yet.
Assert.assertEquals(manager.getSessionId(), originalSessionId);
    // Wait until an invalid leader node is created again.
    // Note that this is the expected behavior but NOT the desired behavior. Ideally, the new node
    // should be created with the right session directly. We will need to improve this.
    // TODO: Record the session id in the zk event so that stale events are discarded instead of
    // processed. Once that is done, no invalid node will be created.
Assert.assertTrue(TestHelper.verify(() -> {
// Newly created node should have a new creating time but with old session.
LiveInstance invalidLeaderNode = accessor.getProperty(keyBuilder.controllerLeader());
// node exist
if (invalidLeaderNode == null) {
return false;
}
// node is newly created
if (invalidLeaderNode.getStat().getCreationTime() == originalCreationTime) {
return false;
}
// node has the same session as the old one, so it's invalid
if (!invalidLeaderNode.getSessionId().equals(originalSessionId)) {
return false;
}
return true;
}, 2000));
Assert.assertFalse(manager.isLeader());
// 5. proceed the new session handling, so the manager will get the new session.
manager.proceedNewSessionHandling();
// Since the new session handling will re-create the leader node, a new valid node shall be created.
Assert.assertTrue(TestHelper.verify(() -> manager.isLeader(), 1000));
// All the callback handlers shall be recovered.
Assert.assertTrue(TestHelper.verify(() -> manager.getHandlers().size() == handlerCount, 3000));
Assert.assertTrue(manager.getHandlers().stream().allMatch(handler -> handler.isReady()));
manager.disconnect();
TestHelper.dropCluster(clusterName, _gZkClient);
}
@Test (dependsOnMethods = "testAcquireLeadershipOnNewSession")
public void testRemoveOldSession() throws Exception {
String methodName = TestHelper.getTestMethodName();
String clusterName = _className + "_" + methodName;
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
10, // partitions per resource
5, // number of nodes
3, // replicas
"MasterSlave", true); // do rebalance
String testInstanceName = "localhost_12918";
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, testInstanceName);
participant.syncStart();
PropertyKey.Builder keyBuilder = new PropertyKey.Builder(clusterName);
String testCurrentStateSessionId = "testCurrentStateSessionId";
_baseAccessor
.create(keyBuilder.sessions(testInstanceName).toString() + "/" + testCurrentStateSessionId,
new ZNRecord(testCurrentStateSessionId), AccessOption.PERSISTENT);
String testTaskCurrentStateSessionId = "testTaskCurrentStateSessionId";
_baseAccessor.create(keyBuilder.taskCurrentStateSessions(testInstanceName).toString() + "/"
+ testTaskCurrentStateSessionId, new ZNRecord(testTaskCurrentStateSessionId),
AccessOption.PERSISTENT);
ZkTestHelper.expireSession(participant.getZkClient());
// Ensure that the test sessions are removed
Assert.assertEquals(_gZkClient.getChildren(keyBuilder.sessions(testInstanceName).toString()),
Collections.emptyList());
Assert.assertEquals(
_gZkClient.getChildren(keyBuilder.taskCurrentStateSessions(testInstanceName).toString()),
Collections.emptyList());
participant.syncStop();
deleteCluster(clusterName);
}
  /*
   * Tests a session that expires before ZkHelixManager.handleNewSession(sessionId) is called.
   * The test checks that expired sessions are discarded and that handleNewSession returns early
   * for them, so the live instance is created only by the latest session.
   * New sessions are not handled until 2 expired session events have been created, which simulates
   * a long backlog in the event queue. At that point the first new session has already expired and
   * should be discarded; the live instance is created only by the second new session.
   * The test timeout is set to 5 minutes, just in case the zk server is dead and the test hangs.
   */
@Test(timeOut = 5 * 60 * 1000L)
public void testDiscardExpiredSessions() throws Exception {
final String methodName = TestHelper.getTestMethodName();
final String clusterName = _className + "_" + methodName;
final ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(ZK_ADDR));
final PropertyKey.Builder keyBuilder = accessor.keyBuilder();
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
10, // partitions per resource
5, // number of nodes
3, // replicas
"MasterSlave", true); // do rebalance
final String instanceName = "localhost_12918";
final BlockingHandleNewSessionZkHelixManager manager =
new BlockingHandleNewSessionZkHelixManager(clusterName, instanceName,
InstanceType.PARTICIPANT, ZK_ADDR);
manager.connect();
final String originalSessionId = manager.getSessionId();
final LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceName));
final long originalLiveInstanceCreationTime = liveInstance.getStat().getCreationTime();
// Verify current live instance.
Assert.assertNotNull(liveInstance);
Assert.assertEquals(liveInstance.getEphemeralOwner(), originalSessionId);
final int handlerCount = manager.getHandlers().size();
final long originalNewSessionStartTime = manager.getHandleNewSessionStartTime();
/*
* Create 2 expired session events. Followed by the expired sessions, there will be 2 new
* sessions(S1, S2) created: S0(original) expired -> S1 created -> S1 expired -> S2 created.
* Session S1 would not create a live instance. Instead, only S2 creates a live instance.
*/
for (int i = 0; i < 2; i++) {
final String lastSessionId = ZKUtil.toHexSessionId(manager.getZkClient().getSessionId());
try {
// Lock zk event processing to simulate a long backlog queue.
((ZkClient) manager.getZkClient()).getEventLock().lockInterruptibly();
// Async expire the session and create a new session.
ZkTestHelper.asyncExpireSession(manager.getZkClient());
        // Wait and verify the zookeeper connection is no longer alive.
Assert.assertTrue(TestHelper.verify(
() -> !((ZkClient) manager.getZkClient()).getConnection().getZookeeperState().isAlive(),
3000L));
} finally {
// Unlock to start processing event again.
((ZkClient) manager.getZkClient()).getEventLock().unlock();
}
// Wait until the ZkClient has got a new session.
Assert.assertTrue(TestHelper.verify(() -> {
try {
final String sessionId = ZKUtil.toHexSessionId(manager.getZkClient().getSessionId());
return !"0".equals(sessionId) && !sessionId.equals(lastSessionId);
} catch (HelixException ex) {
return false;
}
}, 2000L));
// Ensure that the manager has not processed the new session event yet.
Assert.assertEquals(manager.getHandleNewSessionStartTime(), originalNewSessionStartTime);
}
// Start to handle all new sessions.
for (int i = 0; i < 2; i++) {
// The live instance is gone and should NOT be created by the expired session.
Assert.assertNull(accessor.getProperty(keyBuilder.liveInstance(instanceName)));
final long lastEndTime = manager.getHandleNewSessionEndTime();
// Proceed the new session handling, so the manager will
// get the second new session and process it.
manager.proceedNewSessionHandling();
// Wait for handling new session to complete.
Assert.assertTrue(
TestHelper.verify(() -> manager.getHandleNewSessionEndTime() > lastEndTime, 2000L));
}
// From now on, the live instance is created.
// The latest(the final new one) session id that is valid.
final String latestSessionId = ZKUtil.toHexSessionId(manager.getZkClient().getSessionId());
Assert.assertTrue(TestHelper.verify(() -> {
// Newly created live instance should be created by the latest session
// and have a new creation time.
LiveInstance newLiveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceName));
return newLiveInstance != null
&& newLiveInstance.getStat().getCreationTime() != originalLiveInstanceCreationTime
&& newLiveInstance.getEphemeralOwner().equals(latestSessionId);
}, 2000L));
// All the callback handlers shall be recovered.
Assert.assertTrue(TestHelper.verify(() -> manager.getHandlers().size() == handlerCount, 1000L));
Assert.assertTrue(manager.getHandlers().stream().allMatch(CallbackHandler::isReady));
// Clean up.
manager.disconnect();
deleteCluster(clusterName);
}
  /*
   * This test simulates a zk session expiry caused by a long-running handler reset; the ephemeral
   * node should not be created by the expired zk session.
   * The test follows these steps:
   * 1. Original session S0 initialized
   * 2. S0 expired, new session S1 created
   * 3. S1 spends a long time resetting handlers
   * 4. S1 expired, new session S2 created
   * 5. S1 completes resetting handlers; the live instance should not be created by the expired S1
   * 6. S2 is valid and creates the live instance.
   */
@Test
public void testSessionExpiredWhenResetHandlers() throws Exception {
final String methodName = TestHelper.getTestMethodName();
final String clusterName = _className + "_" + methodName;
final ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(ZK_ADDR));
final PropertyKey.Builder keyBuilder = accessor.keyBuilder();
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
10, // partitions per resource
5, // number of nodes
3, // replicas
"MasterSlave", true); // do rebalance
// 1. Original session S0 initialized
final String instanceName = "localhost_12918";
final BlockingResetHandlersZkHelixManager manager =
new BlockingResetHandlersZkHelixManager(clusterName, instanceName, InstanceType.PARTICIPANT,
ZK_ADDR);
manager.connect();
final String originalSessionId = manager.getSessionId();
final long initResetHandlersStartTime = manager.getResetHandlersStartTime();
final LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceName));
// Verify current live instance.
Assert.assertNotNull(liveInstance);
Assert.assertEquals(liveInstance.getEphemeralOwner(), originalSessionId);
final int handlerCount = manager.getHandlers().size();
final long originalCreationTime = liveInstance.getStat().getCreationTime();
final CountDownLatch mainThreadBlocker = new CountDownLatch(1);
final CountDownLatch helperThreadBlocker = new CountDownLatch(1);
// Helper thread to help verify zk session states, async expire S1, proceed S1 to reset
// handlers and release main thread to verify results.
new Thread(() -> {
try {
// Wait for new session S1 is established and starting to reset handlers.
TestHelper.verify(() -> !(manager.getSessionId().equals(originalSessionId))
&& manager.getResetHandlersStartTime() > initResetHandlersStartTime, 3000L);
// S1's info.
final String lastSessionId = manager.getSessionId();
final long lastResetHandlersStartTime = manager.getResetHandlersStartTime();
((ZkClient) manager.getZkClient()).getEventLock().lockInterruptibly();
try {
// 4. S1 expired, new session S2 created
ZkTestHelper.asyncExpireSession(manager.getZkClient());
// Wait and verify the new session S2 is established.
TestHelper.verify(() -> !((ZKUtil.toHexSessionId(manager.getZkClient().getSessionId()))
.equals(lastSessionId)), 3000L);
} catch (Exception ignored) {
// Ignored.
} finally {
// Unlock to start processing event again.
((ZkClient) manager.getZkClient()).getEventLock().unlock();
}
// Proceed S1 to complete reset handlers and try to create live instance.
manager.proceedResetHandlers();
// Wait for S2 to handle new session.
TestHelper.verify(() -> !(manager.getSessionId().equals(lastSessionId))
&& manager.getResetHandlersStartTime() > lastResetHandlersStartTime, 3000L);
// Notify main thread to verify result: expired S1 should not create live instance.
mainThreadBlocker.countDown();
// Wait for notification from main thread to proceed S2.
helperThreadBlocker.await();
// Proceed S2.
// 6. S2 is valid and creates live instance.
manager.proceedResetHandlers();
final String latestSessionId = ZKUtil.toHexSessionId(manager.getZkClient().getSessionId());
TestHelper.verify(() -> {
// Newly created live instance should be created by the latest session
// and have a new creation time.
LiveInstance newLiveInstance =
accessor.getProperty(keyBuilder.liveInstance(instanceName));
return newLiveInstance != null
&& newLiveInstance.getStat().getCreationTime() != originalCreationTime
&& newLiveInstance.getEphemeralOwner().equals(latestSessionId);
}, 2000L);
} catch (Exception ignored) {
// Ignored.
}
// Notify the main thread that live instance is already created by session S2.
mainThreadBlocker.countDown();
}).start();
// Lock zk event processing to simulate a long backlog queue.
((ZkClient) manager.getZkClient()).getEventLock().lockInterruptibly();
try {
// 2. S0 expired, new session S1 created
ZkTestHelper.asyncExpireSession(manager.getZkClient());
// 3. S1 spends a long time resetting handlers during this period.
// Wait and verify the zookeeper is closed.
Assert.assertTrue(TestHelper.verify(
() -> !((ZkClient) manager.getZkClient()).getConnection().getZookeeperState().isAlive(),
3000L));
} finally {
// Unlock to start processing event again.
((ZkClient) manager.getZkClient()).getEventLock().unlock();
}
// Wait for S1 completing resetting handlers.
mainThreadBlocker.await();
// 5. S1 completes resetting handlers, live instance should not be created by the expired S1
Assert.assertNull(accessor.getProperty(keyBuilder.liveInstance(instanceName)));
// Notify helper thread to proceed S2.
helperThreadBlocker.countDown();
// Wait for live instance being created by the new session S2.
mainThreadBlocker.await();
// From now on, the live instance is already created by S2.
// The latest(the final new one S2) session id that is valid.
final String latestSessionId = ZKUtil.toHexSessionId(manager.getZkClient().getSessionId());
Assert.assertTrue(TestHelper.verify(() -> {
// Newly created live instance should be created by the latest session
// and have a new creation time.
LiveInstance newLiveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceName));
return newLiveInstance != null
&& newLiveInstance.getStat().getCreationTime() != originalCreationTime && newLiveInstance
.getEphemeralOwner().equals(latestSessionId);
}, 2000L));
// All the callback handlers shall be recovered.
Assert.assertTrue(TestHelper.verify(() -> manager.getHandlers().size() == handlerCount, 1000L));
Assert.assertTrue(TestHelper
.verify(() -> manager.getHandlers().stream().allMatch(CallbackHandler::isReady), 3000L));
// Clean up.
manager.disconnect();
deleteCluster(clusterName);
}
class MockLiveInstanceChangeListener implements LiveInstanceChangeListener {
private final HelixManager _manager;
private final Set<String> _expectedLiveInstances;
public MockLiveInstanceChangeListener(HelixManager manager,
Set<String> expectedLiveInstanceNames) {
_manager = manager;
_expectedLiveInstances = expectedLiveInstanceNames;
}
@Override
public void onLiveInstanceChange(List<LiveInstance> liveInstances,
NotificationContext changeContext) {
if (changeContext.getType() != NotificationContext.Type.FINALIZE) {
for (LiveInstance liveInstance : liveInstances) {
if (_expectedLiveInstances.contains(liveInstance.getInstanceName())) {
try {
_manager.addCurrentStateChangeListener(
(CurrentStateChangeListener) (instanceName, statesInfo, currentStateChangeContext) -> {
// empty callback
}, liveInstance.getInstanceName(), liveInstance.getEphemeralOwner());
} catch (Exception e) {
throw new HelixException("Unexpected exception in the test method.", e);
}
}
}
}
}
}
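  /*
   * Verifies that callback handlers registered while existing handlers are being initialized
   * (after a session expiry) do not cause a ConcurrentModificationException, and that all
   * handlers end up ready.
   */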
@Test
public void testConcurrentInitCallbackHandlers() throws Exception {
final String clusterName =
CLUSTER_PREFIX + "_" + _className + "_" + TestHelper.getTestMethodName();
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
final String spectatorName = TestHelper.getTestMethodName() + "Spectator";
try {
BlockingHandleNewSessionZkHelixManager helixManager =
new BlockingHandleNewSessionZkHelixManager(clusterName, spectatorName,
InstanceType.SPECTATOR, _gZkClient.getServers());
helixManager.connect();
// Add two mock listeners that will add more callback handlers while handling INIT or CALLBACK event.
// Note that we have to test with 2 separate listeners so one of them has a chance to fail if
// there is a concurrent modification exception.
helixManager.addLiveInstanceChangeListener(
new MockLiveInstanceChangeListener(helixManager, Collections.singleton("localhost_1")));
helixManager.addLiveInstanceChangeListener(
new MockLiveInstanceChangeListener(helixManager, Collections.singleton("localhost_2")));
      // Session expiry will trigger all callback handlers to be re-initialized, and the injected
      // live-instance listeners will register more callback handlers during the init process.
ZkTestHelper.asyncExpireSession(helixManager.getZkClient());
// Create mock live instance znodes to trigger the internal callback handling logic which will
// modify the handler list.
setupLiveInstances(clusterName, new int[] { 1, 2 });
// Start new session handling so the manager will call the initHandler() for initializing all
// existing handlers.
helixManager.proceedNewSessionHandling();
// Ensure the new session has been processed.
      Assert.assertTrue(TestHelper.verify(() -> helixManager.getHandleNewSessionEndTime() != 0, 3000));
// Verify that both new mock current state callback handlers have been initialized normally.
// Note that if there is concurrent modification that cause errors, one of the callback will
// not be initialized normally.
for (CallbackHandler handler : helixManager.getHandlers()) {
Assert.assertTrue(handler.isReady(),
"CallbackHandler is not initialized as expected. It might be caused by a ConcurrentModificationException");
}
} finally {
TestHelper.dropCluster(clusterName, _gZkClient);
}
}
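  /*
   * A ZkHelixManager whose handleNewSession() blocks (after the initial connect consumes the
   * single semaphore permit) until the test explicitly releases it, simulating a long backlog
   * of unprocessed session events.
   */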
static class BlockingHandleNewSessionZkHelixManager extends ZKHelixManager {
private final Semaphore newSessionHandlingCount = new Semaphore(1);
private long handleNewSessionStartTime = 0L;
private long handleNewSessionEndTime = 0L;
public BlockingHandleNewSessionZkHelixManager(String clusterName, String instanceName,
InstanceType instanceType, String zkAddress) {
super(clusterName, instanceName, instanceType, zkAddress);
}
@Override
public void handleNewSession(final String sessionId) throws Exception {
newSessionHandlingCount.acquire();
handleNewSessionStartTime = System.currentTimeMillis();
super.handleNewSession(sessionId);
handleNewSessionEndTime = System.currentTimeMillis();
}
void proceedNewSessionHandling() {
handleNewSessionStartTime = 0L;
handleNewSessionEndTime = 0L;
newSessionHandlingCount.release();
}
List<CallbackHandler> getHandlers() {
return _handlers;
}
RealmAwareZkClient getZkClient() {
return _zkclient;
}
long getHandleNewSessionStartTime() {
return handleNewSessionStartTime;
}
long getHandleNewSessionEndTime() {
return handleNewSessionEndTime;
}
}
/*
* A ZkHelixManager that simulates long time cost in resetting handlers.
*/
static class BlockingResetHandlersZkHelixManager extends ZKHelixManager {
private final Semaphore resetHandlersSemaphore = new Semaphore(1);
private long resetHandlersStartTime = 0L;
public BlockingResetHandlersZkHelixManager(String clusterName, String instanceName,
InstanceType instanceType, String zkAddress) {
super(clusterName, instanceName, instanceType, zkAddress);
}
@Override
void resetHandlers(boolean isShutdown) {
resetHandlersStartTime = System.currentTimeMillis();
try {
if (!isShutdown) {
resetHandlersSemaphore.tryAcquire(20L, TimeUnit.SECONDS);
}
} catch (InterruptedException ignored) {
// Ignore the exception.
}
super.resetHandlers(isShutdown);
}
void proceedResetHandlers() {
resetHandlersSemaphore.release();
}
List<CallbackHandler> getHandlers() {
return _handlers;
}
RealmAwareZkClient getZkClient() {
return _zkclient;
}
long getResetHandlersStartTime() {
return resetHandlersStartTime;
}
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkCacheAsyncOpSingleThread.java
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import org.apache.helix.AccessOption;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkTestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.ZNRecordUpdater;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestZkCacheAsyncOpSingleThread extends ZkUnitTestBase {
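  /*
   * Verifies that a shared ZkClient re-installs its watchers after session expiry: the zk cache
   * keeps matching the data on zk across two consecutive session kills.
   */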
@Test
public void testSessionExpirationWithSharedZkClient() throws Exception {
int curstateCnt = 10;
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
// init external base data accessor
HelixZkClient sharedZkclient = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(ZK_ADDR));
sharedZkclient.setZkSerializer(new ZNRecordSerializer());
ZkBaseDataAccessor<ZNRecord> sharedBaseAccessor = new ZkBaseDataAccessor<>(sharedZkclient);
// init zkCacheBaseDataAccessor
String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901");
String extViewPath = PropertyPathBuilder.externalView(clusterName);
ZkBaseDataAccessor<ZNRecord> extBaseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
extBaseAccessor.create(curStatePath, null, AccessOption.PERSISTENT);
List<String> zkCacheInitPaths = Arrays.asList(curStatePath, extViewPath);
ZkCacheBaseDataAccessor<ZNRecord> accessor =
new ZkCacheBaseDataAccessor<>(sharedBaseAccessor, null, null, zkCacheInitPaths);
boolean ret =
TestHelper.verifyZkCache(zkCacheInitPaths, accessor._zkCache._cache, _gZkClient, true);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
// create 10 current states using external base accessor
List<String> paths = new ArrayList<>();
List<ZNRecord> records = new ArrayList<>();
for (int i = 0; i < curstateCnt; i++) {
String path = PropertyPathBuilder
.instanceCurrentState(clusterName, "localhost_8901", "session_0", "TestDB" + i);
ZNRecord record = new ZNRecord("TestDB" + i);
paths.add(path);
records.add(record);
}
boolean[] success = extBaseAccessor.createChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < curstateCnt; i++) {
Assert.assertTrue(success[i], "Should succeed in create: " + paths.get(i));
}
TestHelper.verifyWithTimeout("verifyZkCache", 5000, zkCacheInitPaths, accessor._zkCache._cache,
_gZkClient, true);
// dup shared ZkClient
HelixZkClient dupZkclient = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(ZK_ADDR));
    // kill the session to make sure the shared zkClient re-installs its watchers
final long sessionId = dupZkclient.getSessionId();
ZkTestHelper.asyncExpireSession(dupZkclient);
ret = TestHelper.verify(() -> {
long curSessionId = dupZkclient.getSessionId();
return curSessionId != sessionId && curSessionId != 0;
}, 10000);
Assert.assertTrue(ret, "kill session timed out!");
    // Kill the session one more time to cover the code path where ZkClient resets the flag that
    // indicates the first sync-connect has happened.
final long sessionId1 = dupZkclient.getSessionId();
ZkTestHelper.asyncExpireSession(dupZkclient);
ret = TestHelper.verify(() -> {
long curSessionId = dupZkclient.getSessionId();
return curSessionId != sessionId1 && curSessionId != 0;
}, 10000);
Assert.assertTrue(ret, "kill session second time timed out!");
// remove the currentstates
paths.clear();
for (int i = 0; i < curstateCnt; i++) {
String path = PropertyPathBuilder
.instanceCurrentState(clusterName, "localhost_8901", "session_0", "TestDB" + i);
paths.add(path);
}
success = extBaseAccessor.remove(paths, 0);
for (int i = 0; i < curstateCnt; i++) {
Assert.assertTrue(success[i], "Should succeed in remove:" + paths.get(i));
}
TestHelper.verifyWithTimeout("verifyZkCache", 5000, zkCacheInitPaths, accessor._zkCache._cache,
_gZkClient, true);
}
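  /*
   * Happy path for externally issued operations: creates, updates, sets, and removes znodes via
   * a separate base accessor and verifies that the zk cache converges to the data on zk.
   */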
@Test
public void testHappyPathExtOpZkCacheBaseDataAccessor() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
// init external base data accessor
HelixZkClient extZkclient = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(ZK_ADDR));
extZkclient.setZkSerializer(new ZNRecordSerializer());
ZkBaseDataAccessor<ZNRecord> extBaseAccessor = new ZkBaseDataAccessor<>(extZkclient);
// init zkCacheBaseDataAccessor
String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901");
String extViewPath = PropertyPathBuilder.externalView(clusterName);
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
extBaseAccessor.create(curStatePath, null, AccessOption.PERSISTENT);
List<String> zkCacheInitPaths = Arrays.asList(curStatePath, extViewPath);
ZkCacheBaseDataAccessor<ZNRecord> accessor =
new ZkCacheBaseDataAccessor<>(baseAccessor, null, null, zkCacheInitPaths);
// TestHelper.printCache(accessor._zkCache);
boolean ret =
TestHelper.verifyZkCache(zkCacheInitPaths, accessor._zkCache._cache, _gZkClient, true);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
// create 10 current states using external base accessor
List<String> paths = new ArrayList<>();
List<ZNRecord> records = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901",
"session_0", "TestDB" + i);
ZNRecord record = new ZNRecord("TestDB" + i);
paths.add(path);
records.add(record);
}
boolean[] success = extBaseAccessor.createChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should succeed in create: " + paths.get(i));
}
    // wait for the zkEventThread to update the zkCache
    // verify the write-through cache
ret = TestHelper.verify(() -> {
return TestHelper.verifyZkCache(zkCacheInitPaths, accessor._zkCache._cache, _gZkClient, true);
}, 30 * 100);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
// update each current state 10 times by external base accessor
List<DataUpdater<ZNRecord>> updaters = new ArrayList<>();
for (int j = 0; j < 10; j++) {
paths.clear();
updaters.clear();
for (int i = 0; i < 10; i++) {
String path = curStatePath + "/session_0/TestDB" + i;
ZNRecord newRecord = new ZNRecord("TestDB" + i);
newRecord.setSimpleField("" + j, "" + j);
DataUpdater<ZNRecord> updater = new ZNRecordUpdater(newRecord);
paths.add(path);
updaters.add(updater);
}
success = extBaseAccessor.updateChildren(paths, updaters, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should succeed in update: " + paths.get(i));
}
}
    // wait for the zkEventThread to update the zkCache
Thread.sleep(100);
// verify cache
// TestHelper.printCache(accessor._zkCache);
ret = TestHelper.verifyZkCache(zkCacheInitPaths, accessor._zkCache._cache, _gZkClient, true);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
// set 10 external views by external accessor
paths.clear();
records.clear();
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.externalView(clusterName, "TestDB" + i);
ZNRecord record = new ZNRecord("TestDB" + i);
paths.add(path);
records.add(record);
}
success = extBaseAccessor.setChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should succeed in set: " + paths.get(i));
}
    // wait for the zkEventThread to update the zkCache
Thread.sleep(100);
// verify cache
// TestHelper.printCache(accessor._zkCache._cache);
ret = TestHelper.verifyZkCache(zkCacheInitPaths, accessor._zkCache._cache, _gZkClient, true);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
// remove 10 external views by external accessor
paths.clear();
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.externalView(clusterName, "TestDB" + i);
paths.add(path);
}
success = extBaseAccessor.remove(paths, 0);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should succeed in remove: " + paths.get(i));
}
    // wait for the zkEventThread to update the zkCache
Thread.sleep(100);
// verify cache
// TestHelper.printCache(accessor._zkCache._cache);
ret = TestHelper.verifyZkCache(zkCacheInitPaths, accessor._zkCache._cache, _gZkClient, true);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
// clean up
extZkclient.close();
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
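  /*
   * Happy path for operations issued through the caching accessor itself: create/update/set are
   * reflected in the write-through cache, and get/getChildren/exists read back the expected data.
   */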
@Test
public void testHappyPathSelfOpZkCacheBaseDataAccessor() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
// init zkCacheDataAccessor
String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901");
String extViewPath = PropertyPathBuilder.externalView(clusterName);
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
baseAccessor.create(curStatePath, null, AccessOption.PERSISTENT);
List<String> zkCacheInitPaths = Arrays.asList(curStatePath, extViewPath);
ZkCacheBaseDataAccessor<ZNRecord> accessor =
new ZkCacheBaseDataAccessor<>(baseAccessor, null, null, zkCacheInitPaths);
// TestHelper.printCache(accessor._zkCache._cache);
boolean ret =
TestHelper.verifyZkCache(zkCacheInitPaths, accessor._zkCache._cache, _gZkClient, true);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
// create 10 current states using this accessor
List<String> paths = new ArrayList<>();
List<ZNRecord> records = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901",
"session_0", "TestDB" + i);
ZNRecord record = new ZNRecord("TestDB" + i);
paths.add(path);
records.add(record);
}
boolean[] success = accessor.createChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should succeed in create: " + paths.get(i));
}
// verify cache
// TestHelper.printCache(accessor._zkCache._cache);
ret = TestHelper.verifyZkCache(zkCacheInitPaths, accessor._zkCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
// update each current state 10 times by this accessor
List<DataUpdater<ZNRecord>> updaters = new ArrayList<>();
for (int j = 0; j < 10; j++) {
paths.clear();
updaters.clear();
for (int i = 0; i < 10; i++) {
String path = curStatePath + "/session_0/TestDB" + i;
ZNRecord newRecord = new ZNRecord("TestDB" + i);
newRecord.setSimpleField("" + j, "" + j);
DataUpdater<ZNRecord> updater = new ZNRecordUpdater(newRecord);
paths.add(path);
updaters.add(updater);
}
success = accessor.updateChildren(paths, updaters, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should succeed in update: " + paths.get(i));
}
}
// verify cache
// TestHelper.printCache(accessor._zkCache._cache);
ret = TestHelper.verifyZkCache(zkCacheInitPaths, zkCacheInitPaths, accessor._zkCache._cache,
_gZkClient, true);
// ret = TestHelper.verifyZkCache(zkCacheInitPaths, accessor, _gZkClient, true);
// System.out.println("ret: " + ret);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
// set 10 external views 10 times by this accessor
paths.clear();
records.clear();
for (int j = 0; j < 10; j++) {
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.externalView(clusterName, "TestDB" + i);
ZNRecord record = new ZNRecord("TestDB" + i);
record.setSimpleField("setKey", "" + j);
paths.add(path);
records.add(record);
}
success = accessor.setChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should succeed in set: " + paths.get(i));
}
}
// verify cache
// TestHelper.printCache(accessor._zkCache._cache);
ret = TestHelper.verifyZkCache(zkCacheInitPaths, accessor._zkCache._cache, _gZkClient, true);
// System.out.println("ret: " + ret);
Assert.assertTrue(ret, "zkCache doesn't match data on Zk");
// get 10 external views
paths.clear();
records.clear();
for (int i = 0; i < 10; i++) {
String path = extViewPath + "/TestDB" + i;
paths.add(path);
}
records = accessor.get(paths, null, 0, true);
for (int i = 0; i < 10; i++) {
Assert.assertEquals(records.get(i).getId(), "TestDB" + i);
}
// getChildren
records.clear();
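// The trailing ints of this getChildren overload are taken here to be a retry count
// and a retry interval in ms; 0/0 means no retries.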
records = accessor.getChildren(extViewPath, null, 0, 0, 0);
for (int i = 0; i < 10; i++) {
Assert.assertEquals(records.get(i).getId(), "TestDB" + i);
}
// exists
paths.clear();
for (int i = 0; i < 10; i++) {
String path = curStatePath + "/session_0/TestDB" + i;
paths.add(path);
}
success = accessor.exists(paths, 0);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should exits: " + paths.get(i));
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
}
| 9,806 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZKUtil.java | package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.AssertJUnit;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestZKUtil extends ZkUnitTestBase {
private static final Logger LOG = LoggerFactory.getLogger(TestZKUtil.class);
String clusterName = CLUSTER_PREFIX + "_" + getShortClassName();
@BeforeClass()
public void beforeClass() throws Exception {
boolean result = ZKUtil.isClusterSetup(clusterName, _gZkClient);
AssertJUnit.assertFalse(result);
result = ZKUtil.isClusterSetup(null, _gZkClient);
AssertJUnit.assertFalse(result);
result = ZKUtil.isClusterSetup(null, (HelixZkClient) null);
AssertJUnit.assertFalse(result);
result = ZKUtil.isClusterSetup(clusterName, (HelixZkClient) null);
AssertJUnit.assertFalse(result);
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
}
@AfterClass()
public void afterClass() {
deleteCluster(clusterName);
}
@AfterMethod()
public void afterMethod() {
String path = PropertyPathBuilder.instanceConfig(clusterName);
_gZkClient.deleteRecursively(path);
_gZkClient.createPersistent(path);
}
@Test()
public void testIsClusterSetup() {
boolean result = ZKUtil.isClusterSetup(clusterName, _gZkClient);
AssertJUnit.assertTrue(result);
}
@Test()
public void testChildrenOperations() {
List<ZNRecord> list = new ArrayList<ZNRecord>();
list.add(new ZNRecord("id1"));
list.add(new ZNRecord("id2"));
String path = PropertyPathBuilder.instanceConfig(clusterName);
ZKUtil.createChildren(_gZkClient, path, list);
list = ZKUtil.getChildren(_gZkClient, path);
AssertJUnit.assertEquals(2, list.size());
ZKUtil.dropChildren(_gZkClient, path, list);
ZKUtil.dropChildren(_gZkClient, path, new ZNRecord("id1"));
list = ZKUtil.getChildren(_gZkClient, path);
AssertJUnit.assertEquals(0, list.size());
ZKUtil.dropChildren(_gZkClient, path, (List<ZNRecord>) null);
}
@Test()
public void testUpdateIfExists() {
String path = PropertyPathBuilder.instanceConfig(clusterName, "id3");
ZNRecord record = new ZNRecord("id4");
ZKUtil.updateIfExists(_gZkClient, path, record, false);
AssertJUnit.assertFalse(_gZkClient.exists(path));
_gZkClient.createPersistent(path);
ZKUtil.updateIfExists(_gZkClient, path, record, false);
AssertJUnit.assertTrue(_gZkClient.exists(path));
record = _gZkClient.readData(path);
AssertJUnit.assertEquals("id4", record.getId());
}
@Test()
public void testSubtract() {
String path = PropertyPathBuilder.instanceConfig(clusterName, "id5");
ZNRecord record = new ZNRecord("id5");
record.setSimpleField("key1", "value1");
_gZkClient.createPersistent(path, record);
ZKUtil.subtract(_gZkClient, path, record);
record = _gZkClient.readData(path);
AssertJUnit.assertNull(record.getSimpleField("key1"));
}
@Test()
public void testNullChildren() {
String path = PropertyPathBuilder.instanceConfig(clusterName, "id6");
ZKUtil.createChildren(_gZkClient, path, (List<ZNRecord>) null);
}
@Test()
public void testCreateOrMerge() {
String path = PropertyPathBuilder.instanceConfig(clusterName, "id7");
ZNRecord record = new ZNRecord("id7");
List<String> list = Arrays.asList("value1");
record.setListField("list", list);
ZKUtil.createOrMerge(_gZkClient, path, record, true, true);
record = _gZkClient.readData(path);
AssertJUnit.assertEquals(list, record.getListField("list"));
record = new ZNRecord("id7");
List<String> list2 = Arrays.asList("value2");
record.setListField("list", list2);
ZKUtil.createOrMerge(_gZkClient, path, record, true, true);
record = _gZkClient.readData(path);
AssertJUnit.assertEquals(Arrays.asList("value1", "value2"), record.getListField("list"));
Map<String, String> map = new HashMap<String, String>() {{
put("k1", "v1");
}};
record.setMapField("map", map);
ZKUtil.createOrMerge(_gZkClient, path, record, true, true);
record = _gZkClient.readData(path);
AssertJUnit.assertEquals(map, record.getMapField("map"));
record = new ZNRecord("id7");
Map<String, String> map2 = new HashMap<String, String>() {{
put("k2", "v2");
}};
record.setMapField("map", map2);
ZKUtil.createOrMerge(_gZkClient, path, record, true, true);
record = _gZkClient.readData(path);
AssertJUnit.assertEquals(new HashMap<String, String>() {{
put("k1", "v1");
put("k2", "v2");
}}, record.getMapField("map"));
}
@Test()
public void testCreateOrReplace() {
String path = PropertyPathBuilder.instanceConfig(clusterName, "id8");
ZNRecord record = new ZNRecord("id8");
ZKUtil.createOrReplace(_gZkClient, path, record, true);
record = _gZkClient.readData(path);
AssertJUnit.assertEquals("id8", record.getId());
record = new ZNRecord("id9");
ZKUtil.createOrReplace(_gZkClient, path, record, true);
record = _gZkClient.readData(path);
AssertJUnit.assertEquals("id9", record.getId());
}
@Test()
public void testCreateOrUpdate() {
String path = PropertyPathBuilder.instanceConfig(clusterName, "id9");
ZNRecord record = new ZNRecord("id9");
ZKUtil.createOrMerge(_gZkClient, path, record, true, true);
record = _gZkClient.readData(path);
AssertJUnit.assertEquals("id9", record.getId());
record = new ZNRecord("id9");
List<String> list = Arrays.asList("value1", "value2");
record.setListField("list", list);
ZKUtil.createOrUpdate(_gZkClient, path, record, true, true);
record = _gZkClient.readData(path);
AssertJUnit.assertEquals(list, record.getListField("list"));
record = new ZNRecord("id9");
List<String> list2 = Arrays.asList("value3", "value4");
record.setListField("list", list2);
ZKUtil.createOrUpdate(_gZkClient, path, record, true, true);
record = _gZkClient.readData(path);
AssertJUnit.assertEquals(list2, record.getListField("list"));
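// Update semantics, unlike createOrMerge: the incoming list field replaces the
// existing one. Map fields are replaced the same way, as shown next.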
Map<String, String> map = new HashMap<String, String>() {{
put("k1", "v1");
}};
record.setMapField("map", map);
ZKUtil.createOrUpdate(_gZkClient, path, record, true, true);
record = _gZkClient.readData(path);
AssertJUnit.assertEquals(map, record.getMapField("map"));
record = new ZNRecord("id9");
Map<String, String> map2 = new HashMap<String, String>() {{
put("k2", "v2");
}};
record.setMapField("map", map2);
ZKUtil.createOrUpdate(_gZkClient, path, record, true, true);
record = _gZkClient.readData(path);
AssertJUnit.assertEquals(new HashMap<String, String>() {{
put("k2", "v2");
}}, record.getMapField("map"));
}
}
| 9,807 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkFlapping.java | package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.SystemPropertyKeys;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkTestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.manager.zk.zookeeper.IZkStateListener;
import org.apache.helix.model.LiveInstance;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestZkFlapping extends ZkUnitTestBase {
private final int _disconnectThreshold = 5;
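// Flapping detection: once a client sees more than MAX_DISCONNECT_THRESHOLD zk
// disconnect events within a (configurable) time window, the Helix manager treats the
// connection as flapping and disconnects itself, as both tests below verify.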
class ZkStateCountListener implements IZkStateListener {
int count = 0;
@Override
public void handleStateChanged(KeeperState state) {
if (state == KeeperState.Disconnected) {
count++;
}
}
@Override
public void handleNewSession(final String sessionId) {
}
@Override
public void handleSessionEstablishmentError(Throwable var1) {
}
}
@Test
public void testParticipantFlapping() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
final HelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
final PropertyKey.Builder keyBuilder = accessor.keyBuilder();
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
System.setProperty(SystemPropertyKeys.MAX_DISCONNECT_THRESHOLD,
Integer.toString(_disconnectThreshold));
try {
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
32, // partitions per resource
1, // number of nodes
1, // replicas
"MasterSlave", false);
final String instanceName = "localhost_12918";
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participant.syncStart();
final ZkClient client = (ZkClient) participant.getZkClient();
final ZkStateCountListener listener = new ZkStateCountListener();
client.subscribeStateChanges(listener);
final AtomicInteger expectDisconnectCnt = new AtomicInteger(0);
final int n = _disconnectThreshold;
for (int i = 0; i < _disconnectThreshold; i++) {
String oldSessionId = ZkTestHelper.getSessionId(client);
ZkTestHelper.simulateZkStateReconnected(client);
expectDisconnectCnt.incrementAndGet();
// wait until we get invoked by zk state change to disconnected
TestHelper.verify(() -> listener.count == expectDisconnectCnt.get(), 30 * 1000);
String newSessionId = ZkTestHelper.getSessionId(client);
Assert.assertEquals(newSessionId, oldSessionId);
}
client.unsubscribeStateChanges(listener);
// make sure participant is NOT disconnected
LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceName));
Assert.assertNotNull(liveInstance, "Live-instance should exist after " + n + " disconnects");
// trigger flapping
ZkTestHelper.simulateZkStateReconnected(client);
// wait until we get invoked by zk state change to disconnected
boolean success = TestHelper.verify(client::getShutdownTrigger, 30 * 1000);
Assert.assertTrue(success,
"The " + (n + 1) + "th disconnect event should trigger ZkHelixManager#disconnect");
// make sure participant is disconnected
success = TestHelper.verify(() -> {
LiveInstance liveInstance1 = accessor.getProperty(keyBuilder.liveInstance(instanceName));
return liveInstance1 == null;
}, 3 * 1000);
Assert.assertTrue(success, "Live-instance should be gone after " + (n + 1) + " disconnects");
participant.syncStop();
} finally {
System.clearProperty(SystemPropertyKeys.MAX_DISCONNECT_THRESHOLD);
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testControllerFlapping() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
final HelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
final PropertyKey.Builder keyBuilder = accessor.keyBuilder();
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
System.setProperty(SystemPropertyKeys.MAX_DISCONNECT_THRESHOLD,
Integer.toString(_disconnectThreshold));
try {
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
32, // partitions per resource
1, // number of nodes
1, // replicas
"MasterSlave", false);
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, clusterName, "controller");
controller.syncStart();
final ZkClient client = (ZkClient) controller.getZkClient();
final ZkStateCountListener listener = new ZkStateCountListener();
client.subscribeStateChanges(listener);
final AtomicInteger expectDisconnectCnt = new AtomicInteger(0);
final int n = _disconnectThreshold;
for (int i = 0; i < n; i++) {
String oldSessionId = ZkTestHelper.getSessionId(client);
ZkTestHelper.simulateZkStateReconnected(client);
expectDisconnectCnt.incrementAndGet();
// wait until we get invoked by zk state change to disconnected
TestHelper.verify(() -> listener.count == expectDisconnectCnt.get(), 30 * 1000);
String newSessionId = ZkTestHelper.getSessionId(client);
Assert.assertEquals(newSessionId, oldSessionId);
}
// make sure controller is NOT disconnected
LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader());
Assert.assertNotNull(leader, "Leader should exist after " + n + " disconnects");
// trigger flapping
ZkTestHelper.simulateZkStateReconnected(client);
// wait until we get invoked by zk state change to disconnected
boolean success = TestHelper.verify(client::getShutdownTrigger, 30 * 1000);
Assert.assertTrue(success,
"The " + (n + 1) + "th disconnect event should trigger ZkHelixManager#disconnect");
// make sure controller is disconnected
success = TestHelper.verify(() -> {
LiveInstance leader1 = accessor.getProperty(keyBuilder.controllerLeader());
return leader1 == null;
}, 5 * 1000);
Assert.assertTrue(success, "Leader should be gone after " + (n + 1) + " disconnects");
controller.syncStop();
} finally {
System.clearProperty(SystemPropertyKeys.MAX_DISCONNECT_THRESHOLD);
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
}
| 9,808 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkBaseDataAccessor.java | package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.stream.Collectors;
import com.google.common.collect.ImmutableList;
import org.apache.helix.AccessOption;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.ZNRecordUpdater;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.manager.zk.ZkBaseDataAccessor.AccessResult;
import org.apache.helix.manager.zk.ZkBaseDataAccessor.RetCode;
import org.apache.helix.zookeeper.exception.ZkClientException;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.apache.helix.zookeeper.zkclient.exception.ZkBadVersionException;
import org.apache.helix.zookeeper.zkclient.exception.ZkException;
import org.apache.helix.zookeeper.zkclient.exception.ZkMarshallingError;
import org.apache.helix.zookeeper.zkclient.serialize.ZkSerializer;
import org.apache.zookeeper.data.Stat;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
public class TestZkBaseDataAccessor extends ZkUnitTestBase {
// serialize/deserialize integer list to byte array
private static final ZkSerializer LIST_SERIALIZER = new ZkSerializer() {
@Override
public byte[] serialize(Object o)
throws ZkMarshallingError {
List<Integer> list = (List<Integer>) o;
return list.stream().map(String::valueOf).collect(Collectors.joining(","))
.getBytes();
}
@Override
public Object deserialize(byte[] bytes)
throws ZkMarshallingError {
String string = new String(bytes);
return Arrays.stream(string.split(",")).map(Integer::valueOf)
.collect(Collectors.toList());
}
};
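// Round trip as used below: serialize(Arrays.asList(1, 2, 3)) produces the bytes of
// "1,2,3" and deserialize recovers [1, 2, 3]. An empty list is not representable in
// this scheme, since "" would split into a single empty token.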
String _rootPath = TestHelper.getTestClassName();
@AfterMethod
public void afterMethod() {
String path = "/" + _rootPath;
if (_gZkClient.exists(path)) {
_gZkClient.deleteRecursively(path);
}
}
@Test
public void testSyncSet() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZNRecord record = new ZNRecord("msg_0");
BaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
boolean success = accessor.set(path, record, AccessOption.PERSISTENT);
Assert.assertTrue(success);
ZNRecord getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getId(), "msg_0");
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSyncSetWithVersion() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZNRecord record = new ZNRecord("msg_0");
BaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
// set persistent
boolean success = accessor.set(path, record, 0, AccessOption.PERSISTENT);
Assert.assertFalse(success, "Should fail since version not match");
try {
_gZkClient.readData(path, false);
Assert.fail("Should get no node exception");
} catch (Exception e) {
// OK
}
success = accessor.set(path, record, -1, AccessOption.PERSISTENT);
Assert.assertTrue(success);
ZNRecord getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getId(), "msg_0");
// set ephemeral
path = String.format("/%s/%s", _rootPath, "msg_1");
record = new ZNRecord("msg_1");
success = accessor.set(path, record, 0, AccessOption.EPHEMERAL);
Assert.assertFalse(success);
try {
_gZkClient.readData(path, false);
Assert.fail("Should get no node exception");
} catch (Exception e) {
// OK
}
success = accessor.set(path, record, -1, AccessOption.EPHEMERAL);
Assert.assertTrue(success);
getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getId(), "msg_1");
record.setSimpleField("key0", "value0");
success = accessor.set(path, record, 0, AccessOption.PERSISTENT);
Assert.assertTrue(success, "Should pass. AccessOption.PERSISTENT is ignored");
getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getSimpleFields().size(), 1);
Assert.assertNotNull(getRecord.getSimpleField("key0"));
Assert.assertEquals(getRecord.getSimpleField("key0"), "value0");
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSyncDoSet() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s/%s", _rootPath, "msg_0", "submsg_0");
ZNRecord record = new ZNRecord("submsg_0");
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
AccessResult result = accessor.doSet(path, record, -1, AccessOption.PERSISTENT);
Assert.assertEquals(result._retCode, RetCode.OK);
Assert.assertEquals(result._pathCreated.size(), 3);
Assert.assertTrue(result._pathCreated.contains(String.format("/%s/%s", _rootPath, "msg_0")));
Assert.assertTrue(result._pathCreated.contains(path));
Assert.assertTrue(_gZkClient.exists(String.format("/%s/%s", _rootPath, "msg_0")));
ZNRecord getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getId(), "submsg_0");
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testDoSetWithException() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s/%s", _rootPath, "msg_0", "submsg_0");
ZNRecord record = new ZNRecord("submsg_0");
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
AccessResult result = accessor.doSet(path, record, -1, AccessOption.PERSISTENT);
ZNRecord getRecord = _gZkClient.readData(path);
// create mock spy for _gZkClient
HelixZkClient mockZkClient = Mockito.spy(_gZkClient);
// mock so that _gZkClient throws ZkBadVersionException
Mockito.doThrow(new ZkBadVersionException(""))
.when(mockZkClient).writeDataGetStat(Mockito.anyString(), Mockito.any(), Mockito.anyInt());
try {
accessor.doSet(path, record, getRecord.getVersion(), AccessOption.PERSISTENT);
} catch (ZkBadVersionException e) {
// OK
}
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSyncCreate() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZNRecord record = new ZNRecord("msg_0");
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<>(_gZkClient);
boolean success = accessor.create(path, record, AccessOption.PERSISTENT);
Assert.assertTrue(success);
ZNRecord getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getId(), "msg_0");
record.setSimpleField("key0", "value0");
success = accessor.create(path, record, AccessOption.PERSISTENT);
Assert.assertFalse(success, "Should fail since node already exists");
getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getSimpleFields().size(), 0);
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSyncCreateWithTTL() {
System.setProperty("zookeeper.extendedTypesEnabled", "true");
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZNRecord record = new ZNRecord("msg_0");
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<>(_gZkClient);
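// Creating with PERSISTENT_WITH_TTL but without an actual TTL value should fail.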
boolean success = accessor.create(path, record, AccessOption.PERSISTENT_WITH_TTL);
Assert.assertFalse(success);
long ttl = 1L;
success = accessor.create(path, record, AccessOption.PERSISTENT_WITH_TTL, ttl);
Assert.assertTrue(success);
ZNRecord getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getId(), "msg_0");
record.setSimpleField("key0", "value0");
success = accessor.create(path, record, AccessOption.PERSISTENT_WITH_TTL, ttl);
Assert.assertFalse(success, "Should fail since node already exists");
getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getSimpleFields().size(), 0);
System.clearProperty("zookeeper.extendedTypesEnabled");
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSyncCreateContainer() {
System.setProperty("zookeeper.extendedTypesEnabled", "true");
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZNRecord record = new ZNRecord("msg_0");
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<>(_gZkClient);
boolean success = accessor.create(path, record, AccessOption.CONTAINER);
Assert.assertTrue(success);
ZNRecord getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getId(), "msg_0");
record.setSimpleField("key0", "value0");
success = accessor.create(path, record, AccessOption.CONTAINER);
Assert.assertFalse(success, "Should fail since node already exists");
getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getSimpleFields().size(), 0);
System.clearProperty("zookeeper.extendedTypesEnabled");
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testDefaultAccessorCreateCustomData() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZkBaseDataAccessor defaultAccessor = new ZkBaseDataAccessor(ZK_ADDR);
List<Integer> l0 = ImmutableList.of(1, 2, 3);
boolean createResult = defaultAccessor.create(path, l0, AccessOption.PERSISTENT);
// The result is expected to be false because the list is not a ZNRecord
Assert.assertFalse(createResult);
createResult = defaultAccessor.create(path, new ZNRecord("test"), AccessOption.PERSISTENT);
// The result is expected to be true
Assert.assertTrue(createResult);
defaultAccessor.close();
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testCustomAccessorCreateZnRecord() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZkBaseDataAccessor customDataAccessor = new ZkBaseDataAccessor(ZK_ADDR, LIST_SERIALIZER);
boolean createResult = customDataAccessor.create(path, new ZNRecord("test"), AccessOption.PERSISTENT);
// The result is expected to be false because a ZNRecord is not a List
Assert.assertFalse(createResult);
createResult = customDataAccessor.create(path, ImmutableList.of(1, 2, 3), AccessOption.PERSISTENT);
// The result is expected to be true
Assert.assertTrue(createResult);
customDataAccessor.close();
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSyncCreateWithCustomSerializer() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZkBaseDataAccessor<List<Integer>> accessor = new ZkBaseDataAccessor<>(ZK_ADDR, LIST_SERIALIZER);
List<Integer> l0 = ImmutableList.of(1, 2, 3);
List<Integer> l1 = ImmutableList.of(4, 5, 6);
boolean createResult = accessor.create(path, l0, AccessOption.PERSISTENT);
Assert.assertTrue(createResult);
List<Integer> data = (List<Integer>) accessor.get(path, null, AccessOption.PERSISTENT);
Assert.assertEquals(data, l0);
boolean setResult = accessor.set(path, l1, 0, AccessOption.PERSISTENT);
Assert.assertTrue(setResult);
data = (List<Integer>) accessor.get(path, null, AccessOption.PERSISTENT);
Assert.assertEquals(data, l1);
accessor.close();
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSyncUpdate() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZNRecord record = new ZNRecord("msg_0");
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
boolean success = accessor.update(path, new ZNRecordUpdater(record), AccessOption.PERSISTENT);
Assert.assertTrue(success);
ZNRecord getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getId(), "msg_0");
record.setSimpleField("key0", "value0");
success = accessor.update(path, new ZNRecordUpdater(record), AccessOption.PERSISTENT);
Assert.assertTrue(success);
getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getSimpleFields().size(), 1);
Assert.assertNotNull(getRecord.getSimpleField("key0"));
Assert.assertEquals(getRecord.getSimpleField("key0"), "value0");
// test throw exception from updater
success = accessor.update(path, new DataUpdater<ZNRecord>() {
@Override
public ZNRecord update(ZNRecord currentData) {
throw new RuntimeException("IGNORABLE: test throw exception from updater");
}
}, AccessOption.PERSISTENT);
Assert.assertFalse(success);
getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getSimpleFields().size(), 1);
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSyncRemove() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZNRecord record = new ZNRecord("msg_0");
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
// Base data accessor shall not fail when removing a non-existent path
boolean success = accessor.remove(path, 0);
Assert.assertTrue(success);
success = accessor.create(path, record, AccessOption.PERSISTENT);
Assert.assertTrue(success);
ZNRecord getRecord = _gZkClient.readData(path);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getId(), "msg_0");
// Tests that ZkClientException thrown from ZkClient should be caught
// and remove() should return false.
RealmAwareZkClient mockZkClient = Mockito.mock(RealmAwareZkClient.class);
Mockito.doThrow(new ZkException("Failed to delete " + path)).when(mockZkClient)
.delete(path);
Mockito.doThrow(new ZkClientException("Failed to recursively delete " + path)).when(mockZkClient)
.deleteRecursively(path);
ZkBaseDataAccessor<ZNRecord> accessorMock =
new ZkBaseDataAccessor<>(mockZkClient);
try {
Assert.assertFalse(accessorMock.remove(path, AccessOption.PERSISTENT),
"Should return false because ZkClientException is thrown");
} catch (ZkClientException e) {
Assert.fail("Should not throw ZkClientException because it should be caught.");
}
success = accessor.remove(path, 0);
Assert.assertTrue(success);
Assert.assertFalse(_gZkClient.exists(path));
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testDeleteNodeWithChildren() {
String root = _rootPath;
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<>(_gZkClient);
// CreateChildren
List<ZNRecord> records = new ArrayList<>();
List<String> paths = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
paths.add(PropertyPathBuilder.instanceMessage(root, "host_1", msgId));
records.add(new ZNRecord(msgId));
}
boolean[] success = accessor.createChildren(paths, records, AccessOption.PERSISTENT);
// Attempt to remove the parent znode. This shouldn't throw or log an error or warning,
// and should return true when the recursive deletion succeeds.
Assert.assertTrue(accessor.remove(PropertyPathBuilder.instanceMessage(root, "host_1"), 0),
"Recursive removal of the parent should return true.");
// Assert child message nodes were removed when calling remove on parent
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
String path = PropertyPathBuilder.instanceMessage(root, "host_1", msgId);
boolean pathExists = _gZkClient.exists(path);
Assert.assertFalse(pathExists, "Message znode should have been removed by accessor msgId=" + msgId);
}
}
@Test
public void testSyncGet() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZNRecord record = new ZNRecord("msg_0");
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
Stat stat = new Stat();
ZNRecord getRecord = accessor.get(path, stat, 0);
Assert.assertNull(getRecord);
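// With default options a missing znode reads as null; THROW_EXCEPTION_IFNOTEXIST
// makes the accessor throw instead, as verified next.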
try {
accessor.get(path, stat, AccessOption.THROW_EXCEPTION_IFNOTEXIST);
Assert.fail("Should throw exception if not exist");
} catch (Exception e) {
// OK
}
boolean success = accessor.create(path, record, AccessOption.PERSISTENT);
Assert.assertTrue(success);
getRecord = accessor.get(path, stat, 0);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getId(), "msg_0");
Assert.assertEquals(stat.getVersion(), 0);
record.setSimpleField("key0", "value0");
success = accessor.set(path, record, AccessOption.PERSISTENT);
Assert.assertTrue(success);
getRecord = accessor.get(path, stat, 0);
Assert.assertNotNull(getRecord);
Assert.assertEquals(record.getSimpleFields().size(), 1);
Assert.assertNotNull(getRecord.getSimpleField("key0"));
Assert.assertEquals(getRecord.getSimpleField("key0"), "value0");
Assert.assertEquals(stat.getVersion(), 1);
ZNRecord newRecord = new ZNRecord("msg_0");
newRecord.setSimpleField("key1", "value1");
success = accessor.update(path, new ZNRecordUpdater(newRecord), AccessOption.PERSISTENT);
Assert.assertTrue(success);
getRecord = accessor.get(path, stat, 0);
Assert.assertNotNull(getRecord);
Assert.assertEquals(getRecord.getSimpleFields().size(), 2);
Assert.assertNotNull(getRecord.getSimpleField("key0"));
Assert.assertEquals(getRecord.getSimpleField("key0"), "value0");
Assert.assertNotNull(getRecord.getSimpleField("key1"));
Assert.assertEquals(getRecord.getSimpleField("key1"), "value1");
Assert.assertEquals(stat.getVersion(), 2);
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSyncExist() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZNRecord record = new ZNRecord("msg_0");
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
boolean success = accessor.exists(path, 0);
Assert.assertFalse(success);
success = accessor.create(path, record, AccessOption.EPHEMERAL);
Assert.assertTrue(success);
success = accessor.exists(path, 0);
Assert.assertTrue(success);
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSyncGetStat() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
String path = String.format("/%s/%s", _rootPath, "msg_0");
ZNRecord record = new ZNRecord("msg_0");
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
Stat stat = accessor.getStat(path, 0);
Assert.assertNull(stat);
boolean success = accessor.create(path, record, AccessOption.EPHEMERAL);
Assert.assertTrue(success);
stat = accessor.getStat(path, 0);
Assert.assertNotNull(stat);
Assert.assertEquals(stat.getVersion(), 0);
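// A non-zero ephemeralOwner is the id of the session that created the ephemeral znode;
// persistent znodes report 0.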
Assert.assertNotEquals(stat.getEphemeralOwner(), 0L);
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testAsyncZkBaseDataAccessor() {
System.out.println(
"START TestZkBaseDataAccessor.async at " + new Date(System.currentTimeMillis()));
String root = _rootPath;
_gZkClient.deleteRecursively("/" + root);
ZkBaseDataAccessor<ZNRecord> accessor = new ZkBaseDataAccessor<>(_gZkClient);
// test async createChildren
List<ZNRecord> records = new ArrayList<>();
List<String> paths = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
paths.add(PropertyPathBuilder.instanceMessage(root, "host_1", msgId));
records.add(new ZNRecord(msgId));
}
boolean[] success = accessor.createChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
Assert.assertTrue(success[i], "Should succeed in create " + msgId);
}
// test get what we created
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
String path = PropertyPathBuilder.instanceMessage(root, "host_1", msgId);
ZNRecord record = _gZkClient.readData(path);
Assert.assertEquals(record.getId(), msgId, "Should get what we created");
}
// test async createChildren with TTL
System.setProperty("zookeeper.extendedTypesEnabled", "true");
records = new ArrayList<>();
paths = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
paths.add(PropertyPathBuilder.instanceMessage(root, "host_2", msgId));
records.add(new ZNRecord(msgId));
}
success = accessor.createChildren(paths, records, AccessOption.PERSISTENT_WITH_TTL, 1L);
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
Assert.assertTrue(success[i], "Should succeed in create " + msgId);
}
// test get what we created
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
String path = PropertyPathBuilder.instanceMessage(root, "host_2", msgId);
ZNRecord record = _gZkClient.readData(path);
Assert.assertEquals(record.getId(), msgId, "Should get what we created");
}
// test async createChildren with Container mode
records = new ArrayList<>();
paths = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
paths.add(PropertyPathBuilder.instanceMessage(root, "host_3", msgId));
records.add(new ZNRecord(msgId));
}
success = accessor.createChildren(paths, records, AccessOption.CONTAINER);
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
Assert.assertTrue(success[i], "Should succeed in create " + msgId);
}
// test get what we created
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
String path = PropertyPathBuilder.instanceMessage(root, "host_3", msgId);
ZNRecord record = _gZkClient.readData(path);
Assert.assertEquals(record.getId(), msgId, "Should get what we created");
}
System.clearProperty("zookeeper.extendedTypesEnabled");
// test async setChildren
records = new ArrayList<>();
paths = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
paths.add(PropertyPathBuilder.instanceMessage(root, "host_1", msgId));
ZNRecord newRecord = new ZNRecord(msgId);
newRecord.setSimpleField("key1", "value1");
records.add(newRecord);
}
success = accessor.setChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
Assert.assertTrue(success[i], "Should succeed in set " + msgId);
}
// test get what we set
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
String path = PropertyPathBuilder.instanceMessage(root, "host_1", msgId);
ZNRecord record = _gZkClient.readData(path);
Assert.assertEquals(record.getSimpleFields().size(), 1, "Should have 1 simple field set");
Assert.assertEquals(record.getSimpleField("key1"), "value1", "Should have value1 set");
}
// test async updateChildren
// records = new ArrayList<ZNRecord>();
List<DataUpdater<ZNRecord>> znrecordUpdaters = new ArrayList<DataUpdater<ZNRecord>>();
paths = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
paths.add(PropertyPathBuilder.instanceMessage(root, "host_1", msgId));
ZNRecord newRecord = new ZNRecord(msgId);
newRecord.setSimpleField("key2", "value2");
// records.add(newRecord);
znrecordUpdaters.add(new ZNRecordUpdater(newRecord));
}
success = accessor.updateChildren(paths, znrecordUpdaters, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
Assert.assertTrue(success[i], "Should succeed in update " + msgId);
}
// test get what we updated
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
String path = PropertyPathBuilder.instanceMessage(root, "host_1", msgId);
ZNRecord record = _gZkClient.readData(path);
Assert.assertEquals(record.getSimpleFields().size(), 2, "Should have 2 simple fields set");
Assert.assertEquals(record.getSimpleField("key2"), "value2", "Should have value2 set");
}
// test async getChildren
String parentPath = PropertyPathBuilder.instanceMessage(root, "host_1");
records = accessor.getChildren(parentPath, null, 0, 0, 0);
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
ZNRecord record = records.get(i);
Assert.assertEquals(record.getId(), msgId, "Should get what we updated");
Assert.assertEquals(record.getSimpleFields().size(), 2, "Should have 2 simple fields set");
Assert.assertEquals(record.getSimpleField("key2"), "value2", "Should have value2 set");
}
// test async exists
paths = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
paths.add(PropertyPathBuilder.instanceMessage(root, "host_1", msgId));
}
boolean[] exists = accessor.exists(paths, 0);
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
Assert.assertTrue(exists[i], "Should exist " + msgId);
}
// test async getStats
paths = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
paths.add(PropertyPathBuilder.instanceMessage(root, "host_1", msgId));
}
Stat[] stats = accessor.getStats(paths, 0);
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
Assert.assertNotNull(stats[i], "Stat should exist for " + msgId);
Assert.assertEquals(stats[i].getVersion(), 2,
"DataVersion should be 2, since we set 1 and update 1 for " + msgId);
}
// test async remove
paths = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
paths.add(PropertyPathBuilder.instanceMessage(root, "host_1", msgId));
}
success = accessor.remove(paths, 0);
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
Assert.assertTrue(success[i], "Should succeed in remove " + msgId);
}
// test get what we removed
for (int i = 0; i < 10; i++) {
String msgId = "msg_" + i;
String path = PropertyPathBuilder.instanceMessage(root, "host_1", msgId);
boolean pathExists = _gZkClient.exists(path);
Assert.assertFalse(pathExists, "Should be removed " + msgId);
}
System.out.println("END TestZkBaseDataAccessor.async at "
+ new Date(System.currentTimeMillis()));
}
}
| 9,809 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestWtCacheAsyncOpMultiThread.java | package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.concurrent.Callable;
import org.apache.helix.AccessOption;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.ZNRecordUpdater;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestWtCacheAsyncOpMultiThread extends ZkUnitTestBase {
class TestCreateZkCacheBaseDataAccessor implements Callable<Boolean> {
final ZkCacheBaseDataAccessor<ZNRecord> _accessor;
final String _clusterName;
final int _id;
TestCreateZkCacheBaseDataAccessor(ZkCacheBaseDataAccessor<ZNRecord> accessor,
String clusterName, int id) {
_accessor = accessor;
_clusterName = clusterName;
_id = id;
}
@Override
public Boolean call() throws Exception {
// create 10 current states in 2 steps
List<String> paths = new ArrayList<>();
List<ZNRecord> records = new ArrayList<>();
for (int j = 0; j < 2; j++) {
paths.clear();
records.clear();
if (_id == 1 && j == 0) {
// let thread_0 create 0-4
Thread.sleep(30);
}
if (_id == 0 && j == 1) {
// let thread_1 create 5-9
Thread.sleep(100);
}
for (int i = 0; i < 5; i++) {
int k = j * 5 + i;
String path = PropertyPathBuilder.instanceCurrentState(_clusterName, "localhost_8901",
"session_0", "TestDB" + k);
ZNRecord record = new ZNRecord("TestDB" + k);
paths.add(path);
records.add(record);
}
boolean[] success = _accessor.createChildren(paths, records, AccessOption.PERSISTENT);
// System.out.println("thread-" + _id + " creates " + j + ": " + Arrays.toString(success));
// the creates are all synchronized, so we should see either all true or all false
for (int i = 1; i < 5; i++) {
Assert.assertEquals(success[i], success[0], "Should either all succeed or all fail");
}
}
return true;
}
}
class TestUpdateZkCacheBaseDataAccessor implements Callable<Boolean> {
final ZkCacheBaseDataAccessor<ZNRecord> _accessor;
final String _clusterName;
final int _id;
TestUpdateZkCacheBaseDataAccessor(ZkCacheBaseDataAccessor<ZNRecord> accessor,
String clusterName, int id) {
_accessor = accessor;
_clusterName = clusterName;
_id = id;
}
@Override
public Boolean call() {
// update each of the 10 current states 10 times
List<String> paths = new ArrayList<>();
List<DataUpdater<ZNRecord>> updaters = new ArrayList<>();
for (int j = 0; j < 10; j++) {
paths.clear();
updaters.clear();
for (int i = 0; i < 10; i++) {
String path = PropertyPathBuilder.instanceCurrentState(_clusterName, "localhost_8901",
"session_0", "TestDB" + i);
ZNRecord newRecord = new ZNRecord("TestDB" + i);
newRecord.setSimpleField("" + j, "" + j);
DataUpdater<ZNRecord> updater = new ZNRecordUpdater(newRecord);
paths.add(path);
updaters.add(updater);
}
boolean[] success = _accessor.updateChildren(paths, updaters, AccessOption.PERSISTENT);
// System.out.println("thread-" + _id + " updates " + j + ": " + Arrays.toString(success));
for (int i = 0; i < 10; i++) {
Assert.assertTrue(success[i], "Should be all succeed");
}
}
return true;
}
}
class TestSetZkCacheBaseDataAccessor implements Callable<Boolean> {
final ZkCacheBaseDataAccessor<ZNRecord> _accessor;
final String _clusterName;
final int _id;
TestSetZkCacheBaseDataAccessor(ZkCacheBaseDataAccessor<ZNRecord> accessor, String clusterName,
int id) {
_accessor = accessor;
_clusterName = clusterName;
_id = id;
}
@Override
public Boolean call() throws Exception {
// set 10 external views in 2 steps
List<String> paths = new ArrayList<>();
List<ZNRecord> records = new ArrayList<>();
for (int j = 0; j < 2; j++) {
paths.clear();
records.clear();
if (_id == 1 && j == 0) {
// let thread_0 create 0-4
Thread.sleep(30);
}
if (_id == 0 && j == 1) {
// let thread_1 create 5-9
Thread.sleep(100);
}
for (int i = 0; i < 5; i++) {
int k = j * 5 + i;
String path = PropertyPathBuilder.externalView(_clusterName, "TestDB" + k);
ZNRecord record = new ZNRecord("TestDB" + k);
paths.add(path);
records.add(record);
}
boolean[] success = _accessor.setChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < 5; i++) {
Assert.assertTrue(success[i]);
}
}
return true;
}
}
@Test
public void testHappyPathZkCacheBaseDataAccessor() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
// init zkCacheDataAccessor
String curStatePath = PropertyPathBuilder.instanceCurrentState(clusterName, "localhost_8901");
String extViewPath = PropertyPathBuilder.externalView(clusterName);
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
baseAccessor.create(curStatePath, null, AccessOption.PERSISTENT);
List<String> cachePaths = Arrays.asList(curStatePath, extViewPath);
ZkCacheBaseDataAccessor<ZNRecord> accessor =
new ZkCacheBaseDataAccessor<>(baseAccessor, null, cachePaths, null);
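// Here the cache paths are passed as the third constructor argument, so (per the
// _wtCache field verified below) they get a write-through cache that is updated
// locally on writes through this accessor, without ZK watches.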
// TestHelper.printCache(accessor._wtCache);
boolean ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
// create 10 current states using 2 threads
List<Callable<Boolean>> threads = new ArrayList<>();
for (int i = 0; i < 2; i++) {
threads.add(new TestCreateZkCacheBaseDataAccessor(accessor, clusterName, i));
}
TestHelper.startThreadsConcurrently(threads, 1000);
// verify wtCache
// TestHelper.printCache(accessor._wtCache);
ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
// update 10 current states 10 times using 2 threads
threads.clear();
for (int i = 0; i < 2; i++) {
threads.add(new TestUpdateZkCacheBaseDataAccessor(accessor, clusterName, i));
}
TestHelper.startThreadsConcurrently(threads, 1000);
// verify wtCache
// TestHelper.printCache(accessor._wtCache);
ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
// set 10 external views using 2 threads
threads.clear();
for (int i = 0; i < 2; i++) {
threads.add(new TestSetZkCacheBaseDataAccessor(accessor, clusterName, i));
}
TestHelper.startThreadsConcurrently(threads, 1000);
// verify wtCache
// TestHelper.printCache(accessor._wtCache);
ret = TestHelper.verifyZkCache(cachePaths, accessor._wtCache._cache, _gZkClient, false);
Assert.assertTrue(ret, "wtCache doesn't match data on Zk");
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
}
| 9,810 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager | Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZKWatch.java | package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.apache.helix.ZkTestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.zookeeper.zkclient.IZkChildListener;
import org.apache.helix.zookeeper.zkclient.IZkDataListener;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestZKWatch extends ZkUnitTestBase {
private ZkClient _zkClient;
@BeforeClass
public void beforeClass() {
_zkClient = new ZkClient(ZK_ADDR);
}
@AfterClass
public void afterClass() {
_zkClient.close();
}
@Test
public void testSubscribeDataChange() throws Exception {
String existPath = "/existPath";
_zkClient.createPersistent(existPath);
final CountDownLatch deleteCondition = new CountDownLatch(1);
final IZkDataListener dataListener = new IZkDataListener() {
@Override
public void handleDataChange(String s, Object o) throws Exception {
}
@Override
public void handleDataDeleted(String path) throws Exception {
_zkClient.unsubscribeDataChanges(path, this);
deleteCondition.countDown();
}
};
_zkClient.subscribeDataChanges(existPath, dataListener);
Assert.assertEquals(_zkClient.numberOfListeners(), 1);
Map<String, List<String>> zkWatch = ZkTestHelper.getZkWatch(_zkClient);
Assert.assertEquals(zkWatch.get("dataWatches").size(), 1);
Assert.assertEquals(zkWatch.get("existWatches").size(), 0);
Assert.assertEquals(zkWatch.get("childWatches").size(), 0);
// remove the zk node, the NodeDeleted event will be processed
_zkClient.delete(existPath);
deleteCondition.await();
zkWatch = ZkTestHelper.getZkWatch(_zkClient);
Assert.assertEquals(zkWatch.get("dataWatches").size(), 0);
Assert.assertEquals(zkWatch.get("existWatches").size(), 0);
Assert.assertEquals(zkWatch.get("childWatches").size(), 0);
Assert.assertEquals(_zkClient.numberOfListeners(), 0);
}
@Test(dependsOnMethods = "testSubscribeDataChange")
public void testSubscribeChildChange() throws Exception {
String parentPath = "/tmp";
String childPath = parentPath + "/childNode";
_zkClient.createPersistent(childPath, true);
final CountDownLatch deleteCondition = new CountDownLatch(1);
IZkChildListener childListener = new IZkChildListener() {
@Override
public void handleChildChange(String parentPath, List<String> childrenPaths) throws Exception {
_zkClient.unsubscribeChildChanges(parentPath, this);
deleteCondition.countDown();
}
};
_zkClient.subscribeChildChanges(parentPath, childListener);
Map<String, List<String>> zkWatch = ZkTestHelper.getZkWatch(_zkClient);
Assert.assertEquals(zkWatch.get("dataWatches").size(), 1);
Assert.assertEquals(zkWatch.get("dataWatches").get(0), parentPath);
Assert.assertEquals(zkWatch.get("existWatches").size(), 0);
Assert.assertEquals(zkWatch.get("childWatches").size(), 1);
Assert.assertEquals(zkWatch.get("childWatches").get(0), parentPath);
// Delete the child node
_zkClient.delete(childPath);
deleteCondition.await();
zkWatch = ZkTestHelper.getZkWatch(_zkClient);
// Expectation: the watches on the parent should still exist, even though the listener
// unsubscribed itself (numberOfListeners is 0 below)
Assert.assertEquals(zkWatch.get("dataWatches").size(), 1);
Assert.assertEquals(zkWatch.get("dataWatches").get(0), parentPath);
Assert.assertEquals(zkWatch.get("existWatches").size(), 0);
Assert.assertEquals(zkWatch.get("childWatches").size(), 1);
Assert.assertEquals(zkWatch.get("childWatches").get(0), parentPath);
Assert.assertEquals(_zkClient.numberOfListeners(), 0);
// delete the parent path
_zkClient.delete(parentPath);
zkWatch = ZkTestHelper.getZkWatch(_zkClient);
Assert.assertEquals(zkWatch.get("dataWatches").size(), 0);
Assert.assertEquals(zkWatch.get("existWatches").size(), 0);
Assert.assertEquals(zkWatch.get("childWatches").size(), 0);
}
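/**
 * Subscribing a data listener on a non-existent path installs an exist watch rather
 * than a data watch, and unsubscribing does not remove that watch from the ZK server.
 */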
@Test(dependsOnMethods = "testSubscribeChildChange")
public void testSubscribeDataChangeOnNonExistPath() throws Exception {
String nonExistPath = "/nonExistPath";
IZkDataListener dataListener = new IZkDataListener() {
@Override
public void handleDataChange(String s, Object o) throws Exception {
}
@Override
public void handleDataDeleted(String s) throws Exception {
}
};
_zkClient.subscribeDataChanges(nonExistPath, dataListener);
Map<String, List<String>> zkWatch = ZkTestHelper.getZkWatch(_zkClient);
Assert.assertEquals(zkWatch.get("dataWatches").size(), 0);
Assert.assertEquals(zkWatch.get("existWatches").size(), 1);
Assert.assertEquals(zkWatch.get("childWatches").size(), 0);
// cleanup (unsubscribing does not remove the exist watch on the ZK server)
_zkClient.unsubscribeDataChanges(nonExistPath, dataListener);
zkWatch = ZkTestHelper.getZkWatch(_zkClient);
Assert.assertEquals(zkWatch.get("dataWatches").size(), 0);
Assert.assertEquals(zkWatch.get("existWatches").size(), 1);
Assert.assertEquals(zkWatch.get("childWatches").size(), 0);
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java
package org.apache.helix.manager.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.Date;
import org.apache.helix.HelixProperty;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.constant.ZkSystemPropertyKeys;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.exception.ZkClientException;
import org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.zookeeper.zkclient.exception.ZkMarshallingError;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
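/**
 * Tests ZNRecord size handling for ZNRecordSerializer and ZNRecordStreamingSerializer:
 * records whose raw size exceeds 1M are still written because the serializers compress
 * the payload, while the configurable write size limit
 * (ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES) rejects writes whose serialized size
 * exceeds the threshold.
 */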
public class TestZNRecordSizeLimit extends ZkUnitTestBase {
private static final Logger LOG = LoggerFactory.getLogger(TestZNRecordSizeLimit.class);
private static final String ASSERTION_MESSAGE =
"Should succeed because compressed data is smaller than 1M. Caused by: ";
@Test
public void testZNRecordSizeLimitUseZNRecordSerializer() {
String className = getShortClassName();
System.out.println("START testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(
System.currentTimeMillis()));
ZNRecordSerializer serializer = new ZNRecordSerializer();
String root = className;
byte[] buf = new byte[1024];
for (int i = 0; i < 1024; i++) {
buf[i] = 'a';
}
String bufStr = new String(buf);
// test zkClient
// legal-sized data gets written to zk
// write a znode of size less than 1m
final ZNRecord smallRecord = new ZNRecord("normalsize");
smallRecord.getSimpleFields().clear();
for (int i = 0; i < 900; i++) {
smallRecord.setSimpleField(i + "", bufStr);
}
String path1 = "/" + root + "/test1";
_gZkClient.createPersistent(path1, true);
_gZkClient.writeData(path1, smallRecord);
ZNRecord record = _gZkClient.readData(path1);
Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
// data whose raw size exceeds 1m still gets written, because the serializer compresses it under the limit
// prepare a znode of size larger than 1m
final ZNRecord largeRecord = new ZNRecord("oversize");
largeRecord.getSimpleFields().clear();
for (int i = 0; i < 1024; i++) {
largeRecord.setSimpleField(i + "", bufStr);
}
String path2 = "/" + root + "/test2";
_gZkClient.createPersistent(path2, true);
try {
_gZkClient.writeData(path2, largeRecord);
} catch (ZkMarshallingError e) {
Assert.fail(ASSERTION_MESSAGE + e);
}
record = _gZkClient.readData(path2);
Assert.assertNotNull(record);
// a large write also overwrites existing data on zk (the serialized bytes differ below)
record = _gZkClient.readData(path1);
try {
_gZkClient.writeData(path1, largeRecord);
} catch (ZkMarshallingError e) {
Assert.fail(ASSERTION_MESSAGE + e);
}
ZNRecord recordNew = _gZkClient.readData(path1);
try {
byte[] arr = serializer.serialize(record);
byte[] arrNew = serializer.serialize(recordNew);
Assert.assertFalse(Arrays.equals(arr, arrNew));
} catch (ZkMarshallingError e) {
Assert.fail(ASSERTION_MESSAGE + e);
}
// test ZkDataAccessor
ZKHelixAdmin admin = new ZKHelixAdmin(_gZkClient);
admin.addCluster(className, true);
InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
admin.addInstance(className, instanceConfig);
// data whose raw size exceeds 1m can still be set, because compression keeps it under the limit
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(className, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
Builder keyBuilder = accessor.keyBuilder();
IdealState idealState = new IdealState("currentState");
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 0; i < 1024; i++) {
idealState.getRecord().setSimpleField(i + "", bufStr);
}
boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
Assert.assertTrue(succeed);
HelixProperty property = accessor.getProperty(
keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
Assert.assertNull(property);
// legal sized data gets written to zk
idealState.getRecord().getSimpleFields().clear();
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 0; i < 900; i++) {
idealState.getRecord().setSimpleField(i + "", bufStr);
}
succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState);
Assert.assertTrue(succeed);
record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
try {
Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
} catch (ZkMarshallingError e) {
Assert.fail(ASSERTION_MESSAGE + e);
}
// an update that pushes the merged record past 1m raw still modifies the data on zk
idealState.getRecord().getSimpleFields().clear();
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 900; i < 1024; i++) {
idealState.getRecord().setSimpleField(i + "", bufStr);
}
// System.out.println("record: " + idealState.getRecord());
succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
Assert.assertTrue(succeed);
recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
try {
byte[] arr = serializer.serialize(record);
byte[] arrNew = serializer.serialize(recordNew);
Assert.assertFalse(Arrays.equals(arr, arrNew));
} catch (ZkMarshallingError e) {
Assert.fail(ASSERTION_MESSAGE + e);
}
System.out.println("END testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(
System.currentTimeMillis()));
}
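/**
 * Same scenarios as the previous test, but using ZNRecordStreamingSerializer plugged
 * into a shared ZkClient.
 */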
@Test(dependsOnMethods = "testZNRecordSizeLimitUseZNRecordSerializer")
public void testZNRecordSizeLimitUseZNRecordStreamingSerializer() {
String className = getShortClassName();
System.out.println("START testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(
System.currentTimeMillis()));
ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
HelixZkClient zkClient = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(ZK_ADDR));
try {
zkClient.setZkSerializer(serializer);
String root = className;
byte[] buf = new byte[1024];
for (int i = 0; i < 1024; i++) {
buf[i] = 'a';
}
String bufStr = new String(buf);
// test zkClient
// legal-sized data gets written to zk
// write a znode of size less than 1m
final ZNRecord smallRecord = new ZNRecord("normalsize");
smallRecord.getSimpleFields().clear();
for (int i = 0; i < 900; i++) {
smallRecord.setSimpleField(i + "", bufStr);
}
String path1 = "/" + root + "/test1";
zkClient.createPersistent(path1, true);
zkClient.writeData(path1, smallRecord);
ZNRecord record = zkClient.readData(path1);
try {
Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
} catch (ZkMarshallingError e) {
Assert.fail(ASSERTION_MESSAGE + e);
}
// data whose raw size exceeds 1m still gets written, because the serializer compresses it under the limit
// prepare a znode of size larger than 1m
final ZNRecord largeRecord = new ZNRecord("oversize");
largeRecord.getSimpleFields().clear();
for (int i = 0; i < 1024; i++) {
largeRecord.setSimpleField(i + "", bufStr);
}
String path2 = "/" + root + "/test2";
zkClient.createPersistent(path2, true);
try {
zkClient.writeData(path2, largeRecord);
} catch (ZkMarshallingError e) {
Assert.fail(ASSERTION_MESSAGE + e);
}
record = zkClient.readData(path2);
Assert.assertNotNull(record);
// a large write also overwrites existing data on zk (the serialized bytes differ below)
record = zkClient.readData(path1);
try {
zkClient.writeData(path1, largeRecord);
} catch (ZkMarshallingError e) {
Assert.fail(ASSERTION_MESSAGE + e);
}
ZNRecord recordNew = zkClient.readData(path1);
try {
byte[] arr = serializer.serialize(record);
byte[] arrNew = serializer.serialize(recordNew);
Assert.assertFalse(Arrays.equals(arr, arrNew));
} catch (ZkMarshallingError e) {
Assert.fail(ASSERTION_MESSAGE + e);
}
// test ZkDataAccessor
ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
admin.addCluster(className, true);
InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
admin.addInstance(className, instanceConfig);
// data whose raw size exceeds 1m can still be set, because compression keeps it under the limit
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(className, new ZkBaseDataAccessor<ZNRecord>(zkClient));
Builder keyBuilder = accessor.keyBuilder();
// ZNRecord statusUpdates = new ZNRecord("statusUpdates");
IdealState idealState = new IdealState("currentState");
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 0; i < 1024; i++) {
idealState.getRecord().setSimpleField(i + "", bufStr);
}
boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_1"), idealState);
Assert.assertTrue(succeed);
HelixProperty property = accessor.getProperty(keyBuilder.idealStates("TestDB_1"));
Assert.assertNotNull(property);
// legal sized data gets written to zk
idealState.getRecord().getSimpleFields().clear();
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 0; i < 900; i++) {
idealState.getRecord().setSimpleField(i + "", bufStr);
}
succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_2"), idealState);
Assert.assertTrue(succeed);
record = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
// an update that pushes the merged record past 1m raw still modifies the data on zk
idealState.getRecord().getSimpleFields().clear();
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 900; i < 1024; i++) {
idealState.getRecord().setSimpleField(i + "", bufStr);
}
// System.out.println("record: " + idealState.getRecord());
succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB_2"), idealState);
Assert.assertTrue(succeed);
recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
try {
byte[] arr = serializer.serialize(record);
byte[] arrNew = serializer.serialize(recordNew);
Assert.assertFalse(Arrays.equals(arr, arrNew));
} catch (ZkMarshallingError e) {
Assert.fail(ASSERTION_MESSAGE + e);
}
} finally {
zkClient.close();
}
System.out.println("END testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(
System.currentTimeMillis()));
}
/*
* Tests ZNRecordSerializer threshold.
* Two cases using ZkClient and ZkDataAccessor:
* 1. serialized data size is less than threshold and could be written to ZK.
* 2. serialized data size is greater than threshold, so ZkClientException is thrown.
*/
@Test(dependsOnMethods = "testZNRecordSizeLimitUseZNRecordStreamingSerializer")
public void testZNRecordSerializerWriteSizeLimit() throws Exception {
// Backup properties for later resetting.
final String thresholdProperty =
System.getProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES);
try {
ZNRecordSerializer serializer = new ZNRecordSerializer();
String root = getShortClassName();
byte[] buf = new byte[1024];
for (int i = 0; i < 1024; i++) {
buf[i] = 'a';
}
String bufStr = new String(buf);
// 1. legal-sized data gets written to zk
// write a znode of size less than writeSizeLimit
int rawZnRecordSize = 700;
int writeSizeLimitKb = 800;
int writeSizeLimit = writeSizeLimitKb * 1024;
System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
String.valueOf(writeSizeLimit));
final ZNRecord normalSizeRecord = new ZNRecord("normal-size");
for (int i = 0; i < rawZnRecordSize; i++) {
normalSizeRecord.setSimpleField(Integer.toString(i), bufStr);
}
String path = "/" + root + "/normal";
_gZkClient.createPersistent(path, true);
_gZkClient.writeData(path, normalSizeRecord);
ZNRecord record = _gZkClient.readData(path);
// Successfully reads the same data.
Assert.assertEquals(normalSizeRecord, record);
int length = serializer.serialize(record).length;
// Less than writeSizeLimit so it is written to ZK.
Assert.assertTrue(length < writeSizeLimit);
// 2. Large size data is not allowed to write to ZK
// Set raw record size to be large enough so its serialized data exceeds the writeSizeLimit.
rawZnRecordSize = 2000;
// Set the writeSizeLimit to very small so serialized data size exceeds the writeSizeLimit.
writeSizeLimitKb = 1;
writeSizeLimit = writeSizeLimitKb * 1024;
System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
String.valueOf(writeSizeLimit));
final ZNRecord largeRecord = new ZNRecord("large-size");
for (int i = 0; i < rawZnRecordSize; i++) {
largeRecord.setSimpleField(Integer.toString(i), bufStr);
}
path = "/" + root + "/large";
_gZkClient.createPersistent(path, true);
try {
_gZkClient.writeData(path, largeRecord);
Assert.fail("Data should not be written to ZK because data size exceeds writeSizeLimit!");
} catch (ZkMarshallingError expected) {
Assert.assertTrue(
expected.getMessage().contains(" is greater than " + writeSizeLimit + " bytes"));
}
// test ZkDataAccessor
ZKHelixAdmin admin = new ZKHelixAdmin(ZK_ADDR);
admin.addCluster(root, true);
InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
admin.addInstance(root, instanceConfig);
// Set the writeSizeLimit to 10KB so serialized data size does not exceed writeSizeLimit.
writeSizeLimitKb = 10;
writeSizeLimit = writeSizeLimitKb * 1024;
System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
String.valueOf(writeSizeLimit));
// a large raw record can still be created, because its serialized size stays under the limit
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(root, new ZkBaseDataAccessor<>(ZK_ADDR));
Builder keyBuilder = accessor.keyBuilder();
IdealState idealState = new IdealState("currentState");
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 0; i < 1024; i++) {
idealState.getRecord().setSimpleField(Integer.toString(i), bufStr);
}
boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
Assert.assertTrue(succeed);
HelixProperty property = accessor.getProperty(
keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
Assert.assertNull(property);
// legal sized data gets written to zk
idealState.getRecord().getSimpleFields().clear();
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 0; i < 900; i++) {
idealState.getRecord().setSimpleField(Integer.toString(i), bufStr);
}
succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState);
Assert.assertTrue(succeed);
record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
Assert.assertTrue(serializer.serialize(record).length < writeSizeLimit);
// Set small write size limit so writing does not succeed.
writeSizeLimitKb = 1;
writeSizeLimit = writeSizeLimitKb * 1024;
System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
String.valueOf(writeSizeLimit));
// oversized data should not update existing data on zk
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 900; i < 1024; i++) {
idealState.getRecord().setSimpleField(Integer.toString(i), bufStr);
}
succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
Assert.assertFalse(succeed,
"Update property should not succeed because data exceeds znode write limit!");
// Delete the nodes.
deletePath(_gZkClient, "/" + root);
} finally {
// Reset: add the properties back to system properties if they were originally available.
if (thresholdProperty != null) {
System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
thresholdProperty);
} else {
System.clearProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES);
}
}
}
/*
* Tests ZNRecordStreamingSerializer threshold.
* Two cases using ZkClient and ZkDataAccessor:
* 1. serialized data size is less than threshold and could be written to ZK.
* 2. serialized data size is greater than threshold, so ZkClientException is thrown.
*/
@Test(dependsOnMethods = "testZNRecordSerializerWriteSizeLimit")
public void testZNRecordStreamingSerializerWriteSizeLimit() throws Exception {
// Backup properties for later resetting.
final String thresholdProperty =
System.getProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES);
ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
HelixZkClient zkClient = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(ZK_ADDR));
try {
zkClient.setZkSerializer(serializer);
String root = getShortClassName();
byte[] buf = new byte[1024];
for (int i = 0; i < 1024; i++) {
buf[i] = 'a';
}
String bufStr = new String(buf);
// 1. legal-sized data gets written to zk
// write a znode of size less than writeSizeLimit
int rawZnRecordSize = 700;
int writeSizeLimitKb = 800;
int writeSizeLimit = writeSizeLimitKb * 1024;
System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
String.valueOf(writeSizeLimit));
final ZNRecord normalSizeRecord = new ZNRecord("normal-size");
for (int i = 0; i < rawZnRecordSize; i++) {
normalSizeRecord.setSimpleField(Integer.toString(i), bufStr);
}
String path = "/" + root + "/normal";
zkClient.createPersistent(path, true);
zkClient.writeData(path, normalSizeRecord);
ZNRecord record = zkClient.readData(path);
// Successfully reads the same data.
Assert.assertEquals(normalSizeRecord, record);
int length = serializer.serialize(record).length;
// Less than writeSizeLimit so it is written to ZK.
Assert.assertTrue(length < writeSizeLimit);
// 2. Large size data is not allowed to write to ZK
// Set raw record size to be large enough so its serialized data exceeds the writeSizeLimit.
rawZnRecordSize = 2000;
// Set the writeSizeLimit to very small so serialized data size exceeds the writeSizeLimit.
writeSizeLimitKb = 1;
writeSizeLimit = writeSizeLimitKb * 1024;
System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
String.valueOf(writeSizeLimit));
final ZNRecord largeRecord = new ZNRecord("large-size");
for (int i = 0; i < rawZnRecordSize; i++) {
largeRecord.setSimpleField(Integer.toString(i), bufStr);
}
path = "/" + root + "/large";
zkClient.createPersistent(path, true);
try {
zkClient.writeData(path, largeRecord);
Assert.fail("Data should not written to ZK because data size exceeds writeSizeLimit!");
} catch (ZkMarshallingError expected) {
Assert.assertTrue(
expected.getMessage().contains(" is greater than " + writeSizeLimit + " bytes"));
}
// test ZkDataAccessor
ZKHelixAdmin admin = new ZKHelixAdmin(ZK_ADDR);
admin.addCluster(root, true);
InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
admin.addInstance(root, instanceConfig);
// Set the writeSizeLimit to 10KB so serialized data size does not exceed writeSizeLimit.
writeSizeLimitKb = 10;
writeSizeLimit = writeSizeLimitKb * 1024;
System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
String.valueOf(writeSizeLimit));
// a large raw record can still be created, because its serialized size stays under the limit
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(root, new ZkBaseDataAccessor<>(ZK_ADDR));
Builder keyBuilder = accessor.keyBuilder();
IdealState idealState = new IdealState("currentState");
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 0; i < 1024; i++) {
idealState.getRecord().setSimpleField(Integer.toString(i), bufStr);
}
boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
Assert.assertTrue(succeed);
HelixProperty property = accessor.getProperty(
keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
Assert.assertNull(property);
// legal sized data gets written to zk
idealState.getRecord().getSimpleFields().clear();
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 0; i < 900; i++) {
idealState.getRecord().setSimpleField(Integer.toString(i), bufStr);
}
succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState);
Assert.assertTrue(succeed);
record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
Assert.assertTrue(serializer.serialize(record).length < writeSizeLimit);
// Set small write size limit so writing does not succeed.
writeSizeLimitKb = 1;
writeSizeLimit = writeSizeLimitKb * 1024;
System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
String.valueOf(writeSizeLimit));
// oversized data should not update existing data on zk
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 900; i < 1024; i++) {
idealState.getRecord().setSimpleField(Integer.toString(i), bufStr);
}
succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
Assert.assertFalse(succeed,
"Update property should not succeed because data exceeds znode write limit!");
// Delete the nodes.
deletePath(zkClient, "/" + root);
} finally {
zkClient.close();
// Reset: add the properties back to system properties if they were originally available.
if (thresholdProperty != null) {
System.setProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES,
thresholdProperty);
} else {
System.clearProperty(ZkSystemPropertyKeys.ZK_SERIALIZER_ZNRECORD_WRITE_SIZE_LIMIT_BYTES);
}
}
}
private void deletePath(final HelixZkClient zkClient, final String path) throws Exception {
Assert.assertTrue(TestHelper.verify(() -> {
do {
try {
zkClient.deleteRecursively(path);
} catch (ZkClientException ex) {
// ignore
}
} while (zkClient.exists(path));
return true;
}, TestHelper.WAIT_DURATION));
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordStreamingSerializer.java
package org.apache.helix.manager.zk;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
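/**
 * Unit tests for ZNRecordStreamingSerializer: round-trip serialization, tolerance for
 * malformed or reordered json, and compression triggered by the "enableCompression"
 * simple field.
 */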
public class TestZNRecordStreamingSerializer {
/**
* Test the normal case of serialize/deserialize where ZNRecord is well-formed
*/
@Test
public void basicTest() {
ZNRecord record = new ZNRecord("testId");
record.setMapField("k1", ImmutableMap.of("a", "b", "c", "d"));
record.setMapField("k2", ImmutableMap.of("e", "f", "g", "h"));
record.setListField("k3", ImmutableList.of("a", "b", "c", "d"));
record.setListField("k4", ImmutableList.of("d", "e", "f", "g"));
record.setSimpleField("k5", "a");
record.setSimpleField("k5", "b");
ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
ZNRecord result = (ZNRecord) serializer.deserialize(serializer.serialize(record));
Assert.assertEquals(result, record);
}
// TODO: need to fix ZNRecordStreamingSerializer before enabling this test.
@Test (enabled = false)
public void testNullFields() {
ZNRecord record = new ZNRecord("testId");
record.setMapField("K1", null);
record.setListField("k2", null);
record.setSimpleField("k3", null);
ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
byte [] data = serializer.serialize(record);
ZNRecord result = (ZNRecord) serializer.deserialize(data);
Assert.assertEquals(result, record);
Assert.assertNull(result.getMapField("K1"));
Assert.assertNull(result.getListField("K2"));
Assert.assertNull(result.getSimpleField("K3"));
Assert.assertNull(result.getListField("K4"));
}
/**
* Check that the ZNRecord is not constructed if there is no id in the json
*/
@Test
public void noIdTest() {
StringBuilder jsonString =
new StringBuilder("{\n").append(" \"simpleFields\": {},\n")
.append(" \"listFields\": {},\n").append(" \"mapFields\": {}\n").append("}\n");
ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
ZNRecord result = (ZNRecord) serializer.deserialize(jsonString.toString().getBytes());
Assert.assertNull(result);
}
/**
* Test that the json still deserializes correctly if id is not first
*/
@Test
public void idNotFirstTest() {
StringBuilder jsonString =
new StringBuilder("{\n").append(" \"simpleFields\": {},\n")
.append(" \"listFields\": {},\n").append(" \"mapFields\": {},\n")
.append("\"id\": \"myId\"\n").append("}");
ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
ZNRecord result = (ZNRecord) serializer.deserialize(jsonString.toString().getBytes());
Assert.assertNotNull(result);
Assert.assertEquals(result.getId(), "myId");
}
/**
* Test that simple, list, and map fields are initialized as empty even when not in json
*/
@Test
public void fieldAutoInitTest() {
StringBuilder jsonString = new StringBuilder("{\n").append("\"id\": \"myId\"\n").append("}");
ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
ZNRecord result = (ZNRecord) serializer.deserialize(jsonString.toString().getBytes());
Assert.assertNotNull(result);
Assert.assertEquals(result.getId(), "myId");
Assert.assertNotNull(result.getSimpleFields());
Assert.assertTrue(result.getSimpleFields().isEmpty());
Assert.assertNotNull(result.getListFields());
Assert.assertTrue(result.getListFields().isEmpty());
Assert.assertNotNull(result.getMapFields());
Assert.assertTrue(result.getMapFields().isEmpty());
}
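/**
 * Setting the simple field "enableCompression" to "true" makes the serializer compress
 * the payload; the test prints the compression ratio and verifies the record survives
 * a round trip.
 */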
@Test
public void testBasicCompression() {
ZNRecord record = new ZNRecord("testId");
int numPartitions = 1024;
int replicas = 3;
int numNodes = 100;
Random random = new Random();
for (int p = 0; p < numPartitions; p++) {
Map<String, String> map = new HashMap<String, String>();
for (int r = 0; r < replicas; r++) {
map.put("host_" + random.nextInt(numNodes), "ONLINE");
}
record.setMapField("TestResource_" + p, map);
}
ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
byte[] serializedBytes;
serializedBytes = serializer.serialize(record);
int uncompressedSize = serializedBytes.length;
System.out.println("raw serialized data length = " + serializedBytes.length);
record.setSimpleField("enableCompression", "true");
serializedBytes = serializer.serialize(record);
int compressedSize = serializedBytes.length;
System.out.println("compressed serialized data length = " + serializedBytes.length);
System.out.printf("compression ratio: %.2f \n", (uncompressedSize * 1.0 / compressedSize));
ZNRecord result = (ZNRecord) serializer.deserialize(serializedBytes);
Assert.assertEquals(result, record);
}
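/**
 * Repeats the compression round trip for growing record sizes (1000 to 19000
 * partitions) to make sure compressed records of various sizes deserialize back to
 * the original.
 */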
@Test
public void testCompression() {
int runId = 1;
while (runId < 20) {
int numPartitions = runId * 1000;
int replicas = 3;
int numNodes = 100;
Random random = new Random();
ZNRecord record = new ZNRecord("testId");
System.out.println("Partitions:" + numPartitions);
for (int p = 0; p < numPartitions; p++) {
Map<String, String> map = new HashMap<String, String>();
for (int r = 0; r < replicas; r++) {
map.put("host_" + random.nextInt(numNodes), "ONLINE");
}
record.setMapField("TestResource_" + p, map);
}
ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
byte[] serializedBytes;
record.setSimpleField("enableCompression", "true");
serializedBytes = serializer.serialize(record);
int compressedSize = serializedBytes.length;
System.out.println("compressed serialized data length = " + compressedSize);
ZNRecord result = (ZNRecord) serializer.deserialize(serializedBytes);
Assert.assertEquals(result, record);
runId = runId + 1;
}
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/serializer/TestJacksonPayloadSerializer.java
package org.apache.helix.manager.zk.serializer;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZNRecordStreamingSerializer;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
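/**
 * Tests that JacksonPayloadSerializer can round-trip an arbitrary payload object, both
 * directly on a ZNRecord and through full ZNRecord (de)serialization with either
 * ZNRecord serializer.
 */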
public class TestJacksonPayloadSerializer {
/**
* Ensure that the JacksonPayloadSerializer can serialize and deserialize arbitrary objects
*/
@Test
public void testJacksonSerializeDeserialize() {
final String RECORD_ID = "testJacksonSerializeDeserialize";
SampleDeserialized sample = getSample();
ZNRecord znRecord = new ZNRecord(RECORD_ID);
znRecord.setPayloadSerializer(new JacksonPayloadSerializer());
znRecord.setPayload(sample);
SampleDeserialized duplicate = znRecord.getPayload(SampleDeserialized.class);
Assert.assertEquals(duplicate, sample);
}
/**
* Test that the payload can be deserialized after serializing and deserializing the ZNRecord
* that encloses it. This uses ZNRecordSerializer.
*/
@Test
public void testFullZNRecordSerializeDeserialize() {
final String RECORD_ID = "testFullZNRecordSerializeDeserialize";
SampleDeserialized sample = getSample();
ZNRecord znRecord = new ZNRecord(RECORD_ID);
znRecord.setPayloadSerializer(new JacksonPayloadSerializer());
znRecord.setPayload(sample);
ZNRecordSerializer znRecordSerializer = new ZNRecordSerializer();
byte[] serialized = znRecordSerializer.serialize(znRecord);
ZNRecord deserialized = (ZNRecord) znRecordSerializer.deserialize(serialized);
deserialized.setPayloadSerializer(new JacksonPayloadSerializer());
SampleDeserialized duplicate = deserialized.getPayload(SampleDeserialized.class);
Assert.assertEquals(duplicate, sample);
}
/**
* Test that the payload can be deserialized after serializing and deserializing the ZNRecord
* that encloses it. This uses ZNRecordStreamingSerializer.
*/
@Test
public void testFullZNRecordStreamingSerializeDeserialize() {
final String RECORD_ID = "testFullZNRecordStreamingSerializeDeserialize";
SampleDeserialized sample = getSample();
ZNRecord znRecord = new ZNRecord(RECORD_ID);
znRecord.setPayloadSerializer(new JacksonPayloadSerializer());
znRecord.setPayload(sample);
ZNRecordStreamingSerializer znRecordSerializer = new ZNRecordStreamingSerializer();
byte[] serialized = znRecordSerializer.serialize(znRecord);
ZNRecord deserialized = (ZNRecord) znRecordSerializer.deserialize(serialized);
deserialized.setPayloadSerializer(new JacksonPayloadSerializer());
SampleDeserialized duplicate = deserialized.getPayload(SampleDeserialized.class);
Assert.assertEquals(duplicate, sample);
}
/**
* Test that the rawPayload field is not included when no payload was set. This is mainly
* to maintain backward compatibility.
*/
@Test
public void testRawPayloadMissingIfUnspecified() {
final String RECORD_ID = "testRawPayloadMissingIfUnspecified";
ZNRecord znRecord = new ZNRecord(RECORD_ID);
ZNRecordSerializer znRecordSerializer = new ZNRecordSerializer();
byte[] serialized = znRecordSerializer.serialize(znRecord);
ZNRecordStreamingSerializer znRecordStreamingSerializer = new ZNRecordStreamingSerializer();
byte[] streamingSerialized = znRecordStreamingSerializer.serialize(znRecord);
ObjectMapper mapper = new ObjectMapper();
try {
JsonNode jsonNode = mapper.readTree(new String(serialized));
Assert.assertFalse(jsonNode.has("rawPayload"));
JsonNode streamingJsonNode = mapper.readTree(new String(streamingSerialized));
Assert.assertFalse(streamingJsonNode.has("rawPayload"));
} catch (IOException e) {
// JsonProcessingException is a subclass of IOException, so a single catch block suffices
Assert.fail("Failed to parse serialized ZNRecord as json: " + e);
}
}
/**
* Get an object which can be tested for serialization success or failure
* @return Initialized SampleDeserialized object
*/
private SampleDeserialized getSample() {
final int INT_FIELD_VALUE = 12345;
final int LIST_FIELD_COUNT = 5;
List<Integer> intList = new LinkedList<Integer>();
for (int i = 0; i < LIST_FIELD_COUNT; i++) {
intList.add(i);
}
return new SampleDeserialized(INT_FIELD_VALUE, intList);
}
@JsonIgnoreProperties(ignoreUnknown = true)
public static class SampleDeserialized {
private int _intField;
private List<Integer> _listField;
public SampleDeserialized() {
}
public SampleDeserialized(int intField, List<Integer> listField) {
_intField = intField;
_listField = listField;
}
@JsonProperty
public void setIntField(int value) {
_intField = value;
}
@JsonProperty
public int getIntField() {
return _intField;
}
@JsonProperty
public void setListField(final List<Integer> listField) {
_listField = listField;
}
@JsonProperty
public List<Integer> getListField() {
return _listField;
}
@Override
public boolean equals(Object other) {
boolean result = true;
if (other instanceof SampleDeserialized) {
SampleDeserialized that = (SampleDeserialized) other;
if (_intField != that._intField) {
// ints must match
result = false;
} else if (_listField != null) {
// lists must match if one is not null
if (!_listField.equals(that._listField)) {
result = false;
}
} else {
// both must be null if one is null
if (that._listField != null) {
result = false;
}
}
} else {
// only compare objects of the same type
result = false;
}
return result;
}
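@Override
public int hashCode() {
// Keeps the equals/hashCode contract for the equals defined above; the assertions
// in this test themselves only rely on equals.
return 31 * _intField + (_listField == null ? 0 : _listField.hashCode());
}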
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/manager/zk/client/TestHelixZkClient.java
package org.apache.helix.manager.zk.client;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.concurrent.TimeUnit;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.exception.ZkClientException;
import org.apache.helix.zookeeper.zkclient.IZkDataListener;
import org.apache.helix.zookeeper.zkclient.ZkConnection;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestHelixZkClient extends ZkUnitTestBase {
private final String TEST_NODE = "/test_helix_zkclient";
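/**
 * A ZkConnectionManager can read and write like a normal client, refuses to close
 * while a SharedZkClient still uses its connection, and rejects new sharers once it
 * has been closed.
 */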
@Test
public void testZkConnectionManager() {
final String TEST_ROOT = "/testZkConnectionManager/IDEALSTATES";
final String TEST_PATH = TEST_ROOT + TEST_NODE;
ZkConnectionManager zkConnectionManager =
new ZkConnectionManager(new ZkConnection(ZK_ADDR), HelixZkClient.DEFAULT_CONNECTION_TIMEOUT,
null);
Assert.assertTrue(zkConnectionManager.waitUntilConnected(1, TimeUnit.SECONDS));
// This client can write/read from ZK
zkConnectionManager.createPersistent(TEST_PATH, true);
zkConnectionManager.writeData(TEST_PATH, "Test");
Assert.assertNotNull(zkConnectionManager.readData(TEST_PATH));
zkConnectionManager.deleteRecursively(TEST_ROOT);
// This client's connection can be shared, and the client cannot be closed while it is being shared
SharedZkClient sharedZkClient =
new SharedZkClient(zkConnectionManager, new HelixZkClient.ZkClientConfig(), null);
try {
zkConnectionManager.close();
Assert.fail("Dedicated ZkClient cannot be closed while sharing!");
} catch (ZkClientException hex) {
// expected
}
// This client can be closed normally when sharing ends
sharedZkClient.close();
Assert.assertTrue(sharedZkClient.isClosed());
Assert.assertFalse(sharedZkClient.waitUntilConnected(100, TimeUnit.MILLISECONDS));
zkConnectionManager.close();
Assert.assertTrue(zkConnectionManager.isClosed());
Assert.assertFalse(zkConnectionManager.waitUntilConnected(100, TimeUnit.MILLISECONDS));
// Sharing a closed dedicated ZkClient shall fail
try {
new SharedZkClient(zkConnectionManager, new HelixZkClient.ZkClientConfig(), null);
Assert.fail("Sharing a closed dedicated ZkClient shall fail.");
} catch (ZkClientException hex) {
// expected
}
deleteCluster("testZkConnectionManager");
}
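/**
 * SharedZkClients built from the same connection config share one ZK session: each
 * sees the other's changes, ephemeral creation is rejected, closing one leaves the
 * other usable, and closing all of them releases the underlying connection so the
 * next client gets a fresh session.
 */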
@Test(dependsOnMethods = "testZkConnectionManager")
public void testSharingZkClient() throws Exception {
final String TEST_ROOT = "/testSharingZkClient/IDEALSTATES";
final String TEST_PATH = TEST_ROOT + TEST_NODE;
// A factory just for this test, to avoid interference from other tests running in parallel.
final SharedZkClientFactory testFactory = new SharedZkClientFactory();
HelixZkClient.ZkConnectionConfig connectionConfig =
new HelixZkClient.ZkConnectionConfig(ZK_ADDR);
HelixZkClient sharedZkClientA =
testFactory.buildZkClient(connectionConfig, new HelixZkClient.ZkClientConfig());
Assert.assertTrue(sharedZkClientA.waitUntilConnected(1, TimeUnit.SECONDS));
long sessionIdClientA = sharedZkClientA.getSessionId();
HelixZkClient sharedZkClientB =
testFactory.buildZkClient(connectionConfig, new HelixZkClient.ZkClientConfig());
Assert.assertTrue(sharedZkClientB.waitUntilConnected(1, TimeUnit.SECONDS));
long sessionIdClientB = sharedZkClientB.getSessionId();
Assert.assertEquals(testFactory.getActiveConnectionCount(), 1);
// clients A and B share the same session.
Assert.assertEquals(sharedZkClientA.getSessionId(), sharedZkClientB.getSessionId());
long sessionId = sharedZkClientA.getSessionId();
final int[] notificationCountA = {0, 0};
sharedZkClientA.subscribeDataChanges(TEST_PATH, new IZkDataListener() {
@Override
public void handleDataChange(String s, Object o) {
notificationCountA[0]++;
}
@Override
public void handleDataDeleted(String s) {
notificationCountA[1]++;
}
});
final int[] notificationCountB = {0, 0};
sharedZkClientB.subscribeDataChanges(TEST_PATH, new IZkDataListener() {
@Override
public void handleDataChange(String s, Object o) {
notificationCountB[0]++;
}
@Override
public void handleDataDeleted(String s) {
notificationCountB[1]++;
}
});
// Modify using client A; client B will get the notification.
// Note: if the session changed in between, we may get a dataDeleted callback. This is the
// current zkclient behavior.
sharedZkClientA.createPersistent(TEST_PATH, true);
Assert.assertTrue(TestHelper.verify(() -> notificationCountB[0] == 1, TestHelper.WAIT_DURATION),
String.format("Original sidA before %d and sidB %d, current sidA %d, sidB %d",
sessionIdClientA, sessionIdClientB, sharedZkClientA.getSessionId(), sharedZkClientB.getSessionId()));
Assert.assertEquals(notificationCountB[1], 0);
sharedZkClientA.deleteRecursively(TEST_ROOT);
Assert.assertTrue(TestHelper.verify(() -> notificationCountB[1] == 1, TestHelper.WAIT_DURATION));
Assert.assertEquals(notificationCountB[0], 1);
try {
sharedZkClientA.createEphemeral(TEST_PATH, true);
Assert.fail("Create Ephemeral nodes using shared client should fail.");
} catch (UnsupportedOperationException e) {
// expected.
}
sharedZkClientA.close();
// Shared client A closed.
Assert.assertTrue(sharedZkClientA.isClosed());
Assert.assertFalse(sharedZkClientA.waitUntilConnected(100, TimeUnit.MILLISECONDS));
// Shared client B still open.
Assert.assertFalse(sharedZkClientB.isClosed());
Assert.assertTrue(sharedZkClientB.waitUntilConnected(100, TimeUnit.MILLISECONDS));
// client A cannot perform any writes once closed.
try {
sharedZkClientA.createPersistent(TEST_PATH, true);
Assert.fail("Should not be able to create node with a closed client.");
} catch (Exception e) {
// expected to be here.
}
// client B needs to re-install the data watch
sharedZkClientB.watchForData(TEST_PATH);
// Now modify using client B; client A won't get a notification.
sharedZkClientB.createPersistent(TEST_PATH, true);
Assert.assertTrue(TestHelper.verify(() -> notificationCountB[0] == 2, TestHelper.WAIT_DURATION));
Assert.assertFalse(TestHelper.verify(() -> notificationCountA[0] == 2, TestHelper.WAIT_DURATION));
sharedZkClientB.deleteRecursively(TEST_ROOT);
Assert.assertEquals(testFactory.getActiveConnectionCount(), 1);
sharedZkClientB.close();
// Shared client B closed.
Assert.assertTrue(sharedZkClientB.isClosed());
Assert.assertFalse(sharedZkClientB.waitUntilConnected(100, TimeUnit.MILLISECONDS));
Assert.assertEquals(testFactory.getActiveConnectionCount(), 0);
// Try to create a new shared ZkClient; it will get a different session
HelixZkClient sharedZkClientC =
testFactory.buildZkClient(connectionConfig, new HelixZkClient.ZkClientConfig());
Assert.assertFalse(sessionId == sharedZkClientC.getSessionId());
Assert.assertEquals(testFactory.getActiveConnectionCount(), 1);
sharedZkClientC.close();
// Shared client C closed.
Assert.assertTrue(sharedZkClientC.isClosed());
Assert.assertFalse(sharedZkClientC.waitUntilConnected(100, TimeUnit.MILLISECONDS));
Assert.assertEquals(testFactory.getActiveConnectionCount(), 0);
deleteCluster("testSharingZkClient");
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/common/ZkTestBase.java
package org.apache.helix.common;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixProperty;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.SystemPropertyKeys;
import org.apache.helix.TestHelper;
import org.apache.helix.api.config.HelixConfigProperty;
import org.apache.helix.controller.pipeline.AbstractAsyncBaseStage;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.controller.pipeline.Stage;
import org.apache.helix.controller.pipeline.StageContext;
import org.apache.helix.controller.rebalancer.DelayedAutoRebalancer;
import org.apache.helix.controller.rebalancer.strategy.AutoRebalanceStrategy;
import org.apache.helix.controller.rebalancer.waged.WagedRebalancer;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.ClusterEvent;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.OnlineOfflineSMD;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.ClusterStateVerifier;
import org.apache.helix.tools.StateModelConfigGenerator;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
import org.apache.helix.zookeeper.zkclient.ZkServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.ITestContext;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterSuite;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.BeforeSuite;
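/**
 * Base class for ZooKeeper-based unit tests: starts one or more in-memory ZooKeeper
 * servers (controlled by the "multiZk" and "numZk" system properties), exposes shared
 * ZkClient/ClusterSetup/data-accessor handles, and provides helpers for tweaking
 * cluster and instance configs in tests.
 */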
public class ZkTestBase {
private static final Logger LOG = LoggerFactory.getLogger(ZkTestBase.class);
private static final String MULTI_ZK_PROPERTY_KEY = "multiZk";
private static final String NUM_ZK_PROPERTY_KEY = "numZk";
protected static ZkServer _zkServer;
protected static HelixZkClient _gZkClient;
protected static ClusterSetup _gSetupTool;
protected static BaseDataAccessor<ZNRecord> _baseAccessor;
protected static MBeanServerConnection _server = ManagementFactory.getPlatformMBeanServer();
private final Map<String, Map<String, HelixZkClient>> _liveInstanceOwners = new HashMap<>();
private static final String ZK_PREFIX = "localhost:";
private static final int ZK_START_PORT = 2183;
public static final String ZK_ADDR = ZK_PREFIX + ZK_START_PORT;
protected static final String CLUSTER_PREFIX = "CLUSTER";
protected static final String CONTROLLER_CLUSTER_PREFIX = "CONTROLLER_CLUSTER";
protected final String CONTROLLER_PREFIX = "controller";
protected final String PARTICIPANT_PREFIX = "localhost";
private static final long MANUAL_GC_PAUSE = 4000L;
/*
* Multiple ZK references
*/
// The following maps hold ZK connect string as keys
protected static final Map<String, ZkServer> _zkServerMap = new HashMap<>();
protected static final Map<String, HelixZkClient> _helixZkClientMap = new HashMap<>();
protected static final Map<String, ClusterSetup> _clusterSetupMap = new HashMap<>();
protected static final Map<String, BaseDataAccessor> _baseDataAccessorMap = new HashMap<>();
/** Prints total physical memory and the JVM's total/free memory, for test diagnostics. */
public static void reportPhysicalMemory() {
com.sun.management.OperatingSystemMXBean os = (com.sun.management.OperatingSystemMXBean)
java.lang.management.ManagementFactory.getOperatingSystemMXBean();
long physicalMemorySize = os.getTotalPhysicalMemorySize();
System.out.println("************ SYSTEM Physical Memory:" + physicalMemorySize);
long MB = 1024 * 1024;
Runtime runtime = Runtime.getRuntime();
long free = runtime.freeMemory() / MB;
long total = runtime.totalMemory() / MB;
System.out.println("************ total memory:" + total + " free memory:" + free);
}
@BeforeSuite
public void beforeSuite() throws Exception {
// TODO: use logging.properties file to config java.util.logging.Logger levels
java.util.logging.Logger topJavaLogger = java.util.logging.Logger.getLogger("");
topJavaLogger.setLevel(Level.WARNING);
// Due to the ZOOKEEPER-2693 fix, we need to whitelist the zk four-letter commands we execute
System.setProperty("zookeeper.4lw.commands.whitelist", "*");
System.setProperty(SystemPropertyKeys.CONTROLLER_MESSAGE_PURGE_DELAY, "3000");
// Start in-memory ZooKeepers
// If multi-ZooKeeper is enabled, start more ZKs. Otherwise, just set up one ZK
int numZkToStart = 1;
String multiZkConfig = System.getProperty(MULTI_ZK_PROPERTY_KEY);
if (multiZkConfig != null && multiZkConfig.equalsIgnoreCase(Boolean.TRUE.toString())) {
String numZkFromConfig = System.getProperty(NUM_ZK_PROPERTY_KEY);
if (numZkFromConfig != null) {
try {
numZkToStart = Math.max(Integer.parseInt(numZkFromConfig), numZkToStart);
} catch (Exception e) {
Assert.fail("Failed to parse the number of ZKs from config!");
}
} else {
Assert.fail("multiZk config is set but numZk config is missing!");
}
}
// Start "numZkFromConfigInt" ZooKeepers
for (int i = 0; i < numZkToStart; i++) {
startZooKeeper(i);
}
// Set the references for backward-compatibility with a single ZK environment
_zkServer = _zkServerMap.get(ZK_ADDR);
_gZkClient = _helixZkClientMap.get(ZK_ADDR);
_gSetupTool = _clusterSetupMap.get(ZK_ADDR);
_baseAccessor = _baseDataAccessorMap.get(ZK_ADDR);
// Clean up all JMX objects
cleanupJMXObjects();
}
/**
* Starts an additional in-memory ZooKeeper for testing.
* @param i index to be added to the ZK port to avoid conflicts
*/
private static synchronized void startZooKeeper(int i) {
String zkAddress = ZK_PREFIX + (ZK_START_PORT + i);
_zkServerMap.computeIfAbsent(zkAddress, ZkTestBase::createZookeeperServer);
_helixZkClientMap.computeIfAbsent(zkAddress, ZkTestBase::createZkClient);
_clusterSetupMap.computeIfAbsent(zkAddress, key -> new ClusterSetup(_helixZkClientMap.get(key)));
_baseDataAccessorMap.computeIfAbsent(zkAddress, key -> new ZkBaseDataAccessor(_helixZkClientMap.get(key)));
}
private static ZkServer createZookeeperServer(String zkAddress) {
try {
return Preconditions.checkNotNull(TestHelper.startZkServer(zkAddress));
} catch (Exception e) {
throw new IllegalArgumentException("Failed to start zookeeper server at " + zkAddress, e);
}
}
private static HelixZkClient createZkClient(String zkAddress) {
HelixZkClient.ZkClientConfig clientConfig = new HelixZkClient.ZkClientConfig();
clientConfig.setZkSerializer(new ZNRecordSerializer());
return DedicatedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(zkAddress), clientConfig);
}
@AfterSuite
public void afterSuite() throws IOException {
// Clean up all JMX objects
cleanupJMXObjects();
synchronized (ZkTestBase.class) {
// Close all ZK resources
_baseDataAccessorMap.values().forEach(BaseDataAccessor::close);
_clusterSetupMap.values().forEach(ClusterSetup::close);
_helixZkClientMap.values().forEach(HelixZkClient::close);
_zkServerMap.values().forEach(TestHelper::stopZkServer);
}
}
@BeforeClass
public void beforeClass() throws Exception {
cleanupJMXObjects();
// Give each test some time to settle (gc pauses, etc.).
// Note that this is a best effort to stabilize tests, not a complete solution.
Runtime.getRuntime().gc();
Thread.sleep(MANUAL_GC_PAUSE);
}
@BeforeMethod
public void beforeTest(Method testMethod, ITestContext testContext) {
testContext.setAttribute("StartTime", System.currentTimeMillis());
}
protected void cleanupJMXObjects() throws IOException {
// Clean up all JMX objects
for (ObjectName mbean : _server.queryNames(null, null)) {
try {
_server.unregisterMBean(mbean);
} catch (Exception e) {
// OK
}
}
}
protected String getShortClassName() {
return this.getClass().getSimpleName();
}
protected String getCurrentLeader(HelixZkClient zkClient, String clusterName) {
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(zkClient));
Builder keyBuilder = accessor.keyBuilder();
LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader());
if (leader == null) {
return null;
}
return leader.getInstanceName();
}
protected void enablePersistBestPossibleAssignment(HelixZkClient zkClient, String clusterName,
Boolean enabled) {
ConfigAccessor configAccessor = new ConfigAccessor(zkClient);
ClusterConfig clusterConfig = configAccessor.getClusterConfig(clusterName);
clusterConfig.setPersistBestPossibleAssignment(enabled);
configAccessor.setClusterConfig(clusterName, clusterConfig);
}
protected void enablePersistIntermediateAssignment(HelixZkClient zkClient, String clusterName,
Boolean enabled) {
ConfigAccessor configAccessor = new ConfigAccessor(zkClient);
ClusterConfig clusterConfig = configAccessor.getClusterConfig(clusterName);
clusterConfig.setPersistIntermediateAssignment(enabled);
configAccessor.setClusterConfig(clusterName, clusterConfig);
}
protected void enableTopologyAwareRebalance(HelixZkClient zkClient, String clusterName,
Boolean enabled) {
ConfigAccessor configAccessor = new ConfigAccessor(zkClient);
ClusterConfig clusterConfig = configAccessor.getClusterConfig(clusterName);
clusterConfig.setTopologyAwareEnabled(enabled);
configAccessor.setClusterConfig(clusterName, clusterConfig);
}
protected void enableDelayRebalanceInCluster(HelixZkClient zkClient, String clusterName,
boolean enabled) {
ConfigAccessor configAccessor = new ConfigAccessor(zkClient);
ClusterConfig clusterConfig = configAccessor.getClusterConfig(clusterName);
clusterConfig.setDelayRebalaceEnabled(enabled);
configAccessor.setClusterConfig(clusterName, clusterConfig);
}
protected void enableDelayRebalanceInInstance(HelixZkClient zkClient, String clusterName,
String instanceName, boolean enabled) {
ConfigAccessor configAccessor = new ConfigAccessor(zkClient);
InstanceConfig instanceConfig = configAccessor.getInstanceConfig(clusterName, instanceName);
instanceConfig.setDelayRebalanceEnabled(enabled);
configAccessor.setInstanceConfig(clusterName, instanceName, instanceConfig);
}
protected void enableDelayRebalanceInCluster(HelixZkClient zkClient, String clusterName,
boolean enabled, long delay) {
ConfigAccessor configAccessor = new ConfigAccessor(zkClient);
ClusterConfig clusterConfig = configAccessor.getClusterConfig(clusterName);
clusterConfig.setDelayRebalaceEnabled(enabled);
clusterConfig.setRebalanceDelayTime(delay);
configAccessor.setClusterConfig(clusterName, clusterConfig);
}
protected void enableP2PInCluster(String clusterName, ConfigAccessor configAccessor,
boolean enable) {
// enable p2p message in cluster.
if (enable) {
ClusterConfig clusterConfig = configAccessor.getClusterConfig(clusterName);
clusterConfig.enableP2PMessage(true);
configAccessor.setClusterConfig(clusterName, clusterConfig);
} else {
ClusterConfig clusterConfig = configAccessor.getClusterConfig(clusterName);
clusterConfig.getRecord().getSimpleFields()
.remove(HelixConfigProperty.P2P_MESSAGE_ENABLED.name());
configAccessor.setClusterConfig(clusterName, clusterConfig);
}
}
protected void enableP2PInResource(String clusterName, ConfigAccessor configAccessor,
String dbName, boolean enable) {
if (enable) {
ResourceConfig resourceConfig =
new ResourceConfig.Builder(dbName).setP2PMessageEnabled(true).build();
configAccessor.setResourceConfig(clusterName, dbName, resourceConfig);
} else {
// remove P2P Message in resource config
ResourceConfig resourceConfig = configAccessor.getResourceConfig(clusterName, dbName);
if (resourceConfig != null) {
resourceConfig.getRecord().getSimpleFields()
.remove(HelixConfigProperty.P2P_MESSAGE_ENABLED.name());
configAccessor.setResourceConfig(clusterName, dbName, resourceConfig);
}
}
}
protected void setDelayTimeInCluster(HelixZkClient zkClient, String clusterName, long delay) {
ConfigAccessor configAccessor = new ConfigAccessor(zkClient);
ClusterConfig clusterConfig = configAccessor.getClusterConfig(clusterName);
clusterConfig.setRebalanceDelayTime(delay);
configAccessor.setClusterConfig(clusterName, clusterConfig);
}
protected void setLastOnDemandRebalanceTimeInCluster(HelixZkClient zkClient,
String clusterName, long lastOnDemandTime) {
ConfigAccessor configAccessor = new ConfigAccessor(zkClient);
ClusterConfig clusterConfig = configAccessor.getClusterConfig(clusterName);
clusterConfig.setLastOnDemandRebalanceTimestamp(lastOnDemandTime);
configAccessor.setClusterConfig(clusterName, clusterConfig);
}
protected IdealState createResourceWithDelayedRebalance(String clusterName, String db,
String stateModel, int numPartition, int replica, int minActiveReplica, long delay) {
return createResourceWithDelayedRebalance(clusterName, db, stateModel, numPartition, replica,
minActiveReplica, delay, AutoRebalanceStrategy.class.getName());
}
protected IdealState createResourceWithDelayedRebalance(String clusterName, String db,
String stateModel, int numPartition, int replica, int minActiveReplica, long delay,
String rebalanceStrategy) {
return createResource(clusterName, db, stateModel, numPartition, replica, minActiveReplica,
delay, DelayedAutoRebalancer.class.getName(), rebalanceStrategy);
}
protected IdealState createResourceWithWagedRebalance(String clusterName, String db,
String stateModel, int numPartition, int replica, int minActiveReplica) {
return createResource(clusterName, db, stateModel, numPartition, replica, minActiveReplica,
-1, WagedRebalancer.class.getName(), null);
}
private IdealState createResource(String clusterName, String db, String stateModel,
int numPartition, int replica, int minActiveReplica, long delay, String rebalancerClassName,
String rebalanceStrategy) {
IdealState idealState =
_gSetupTool.getClusterManagementTool().getResourceIdealState(clusterName, db);
if (idealState == null) {
_gSetupTool.addResourceToCluster(clusterName, db, numPartition, stateModel,
IdealState.RebalanceMode.FULL_AUTO + "", rebalanceStrategy);
}
idealState = _gSetupTool.getClusterManagementTool().getResourceIdealState(clusterName, db);
idealState.setMinActiveReplicas(minActiveReplica);
if (!idealState.isDelayRebalanceEnabled()) {
idealState.setDelayRebalanceEnabled(true);
}
if (delay > 0) {
idealState.setRebalanceDelay(delay);
}
idealState.setRebalancerClassName(rebalancerClassName);
_gSetupTool.getClusterManagementTool().setResourceIdealState(clusterName, db, idealState);
_gSetupTool.rebalanceStorageCluster(clusterName, db, replica);
idealState = _gSetupTool.getClusterManagementTool().getResourceIdealState(clusterName, db);
return idealState;
}
protected IdealState createIdealState(String resourceGroupName, String instanceGroupTag,
List<String> instanceNames, int numPartition, int replica, String rebalanceMode,
String stateModelDef) {
IdealState is = _gSetupTool.createIdealStateForResourceGroup(resourceGroupName,
instanceGroupTag, numPartition, replica, rebalanceMode, stateModelDef);
// setup initial partition->instance mapping.
int nodeIdx = 0;
int numNode = instanceNames.size();
assert (numNode >= replica);
for (int i = 0; i < numPartition; i++) {
String partitionName = resourceGroupName + "_" + i;
for (int j = 0; j < replica; j++) {
is.setPartitionState(partitionName, instanceNames.get((nodeIdx + j) % numNode),
OnlineOfflineSMD.States.ONLINE.toString());
}
nodeIdx++;
}
return is;
}
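  // Worked example of the mapping built above (hypothetical values): with numPartition=2,
  // replica=2 and instanceNames=[n0, n1, n2], partition "<resourceGroup>_0" is assigned to
  // {n0, n1} and "<resourceGroup>_1" to {n1, n2}; nodeIdx advances once per partition, so
  // replicas rotate round-robin across the node list.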
protected void createDBInSemiAuto(ClusterSetup clusterSetup, String clusterName, String dbName,
List<String> preferenceList, String stateModelDef, int numPartition, int replica) {
clusterSetup.addResourceToCluster(clusterName, dbName, numPartition, stateModelDef,
IdealState.RebalanceMode.SEMI_AUTO.toString());
clusterSetup.rebalanceStorageCluster(clusterName, dbName, replica);
IdealState is =
_gSetupTool.getClusterManagementTool().getResourceIdealState(clusterName, dbName);
for (String p : is.getPartitionSet()) {
is.setPreferenceList(p, preferenceList);
}
clusterSetup.getClusterManagementTool().setResourceIdealState(clusterName, dbName, is);
}
  /**
   * Validate that every partition always has at least the minimum number of active replicas
   * and a top-state replica.
   * Also make sure there are always some partitions with only the minimum active replica count.
   */
protected void validateMinActiveAndTopStateReplica(IdealState is, ExternalView ev,
int minActiveReplica, int numNodes) {
StateModelDefinition stateModelDef =
BuiltInStateModelDefinitions.valueOf(is.getStateModelDefRef()).getStateModelDefinition();
String topState = stateModelDef.getStatesPriorityList().get(0);
int replica = Integer.valueOf(is.getReplicas());
Map<String, Integer> stateCount = stateModelDef.getStateCountMap(numNodes, replica);
Set<String> activeStates = stateCount.keySet();
for (String partition : is.getPartitionSet()) {
Map<String, String> assignmentMap = ev.getRecord().getMapField(partition);
Assert.assertNotNull(assignmentMap,
is.getResourceName() + "'s best possible assignment is null for partition " + partition);
Assert.assertTrue(!assignmentMap.isEmpty(),
is.getResourceName() + "'s partition " + partition + " has no best possible map in IS.");
boolean hasTopState = false;
int activeReplica = 0;
for (String state : assignmentMap.values()) {
if (topState.equalsIgnoreCase(state)) {
hasTopState = true;
}
if (activeStates.contains(state)) {
activeReplica++;
}
}
      Assert.assertTrue(hasTopState, String.format("%s is missing a %s replica", partition, topState));
      Assert.assertTrue(activeReplica >= minActiveReplica,
          String.format("%s has fewer active replicas (%d) than required (%d)", partition,
              activeReplica, minActiveReplica));
}
}
protected void runStage(HelixManager manager, ClusterEvent event, Stage stage) throws Exception {
event.addAttribute(AttributeName.helixmanager.name(), manager);
StageContext context = new StageContext();
stage.init(context);
stage.preProcess();
    // AbstractAsyncBaseStage runs asynchronously; its main logic is implemented in the
    // execute() call
if (stage instanceof AbstractAsyncBaseStage) {
((AbstractAsyncBaseStage) stage).execute(event);
} else {
stage.process(event);
}
stage.postProcess();
}
public void verifyInstance(HelixZkClient zkClient, String clusterName, String instance,
boolean wantExists) {
// String instanceConfigsPath = HelixUtil.getConfigPath(clusterName);
String instanceConfigsPath = PropertyPathBuilder.instanceConfig(clusterName);
String instanceConfigPath = instanceConfigsPath + "/" + instance;
String instancePath = PropertyPathBuilder.instance(clusterName, instance);
Assert.assertEquals(wantExists, zkClient.exists(instanceConfigPath));
Assert.assertEquals(wantExists, zkClient.exists(instancePath));
}
public void verifyResource(HelixZkClient zkClient, String clusterName, String resource,
boolean wantExists) {
String resourcePath = PropertyPathBuilder.idealState(clusterName, resource);
Assert.assertEquals(wantExists, zkClient.exists(resourcePath));
}
public void verifyEnabled(HelixZkClient zkClient, String clusterName, String instance,
boolean wantEnabled) {
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
Builder keyBuilder = accessor.keyBuilder();
InstanceConfig config = accessor.getProperty(keyBuilder.instanceConfig(instance));
Assert.assertEquals(wantEnabled, config.getInstanceEnabled());
}
public void verifyReplication(HelixZkClient zkClient, String clusterName, String resource,
int repl) {
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
Builder keyBuilder = accessor.keyBuilder();
IdealState idealState = accessor.getProperty(keyBuilder.idealStates(resource));
for (String partitionName : idealState.getPartitionSet()) {
if (idealState.getRebalanceMode() == IdealState.RebalanceMode.SEMI_AUTO) {
Assert.assertEquals(repl, idealState.getPreferenceList(partitionName).size());
} else if (idealState.getRebalanceMode() == IdealState.RebalanceMode.CUSTOMIZED) {
Assert.assertEquals(repl, idealState.getInstanceStateMap(partitionName).size());
}
}
}
protected void setupStateModel(String clusterName) {
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
Builder keyBuilder = accessor.keyBuilder();
StateModelDefinition masterSlave =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave());
accessor.setProperty(keyBuilder.stateModelDef(masterSlave.getId()), masterSlave);
StateModelDefinition leaderStandby =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForLeaderStandby());
accessor.setProperty(keyBuilder.stateModelDef(leaderStandby.getId()), leaderStandby);
StateModelDefinition onlineOffline =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForOnlineOffline());
accessor.setProperty(keyBuilder.stateModelDef(onlineOffline.getId()), onlineOffline);
}
protected Message createMessage(Message.MessageType type, String msgId, String fromState,
String toState, String resourceName, String tgtName) {
Message msg = new Message(type.toString(), msgId);
msg.setFromState(fromState);
msg.setToState(toState);
msg.getRecord().setSimpleField(Message.Attributes.RESOURCE_NAME.toString(), resourceName);
msg.setTgtName(tgtName);
return msg;
}
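  // Illustrative usage (all identifiers hypothetical):
  //   Message msg = createMessage(Message.MessageType.STATE_TRANSITION, "msgId-1",
  //       "OFFLINE", "SLAVE", "TestDB", "localhost_12918");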
protected List<IdealState> setupIdealState(String clusterName, int[] nodes, String[] resources,
int partitions, int replicas) {
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
Builder keyBuilder = accessor.keyBuilder();
List<IdealState> idealStates = new ArrayList<>();
List<String> instances = new ArrayList<>();
for (int i : nodes) {
instances.add("localhost_" + i);
}
for (String resourceName : resources) {
IdealState idealState = new IdealState(resourceName);
for (int p = 0; p < partitions; p++) {
List<String> value = new ArrayList<>();
for (int r = 0; r < replicas; r++) {
int n = nodes[(p + r) % nodes.length];
value.add("localhost_" + n);
}
idealState.getRecord().setListField(resourceName + "_" + p, value);
}
idealState.setReplicas(Integer.toString(replicas));
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(IdealState.RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(partitions);
idealStates.add(idealState);
// System.out.println(idealState);
accessor.setProperty(keyBuilder.idealStates(resourceName), idealState);
}
return idealStates;
}
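  // Worked example of the replica rotation above (hypothetical values): with
  // nodes=[12918, 12919, 12920], partitions=2 and replicas=2, the list fields become
  //   resource_0 -> [localhost_12918, localhost_12919]
  //   resource_1 -> [localhost_12919, localhost_12920]
  // since replica r of partition p lands on nodes[(p + r) % nodes.length].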
@AfterClass
public void cleanupLiveInstanceOwners() throws InterruptedException {
String testClassName = this.getShortClassName();
System.out.println("AfterClass: " + testClassName + " called.");
for (String cluster : _liveInstanceOwners.keySet()) {
Map<String, HelixZkClient> clientMap = _liveInstanceOwners.get(cluster);
for (HelixZkClient client : clientMap.values()) {
client.close();
}
clientMap.clear();
}
_liveInstanceOwners.clear();
}
protected List<LiveInstance> setupLiveInstances(String clusterName, int[] liveInstances) {
HelixZkClient.ZkClientConfig clientConfig = new HelixZkClient.ZkClientConfig();
clientConfig.setZkSerializer(new ZNRecordSerializer());
List<LiveInstance> result = new ArrayList<>();
for (int i = 0; i < liveInstances.length; i++) {
String instance = "localhost_" + liveInstances[i];
_liveInstanceOwners.putIfAbsent(clusterName, new HashMap<>());
Map<String, HelixZkClient> clientMap = _liveInstanceOwners.get(clusterName);
clientMap.putIfAbsent(instance, DedicatedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(ZK_ADDR), clientConfig));
HelixZkClient client = clientMap.get(instance);
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(client));
Builder keyBuilder = accessor.keyBuilder();
LiveInstance liveInstance = new LiveInstance(instance);
      // Keep setting the session id in the deprecated field to ensure the same behavior as a real participant.
      // Note the participant does so for backward compatibility.
liveInstance.setSessionId(Long.toHexString(client.getSessionId()));
// Please refer to the version requirement here: helix-core/src/main/resources/cluster-manager-version.properties
// Ensuring version compatibility can avoid the warning message during test.
liveInstance.setHelixVersion("0.4");
accessor.setProperty(keyBuilder.liveInstance(instance), liveInstance);
result.add(accessor.getProperty(keyBuilder.liveInstance(instance)));
}
return result;
}
protected void deleteLiveInstances(String clusterName) {
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
Builder keyBuilder = accessor.keyBuilder();
Map<String, HelixZkClient> clientMap = _liveInstanceOwners.getOrDefault(clusterName, Collections.emptyMap());
for (String liveInstance : accessor.getChildNames(keyBuilder.liveInstances())) {
ZKHelixDataAccessor dataAccessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
dataAccessor.removeProperty(keyBuilder.liveInstance(liveInstance));
HelixZkClient client = clientMap.remove(liveInstance);
if (client != null) {
client.close();
}
}
if (clientMap.isEmpty()) {
_liveInstanceOwners.remove(clusterName);
}
}
protected void setupInstances(String clusterName, int[] instances) {
HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
for (int i = 0; i < instances.length; i++) {
String instance = "localhost_" + instances[i];
InstanceConfig instanceConfig = new InstanceConfig(instance);
instanceConfig.setHostName("localhost");
instanceConfig.setPort("" + instances[i]);
instanceConfig.setInstanceEnabled(true);
admin.addInstance(clusterName, instanceConfig);
}
}
protected void runPipeline(ClusterEvent event, Pipeline pipeline, boolean shouldThrowException)
throws Exception {
try {
pipeline.handle(event);
pipeline.finish();
} catch (Exception e) {
if (shouldThrowException) {
throw e;
} else {
LOG.error("Exception while executing pipeline: {}. Will not continue to next pipeline",
pipeline, e);
}
}
}
protected void runStage(ClusterEvent event, Stage stage) throws Exception {
StageContext context = new StageContext();
stage.init(context);
stage.preProcess();
    // AbstractAsyncBaseStage runs asynchronously; its main logic is implemented in the
    // execute() call
    // TODO (harry): duplicated code in ZkIntegrationTestBase, consider moving runStage()
    // to a shared library
if (stage instanceof AbstractAsyncBaseStage) {
((AbstractAsyncBaseStage) stage).execute(event);
} else {
stage.process(event);
}
stage.postProcess();
}
protected void deleteCluster(String clusterName) {
TestHelper.dropCluster(clusterName, _gZkClient, _gSetupTool);
}
/**
* Poll for the existence (or lack thereof) of a specific Helix property
   * @param clazz the HelixProperty subclass
* @param accessor connected HelixDataAccessor
* @param key the property key to look up
* @param shouldExist true if the property should exist, false otherwise
* @return the property if found, or null if it does not exist
*/
protected <T extends HelixProperty> T pollForProperty(Class<T> clazz, HelixDataAccessor accessor,
PropertyKey key, boolean shouldExist) throws InterruptedException {
final int POLL_TIMEOUT = 5000;
final int POLL_INTERVAL = 50;
T property = accessor.getProperty(key);
int timeWaited = 0;
while (((shouldExist && property == null) || (!shouldExist && property != null))
&& timeWaited < POLL_TIMEOUT) {
Thread.sleep(POLL_INTERVAL);
timeWaited += POLL_INTERVAL;
property = accessor.getProperty(key);
}
return property;
}
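  // Illustrative usage (accessor and resource name are hypothetical):
  //   ExternalView ev = pollForProperty(ExternalView.class, accessor,
  //       accessor.keyBuilder().externalView("TestDB"), true);
  //   Assert.assertNotNull(ev, "external view should appear within POLL_TIMEOUT");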
/**
* Ensures that external view and current state are empty
*/
protected static class EmptyZkVerifier implements ClusterStateVerifier.ZkVerifier {
private final String _clusterName;
private final String _resourceName;
private final HelixZkClient _zkClient;
/**
* Instantiate the verifier
* @param clusterName the cluster to verify
* @param resourceName the resource to verify
*/
public EmptyZkVerifier(String clusterName, String resourceName) {
_clusterName = clusterName;
_resourceName = resourceName;
_zkClient = DedicatedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(ZK_ADDR));
_zkClient.setZkSerializer(new ZNRecordSerializer());
}
@Override
public boolean verify() {
BaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<ZNRecord>(_zkClient);
HelixDataAccessor accessor = new ZKHelixDataAccessor(_clusterName, baseAccessor);
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
ExternalView externalView = accessor.getProperty(keyBuilder.externalView(_resourceName));
// verify external view empty
if (externalView != null) {
for (String partition : externalView.getPartitionSet()) {
Map<String, String> stateMap = externalView.getStateMap(partition);
if (stateMap != null && !stateMap.isEmpty()) {
LOG.error("External view not empty for " + partition);
return false;
}
}
}
// verify current state empty
List<String> liveParticipants = accessor.getChildNames(keyBuilder.liveInstances());
for (String participant : liveParticipants) {
List<String> sessionIds = accessor.getChildNames(keyBuilder.sessions(participant));
for (String sessionId : sessionIds) {
CurrentState currentState =
accessor.getProperty(keyBuilder.currentState(participant, sessionId, _resourceName));
Map<String, String> partitionStateMap = currentState.getPartitionStateMap();
if (partitionStateMap != null && !partitionStateMap.isEmpty()) {
LOG.error("Current state not empty for " + participant);
return false;
}
}
List<String> taskSessionIds =
accessor.getChildNames(keyBuilder.taskCurrentStateSessions(participant));
for (String sessionId : taskSessionIds) {
CurrentState taskCurrentState = accessor
.getProperty(keyBuilder.taskCurrentState(participant, sessionId, _resourceName));
Map<String, String> taskPartitionStateMap = taskCurrentState.getPartitionStateMap();
if (taskPartitionStateMap != null && !taskPartitionStateMap.isEmpty()) {
LOG.error("Task current state not empty for " + participant);
return false;
}
}
}
return true;
}
@Override
public ZkClient getZkClient() {
return (ZkClient) _zkClient;
}
@Override
public String getClusterName() {
return _clusterName;
}
}
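  // Illustrative usage of EmptyZkVerifier (cluster/resource names hypothetical; assumes the
  // ClusterStateVerifier.verifyByZkCallback(ZkVerifier) helper):
  //   boolean empty = ClusterStateVerifier.verifyByZkCallback(
  //       new EmptyZkVerifier("TestCluster", "TestDB"));
  //   Assert.assertTrue(empty, "external view and current states should be empty");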
}
| 9,816 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/common | Create_ds/helix/helix-core/src/test/java/org/apache/helix/common/caches/TestPropertyCache.java | package org.apache.helix.common.caches;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixProperty;
import org.apache.helix.PropertyKey;
import org.apache.helix.common.controllers.ControlContextProvider;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.apache.helix.PropertyType.IDEALSTATES;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Unit test for {@link PropertyCache}
*/
public class TestPropertyCache {
private static final ControlContextProvider MOCK_CONTROL_CONTEXT_PROVIDER =
new ControlContextProvider() {
@Override
public String getClusterName() {
return "mockCluster";
}
@Override
public String getClusterEventId() {
return "id";
}
@Override
public void setClusterEventId(String eventId) {
}
@Override
public String getPipelineName() {
return "pipeline";
}
};
@Test(description = "Unit test for simple cache refresh")
public void testSimpleCacheRefresh() {
PropertyCache.PropertyCacheKeyFuncs propertyCacheKeyFuncs =
mock(PropertyCache.PropertyCacheKeyFuncs.class);
    // Return an arbitrary property key; it does not affect the test result.
when(propertyCacheKeyFuncs.getRootKey(any(HelixDataAccessor.class)))
.thenReturn(new PropertyKey(IDEALSTATES, IdealState.class, "Foobar"));
PropertyCache<HelixProperty> propertyCache =
new PropertyCache<>(MOCK_CONTROL_CONTEXT_PROVIDER, "mock property cache",
propertyCacheKeyFuncs, false);
HelixDataAccessor accessor = mock(HelixDataAccessor.class);
Map<String, HelixProperty> propertyConfigMap = ImmutableMap.of("id", new HelixProperty("test"));
when(accessor.getChildValuesMap(any(PropertyKey.class), anyBoolean()))
.thenReturn(propertyConfigMap);
propertyCache.refresh(accessor);
Assert.assertEquals(propertyCache.getPropertyMap(), propertyConfigMap);
Assert.assertEquals(propertyCache.getPropertyByName("id"), new HelixProperty("test"));
}
@Test(description = "Unit test for selective cache refresh")
public void testSelectivePropertyRefreshInputs() {
HelixDataAccessor accessor = mock(HelixDataAccessor.class);
Map<String, HelixProperty> currentCache = ImmutableMap.of("instance0",
new HelixProperty("key0"), "instance1", new HelixProperty("key1"));
PropertyCache.PropertyCacheKeyFuncs<HelixProperty> mockCacheKeyFuncs =
new PropertyCache.PropertyCacheKeyFuncs<HelixProperty>() {
@Override
public PropertyKey getRootKey(HelixDataAccessor accessor) {
return mock(PropertyKey.class);
}
@Override
public PropertyKey getObjPropertyKey(HelixDataAccessor accessor, String objName) {
return new PropertyKey.Builder("fake").instance(objName);
}
@Override
public String getObjName(HelixProperty obj) {
return obj.getRecord().getId();
}
};
when(accessor.getChildNames(any(PropertyKey.class)))
.thenReturn(ImmutableList.of("instance1", "instance2"));
@SuppressWarnings("unchecked")
PropertyCache<HelixProperty> propertyCache = new PropertyCache<>(MOCK_CONTROL_CONTEXT_PROVIDER,
"mock property cache", mock(PropertyCache.PropertyCacheKeyFuncs.class), false);
PropertyCache.SelectivePropertyRefreshInputs<HelixProperty> selectivePropertyRefreshInputs =
propertyCache.genSelectiveUpdateInput(accessor, currentCache, mockCacheKeyFuncs);
Assert.assertEquals(selectivePropertyRefreshInputs.getReloadKeys().size(), 1);
Assert.assertEquals(selectivePropertyRefreshInputs.getReloadKeys().get(0),
new PropertyKey.Builder("fake").instance("instance2"));
}
@Test(description = "First set the property cache and update the object from caller")
public void testDefensiveCopyOnDataUpdate() {
@SuppressWarnings("unchecked")
PropertyCache<HelixProperty> propertyCache = new PropertyCache<>(MOCK_CONTROL_CONTEXT_PROVIDER,
"mock property cache", mock(PropertyCache.PropertyCacheKeyFuncs.class), false);
HelixProperty helixProperty = new HelixProperty("id");
Map<String, HelixProperty> propertyConfigMap = new HashMap<>();
propertyCache.setPropertyMap(propertyConfigMap);
    // mutate the property map from outside
propertyConfigMap.put("id", helixProperty);
Assert.assertTrue(propertyCache.getPropertyMap().isEmpty());
}
  // TODO: investigate whether deep copy is needed for PropertyCache
@Test(enabled = false, description = "First set the property cache and mutate the object from caller")
public void testDefensiveCopyOnDataMutate() {
// init
@SuppressWarnings("unchecked")
PropertyCache<InstanceConfig> propertyCache = new PropertyCache<>(MOCK_CONTROL_CONTEXT_PROVIDER,
"mock property cache", mock(PropertyCache.PropertyCacheKeyFuncs.class), false);
InstanceConfig instanceConfig = new InstanceConfig("id");
Map<String, InstanceConfig> propertyConfigMap = ImmutableMap.of("id", instanceConfig);
propertyCache.setPropertyMap(propertyConfigMap);
// mutate the property from outside
instanceConfig.setHostName("fakeHost");
String hostName = propertyCache.getPropertyByName("id").getHostName();
Assert.assertTrue(hostName.isEmpty());
}
}
| 9,817 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/common | Create_ds/helix/helix-core/src/test/java/org/apache/helix/common/caches/TestCurrentStateSnapshot.java | package org.apache.helix.common.caches;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.helix.MockAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.LiveInstance;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* Unit test for {@link CurrentStateSnapshot}
*/
public class TestCurrentStateSnapshot {
  // This test makes sure that the currentStateEndTimes calculation records the correct
  // partition replicas. Specifically, if a replica does not have its endTime field set, we
  // should not put an entry into the currentStateEndTimes calculation. Otherwise, we would
  // see huge state propagation latencies (around 1.4T ms).
@Test(description = "test getNewCurrentStateEndTimes")
public void testGetNewCurrentStateEndTimes() {
String instance1 = "instance1";
String session1 = "session1";
String resource1 = "resource1";
String partition1 = "partition1";
String partition2 = "partition2";
PropertyKey key = new PropertyKey.Builder("cluster").currentState(instance1, session1, resource1);
CurrentState nxtState = new CurrentState(resource1);
// partition 1, expect to record in endTimesMap
nxtState.setState(partition1, "SLAVE");
nxtState.setEndTime(partition1, 200);
    // partition 2: expect no entry in endTimesMap. This fixes the observed 1.4T timestamp issue.
nxtState.setState(partition2, "MASTER");
Map<PropertyKey, CurrentState> currentStateMap = new HashMap<>();
Map<PropertyKey, CurrentState> nextStateMap = new HashMap<>();
nextStateMap.put(key, nxtState);
Set<PropertyKey> updateKeys = new HashSet<>();
updateKeys.add(key);
CurrentStateSnapshot snapshot = new CurrentStateSnapshot(nextStateMap, currentStateMap, updateKeys);
Map<PropertyKey, Map<String, Long>> endTimesMap = snapshot.getNewCurrentStateEndTimes();
Assert.assertEquals(endTimesMap.size(), 1);
Assert.assertTrue(endTimesMap.get(key).get(partition1) == 200);
}
  // This test makes sure that every changed current state is reflected in the
  // newCurrentStateEndTimes calculation. Previously, there was a bug where only newly created
  // current states (not updates to existing ones) were reflected in the calculation.
@Test(description = "testRefreshCurrentStateCache")
public void testRefreshCurrentStateCache() {
String instanceName = "instance1";
long instanceSession = 12345;
String resourceName = "resource";
String partitionName = "resource_partition1";
MockAccessor accessor = new MockAccessor();
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
// construct liveInstance
ZNRecord record = new ZNRecord(instanceName);
record.setEphemeralOwner(instanceSession);
LiveInstance instance = new LiveInstance(record);
boolean retVal = accessor.setProperty(keyBuilder.liveInstance(instanceName), instance);
Assert.assertTrue(retVal);
// construct currentstate
CurrentState originState = new CurrentState(resourceName);
originState.setEndTime(partitionName, 100);
CurrentState currentState = new CurrentState(resourceName);
currentState.setEndTime(partitionName, 300);
retVal = accessor.setProperty(keyBuilder.currentState(instanceName, instance.getEphemeralOwner(), resourceName),
originState);
Assert.assertTrue(retVal);
CurrentStateCache cache = new CurrentStateCache("cluster");
Map<String, LiveInstance> liveInstanceMap = new HashMap<>();
liveInstanceMap.put(instanceName, instance);
retVal = cache.refresh(accessor, liveInstanceMap);
Assert.assertTrue(retVal);
retVal = accessor.setProperty(keyBuilder.currentState(instanceName, instance.getEphemeralOwner(), resourceName),
currentState);
Assert.assertTrue(retVal);
retVal = cache.refresh(accessor, liveInstanceMap);
Assert.assertTrue(retVal);
CurrentStateSnapshot snapshot = cache.getSnapshot();
Map<PropertyKey, Map<String, Long>> endTimesMap = snapshot.getNewCurrentStateEndTimes();
Assert.assertEquals(endTimesMap.size(), 1);
// note, without this fix, the endTimesMap would be size zero.
Assert.assertTrue(endTimesMap.get(keyBuilder.currentState(instanceName, instance.getEphemeralOwner(), resourceName))
.get(partitionName) == 300);
}
}
| 9,818 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud/MockHttpClient.java | package org.apache.helix.cloud;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.InputStream;
import org.apache.http.HttpEntity;
import org.apache.http.StatusLine;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.mockito.Matchers;
import org.mockito.Mockito;
/**
 * Mocks an HTTP client whose responses are served from a resource file. For unit-test purposes only.
 */
public class MockHttpClient {
protected CloseableHttpClient createMockHttpClient(String file) throws Exception {
InputStream responseInputStream = Thread.currentThread().getContextClassLoader().getResourceAsStream(file);
HttpEntity httpEntity = Mockito.mock(HttpEntity.class);
StatusLine statusLine = Mockito.mock(StatusLine.class);
CloseableHttpResponse mockCloseableHttpResponse = Mockito.mock(CloseableHttpResponse.class);
CloseableHttpClient mockCloseableHttpClient = Mockito.mock(CloseableHttpClient.class);
Mockito.when(httpEntity.getContent()).thenReturn(responseInputStream);
Mockito.when(mockCloseableHttpClient.execute(Matchers.any(HttpGet.class))).thenReturn(mockCloseableHttpResponse);
Mockito.when(mockCloseableHttpResponse.getEntity()).thenReturn(httpEntity);
Mockito.when(mockCloseableHttpResponse.getStatusLine()).thenReturn(statusLine);
Mockito.when(statusLine.getStatusCode()).thenReturn(200);
return mockCloseableHttpClient;
}
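  // Illustrative usage (resource file name hypothetical): any HttpGet executed against the
  // returned client yields HTTP 200 with the file's bytes as the response entity:
  //   CloseableHttpClient client = createMockHttpClient("SomeResponse.json");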
} | 9,819 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud/TestAzureCloudInstanceInformationProcessor.java | package org.apache.helix.cloud;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import org.apache.helix.HelixCloudProperty;
import org.apache.helix.api.cloud.CloudInstanceInformation;
import org.apache.helix.cloud.azure.AzureCloudInstanceInformation;
import org.apache.helix.cloud.azure.AzureCloudInstanceInformationProcessor;
import org.apache.helix.cloud.constants.CloudProvider;
import org.apache.helix.model.CloudConfig;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* Unit test for {@link AzureCloudInstanceInformationProcessor}
*/
public class TestAzureCloudInstanceInformationProcessor extends MockHttpClient {
@Test()
public void testAzureCloudInstanceInformationProcessing() throws Exception {
String responseFile = "AzureResponse.json";
CloudConfig.Builder cloudConfigBuilder = new CloudConfig.Builder();
cloudConfigBuilder.setCloudEnabled(true);
cloudConfigBuilder.setCloudProvider(CloudProvider.AZURE);
cloudConfigBuilder.setCloudID("TestID");
HelixCloudProperty helixCloudProperty = new HelixCloudProperty(cloudConfigBuilder.build());
AzureCloudInstanceInformationProcessor processor = new AzureCloudInstanceInformationProcessor(
helixCloudProperty, createMockHttpClient(responseFile));
List<String> response = processor.fetchCloudInstanceInformation();
Assert.assertEquals(response.size(), 1);
Assert.assertNotNull(response.get(0));
// Verify the response from mock http client
AzureCloudInstanceInformation azureCloudInstanceInformation =
processor.parseCloudInstanceInformation(response);
Assert.assertEquals(
azureCloudInstanceInformation
.get(CloudInstanceInformation.CloudInstanceField.FAULT_DOMAIN.name()),
"faultDomain=2," + "hostname=");
Assert.assertEquals(azureCloudInstanceInformation
.get(CloudInstanceInformation.CloudInstanceField.INSTANCE_SET_NAME.name()), "test-helix");
Assert.assertEquals(
azureCloudInstanceInformation
.get(CloudInstanceInformation.CloudInstanceField.INSTANCE_NAME.name()),
"test-helix_1");
}
}
| 9,820 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud/virtualTopologyGroup/TestVirtualTopologyGroupAssignment.java | package org.apache.helix.cloud.virtualTopologyGroup;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.google.common.collect.Sets;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.cloud.constants.VirtualTopologyGroupConstants;
import org.apache.helix.cloud.topology.FifoVirtualGroupAssignmentAlgorithm;
import org.apache.helix.cloud.topology.VirtualGroupAssignmentAlgorithm;
import org.apache.helix.util.HelixUtil;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
public class TestVirtualTopologyGroupAssignment {
private static final String GROUP_NAME = "test_virtual_group";
private final List<String> _flattenExpected = Arrays.asList(
"1", "2", "3",
"4", "5", "6",
"7", "8", "9",
"a", "b", "c", "d");
private Map<String, Set<String>> _zoneMapping = new HashMap<>();
@BeforeTest
public void prepare() {
_zoneMapping = new HashMap<>();
_zoneMapping.put("c", Sets.newHashSet("9", "8", "7"));
_zoneMapping.put("a", Sets.newHashSet("2", "3", "1"));
_zoneMapping.put("z", Sets.newHashSet("b", "c", "d", "a"));
_zoneMapping.put("b", Sets.newHashSet("5", "4", "6"));
}
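  // Note (grounded in the assertions below): sortAndFlattenZoneMapping sorts zones by name and
  // the instances within each zone, so the mapping above flattens to _flattenExpected:
  //   a -> [1, 2, 3], b -> [4, 5, 6], c -> [7, 8, 9], z -> [a, b, c, d]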
@Test
public void testFlattenZoneMapping() {
Assert.assertEquals(HelixUtil.sortAndFlattenZoneMapping(_zoneMapping), _flattenExpected);
}
@Test(dataProvider = "getMappingTests")
public void testAssignmentScheme(int numGroups, Map<String, Set<String>> expected,
VirtualGroupAssignmentAlgorithm algorithm) {
Assert.assertEquals(algorithm.computeAssignment(numGroups, GROUP_NAME, _zoneMapping), expected);
}
@DataProvider
public Object[][] getMappingTests() {
Map<String, Set<String>> virtualMapping = new HashMap<>();
VirtualGroupAssignmentAlgorithm algorithm = FifoVirtualGroupAssignmentAlgorithm.getInstance();
virtualMapping.put(computeVirtualGroupId(0), Sets.newHashSet("1", "2", "3", "4", "5"));
virtualMapping.put(computeVirtualGroupId(1), Sets.newHashSet("6", "7", "8", "9"));
virtualMapping.put(computeVirtualGroupId(2), Sets.newHashSet("a", "b", "c", "d"));
Assert.assertEquals(algorithm.computeAssignment(3, GROUP_NAME, _zoneMapping),
virtualMapping);
Map<String, Set<String>> virtualMapping2 = new HashMap<>();
virtualMapping2.put(computeVirtualGroupId(0), Sets.newHashSet("1", "2"));
virtualMapping2.put(computeVirtualGroupId(1), Sets.newHashSet("3", "4"));
virtualMapping2.put(computeVirtualGroupId(2), Sets.newHashSet("5", "6"));
virtualMapping2.put(computeVirtualGroupId(3), Sets.newHashSet("7", "8"));
virtualMapping2.put(computeVirtualGroupId(4), Sets.newHashSet("9", "a"));
virtualMapping2.put(computeVirtualGroupId(5), Sets.newHashSet("b"));
virtualMapping2.put(computeVirtualGroupId(6), Sets.newHashSet("c"));
virtualMapping2.put(computeVirtualGroupId(7), Sets.newHashSet("d"));
return new Object[][] {
{3, virtualMapping, algorithm},
{8, virtualMapping2, algorithm}
};
}
private static String computeVirtualGroupId(int groupIndex) {
return GROUP_NAME + VirtualTopologyGroupConstants.GROUP_NAME_SPLITTER + groupIndex;
}
}
| 9,821 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud/event/TestDefaultCloudEventCallbackImpl.java | package org.apache.helix.cloud.event;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.HelixAdmin;
import org.apache.helix.cloud.event.helix.DefaultCloudEventCallbackImpl;
import org.apache.helix.constants.InstanceConstants;
import org.apache.helix.integration.common.ZkStandAloneCMTestBase;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.util.InstanceValidationUtil;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestDefaultCloudEventCallbackImpl extends ZkStandAloneCMTestBase {
private final DefaultCloudEventCallbackImpl _impl = new DefaultCloudEventCallbackImpl();
private MockParticipantManager _instanceManager;
private HelixAdmin _admin;
public TestDefaultCloudEventCallbackImpl() throws IllegalAccessException, InstantiationException {
}
@BeforeClass
public void beforeClass() throws Exception {
super.beforeClass();
_instanceManager = _participants[0];
_admin = _instanceManager.getClusterManagmentTool();
}
@Test
public void testDisableInstance() {
Assert.assertTrue(InstanceValidationUtil
.isEnabled(_manager.getHelixDataAccessor(), _instanceManager.getInstanceName()));
_impl.disableInstance(_instanceManager, null);
Assert.assertFalse(InstanceValidationUtil
.isEnabled(_manager.getHelixDataAccessor(), _instanceManager.getInstanceName()));
Assert.assertEquals(_manager.getConfigAccessor()
.getInstanceConfig(CLUSTER_NAME, _instanceManager.getInstanceName())
.getInstanceDisabledType(), InstanceConstants.InstanceDisabledType.CLOUD_EVENT.name());
// Should not disable instance if it is already disabled due to other reasons
// And disabled type should remain unchanged
_admin.enableInstance(CLUSTER_NAME, _instanceManager.getInstanceName(), false);
_impl.disableInstance(_instanceManager, null);
Assert.assertFalse(InstanceValidationUtil
.isEnabled(_manager.getHelixDataAccessor(), _instanceManager.getInstanceName()));
Assert.assertEquals(_manager.getConfigAccessor()
.getInstanceConfig(CLUSTER_NAME, _instanceManager.getInstanceName())
.getInstanceDisabledType(),
InstanceConstants.InstanceDisabledType.DEFAULT_INSTANCE_DISABLE_TYPE.name());
_admin.enableInstance(CLUSTER_NAME, _instanceManager.getInstanceName(), false,
InstanceConstants.InstanceDisabledType.CLOUD_EVENT, null);
}
@Test (dependsOnMethods = "testDisableInstance")
public void testEnableInstance() {
Assert.assertFalse(InstanceValidationUtil
.isEnabled(_manager.getHelixDataAccessor(), _instanceManager.getInstanceName()));
// Should enable instance if the instance is disabled due to cloud event
_impl.enableInstance(_instanceManager, null);
Assert.assertTrue(InstanceValidationUtil
.isEnabled(_manager.getHelixDataAccessor(), _instanceManager.getInstanceName()));
// Should not enable instance if it is not disabled due to cloud event
_admin.enableInstance(CLUSTER_NAME, _instanceManager.getInstanceName(), false);
_impl.enableInstance(_instanceManager, null);
Assert.assertFalse(InstanceValidationUtil
.isEnabled(_manager.getHelixDataAccessor(), _instanceManager.getInstanceName()));
_admin.enableInstance(_instanceManager.getClusterName(), _instanceManager.getInstanceName(),
true);
}
@Test
public void testEnterMaintenanceMode() {
Assert.assertFalse(_admin.isInMaintenanceMode(CLUSTER_NAME));
_impl.enterMaintenanceMode(_instanceManager, null);
_impl.disableInstance(_instanceManager, null);
Assert.assertTrue(_admin.isInMaintenanceMode(CLUSTER_NAME));
}
@Test (dependsOnMethods = "testEnterMaintenanceMode")
public void testExitMaintenanceMode() {
Assert.assertTrue(_admin.isInMaintenanceMode(CLUSTER_NAME));
// Should not exit maintenance mode if there is remaining live instance that is disabled due to cloud event
_impl.exitMaintenanceMode(_instanceManager, null);
Assert.assertTrue(_admin.isInMaintenanceMode(CLUSTER_NAME));
// Should exit maintenance mode if there is no remaining live instance that is disabled due to cloud event
_impl.enableInstance(_instanceManager, null);
_impl.exitMaintenanceMode(_instanceManager, null);
Assert.assertFalse(_admin.isInMaintenanceMode(CLUSTER_NAME));
}
} | 9,822 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud/event/HelixTestCloudEventHandler.java | package org.apache.helix.cloud.event;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
public class HelixTestCloudEventHandler extends CloudEventHandler {
  private static final int TIMEOUT = 900; // seconds before timing out
private static ExecutorService executorService = Executors.newSingleThreadExecutor();
public static boolean anyListenerIsRegisterFlag = false;
@Override
public void registerCloudEventListener(CloudEventListener listener) {
super.registerCloudEventListener(listener);
anyListenerIsRegisterFlag = true;
}
@Override
public void unregisterCloudEventListener(CloudEventListener listener) {
super.unregisterCloudEventListener(listener);
anyListenerIsRegisterFlag = false;
}
} | 9,823 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud/event/MockCloudEventCallbackImpl.java | package org.apache.helix.cloud.event;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashSet;
import java.util.Set;
import org.apache.helix.HelixManager;
import org.apache.helix.cloud.event.helix.DefaultCloudEventCallbackImpl;
public class MockCloudEventCallbackImpl extends DefaultCloudEventCallbackImpl {
public enum OperationType {
ON_PAUSE_DISABLE_INSTANCE,
ON_RESUME_ENABLE_INSTANCE,
ON_PAUSE_MAINTENANCE_MODE,
ON_RESUME_MAINTENANCE_MODE,
PRE_ON_PAUSE,
POST_ON_PAUSE,
PRE_ON_RESUME,
POST_ON_RESUME
}
public static Set<OperationType> triggeredOperation = new HashSet<>();
@Override
public void disableInstance(HelixManager manager, Object eventInfo) {
triggeredOperation.add(OperationType.ON_PAUSE_DISABLE_INSTANCE);
}
@Override
public void enableInstance(HelixManager manager, Object eventInfo) {
triggeredOperation.add(OperationType.ON_RESUME_ENABLE_INSTANCE);
}
@Override
public void enterMaintenanceMode(HelixManager manager, Object eventInfo) {
triggeredOperation.add(OperationType.ON_PAUSE_MAINTENANCE_MODE);
}
@Override
public void exitMaintenanceMode(HelixManager manager, Object eventInfo) {
triggeredOperation.add(OperationType.ON_RESUME_MAINTENANCE_MODE);
}
}
| 9,824 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud | Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud/event/MockCloudEventAwareHelixManager.java | package org.apache.helix.cloud.event;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Optional;
import java.util.Set;
import org.apache.helix.ClusterMessagingService;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixCloudProperty;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerProperties;
import org.apache.helix.HelixManagerProperty;
import org.apache.helix.InstanceType;
import org.apache.helix.LiveInstanceInfoProvider;
import org.apache.helix.PreConnectCallback;
import org.apache.helix.PropertyKey;
import org.apache.helix.api.listeners.ClusterConfigChangeListener;
import org.apache.helix.api.listeners.ConfigChangeListener;
import org.apache.helix.api.listeners.ControllerChangeListener;
import org.apache.helix.api.listeners.CurrentStateChangeListener;
import org.apache.helix.api.listeners.CustomizedStateChangeListener;
import org.apache.helix.api.listeners.CustomizedStateConfigChangeListener;
import org.apache.helix.api.listeners.CustomizedStateRootChangeListener;
import org.apache.helix.api.listeners.CustomizedViewChangeListener;
import org.apache.helix.api.listeners.CustomizedViewRootChangeListener;
import org.apache.helix.api.listeners.ExternalViewChangeListener;
import org.apache.helix.api.listeners.IdealStateChangeListener;
import org.apache.helix.api.listeners.InstanceConfigChangeListener;
import org.apache.helix.api.listeners.LiveInstanceChangeListener;
import org.apache.helix.api.listeners.MessageListener;
import org.apache.helix.api.listeners.ResourceConfigChangeListener;
import org.apache.helix.api.listeners.ScopedConfigChangeListener;
import org.apache.helix.cloud.constants.CloudProvider;
import org.apache.helix.cloud.event.helix.HelixCloudEventListener;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.healthcheck.ParticipantHealthReportCollector;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
public class MockCloudEventAwareHelixManager implements HelixManager {
private final HelixManagerProperty _helixManagerProperty;
private CloudEventListener _cloudEventListener;
  /**
   * Use a mock ZK Helix manager to avoid the need to connect to ZooKeeper.
   * Update the cloud-event-related logic here whenever the real logic in ZKHelixManager changes.
   */
public MockCloudEventAwareHelixManager(HelixManagerProperty helixManagerProperty) {
_helixManagerProperty = helixManagerProperty;
_helixManagerProperty.getHelixCloudProperty().populateFieldsWithCloudConfig(
new CloudConfig.Builder().setCloudEnabled(true).setCloudProvider(CloudProvider.AZURE)
.build());
}
@Override
public void connect()
throws IllegalAccessException, InstantiationException, ClassNotFoundException {
if (_helixManagerProperty != null) {
HelixCloudProperty helixCloudProperty = _helixManagerProperty.getHelixCloudProperty();
if (helixCloudProperty != null && helixCloudProperty.isCloudEventCallbackEnabled()) {
_cloudEventListener =
new HelixCloudEventListener(helixCloudProperty.getCloudEventCallbackProperty(), this);
System.out.println("Using handler: " + helixCloudProperty.getCloudEventHandlerClassName());
CloudEventHandlerFactory.getInstance(
_helixManagerProperty.getHelixCloudProperty().getCloudEventHandlerClassName())
.registerCloudEventListener(_cloudEventListener);
}
}
}
@Override
public void disconnect() {
if (_cloudEventListener != null) {
try {
CloudEventHandlerFactory.getInstance(
_helixManagerProperty.getHelixCloudProperty().getCloudEventHandlerClassName())
.unregisterCloudEventListener(_cloudEventListener);
} catch (Exception e) {
System.out.println("Failed to unregister cloudEventListener." );
e.printStackTrace();
}
}
}
@Override
public boolean isConnected() {
return false;
}
@Override
public void addIdealStateChangeListener(IdealStateChangeListener listener) throws Exception {
}
@Override
public void addIdealStateChangeListener(org.apache.helix.IdealStateChangeListener listener)
throws Exception {
}
@Override
public void addLiveInstanceChangeListener(LiveInstanceChangeListener listener) throws Exception {
}
@Override
public void addLiveInstanceChangeListener(org.apache.helix.LiveInstanceChangeListener listener)
throws Exception {
}
@Override
public void addConfigChangeListener(ConfigChangeListener listener) throws Exception {
}
@Override
public void addInstanceConfigChangeListener(InstanceConfigChangeListener listener)
throws Exception {
}
@Override
public void addInstanceConfigChangeListener(
org.apache.helix.InstanceConfigChangeListener listener) throws Exception {
}
@Override
public void addResourceConfigChangeListener(ResourceConfigChangeListener listener)
throws Exception {
}
@Override
public void addCustomizedStateConfigChangeListener(CustomizedStateConfigChangeListener listener)
throws Exception {
}
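  // NOTE: "Clusterfig" below matches the (misspelled) method name declared on
  // the HelixManager interface, so it cannot be corrected here.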
@Override
public void addClusterfigChangeListener(ClusterConfigChangeListener listener) throws Exception {
}
@Override
public void addConfigChangeListener(ScopedConfigChangeListener listener,
HelixConfigScope.ConfigScopeProperty scope) throws Exception {
}
@Override
public void addConfigChangeListener(org.apache.helix.ScopedConfigChangeListener listener,
HelixConfigScope.ConfigScopeProperty scope) throws Exception {
}
@Override
public void addMessageListener(MessageListener listener, String instanceName) throws Exception {
}
@Override
public void addMessageListener(org.apache.helix.MessageListener listener, String instanceName)
throws Exception {
}
@Override
public void addCurrentStateChangeListener(CurrentStateChangeListener listener,
String instanceName, String sessionId) throws Exception {
}
@Override
public void addCurrentStateChangeListener(org.apache.helix.CurrentStateChangeListener listener,
String instanceName, String sessionId) throws Exception {
}
@Override
public void addTaskCurrentStateChangeListener(CurrentStateChangeListener listener,
String instanceName, String sessionId) throws Exception {
}
@Override
public void addCustomizedStateRootChangeListener(CustomizedStateRootChangeListener listener,
String instanceName) throws Exception {
}
@Override
public void addCustomizedStateChangeListener(CustomizedStateChangeListener listener,
String instanceName, String stateName) throws Exception {
}
@Override
public void addExternalViewChangeListener(ExternalViewChangeListener listener) throws Exception {
}
@Override
public void addCustomizedViewChangeListener(CustomizedViewChangeListener listener,
String customizedStateType) throws Exception {
}
@Override
public void addCustomizedViewRootChangeListener(CustomizedViewRootChangeListener listener)
throws Exception {
}
@Override
public void addTargetExternalViewChangeListener(ExternalViewChangeListener listener)
throws Exception {
}
@Override
public void addExternalViewChangeListener(org.apache.helix.ExternalViewChangeListener listener)
throws Exception {
}
@Override
public void addControllerListener(ControllerChangeListener listener) {
}
@Override
public void addControllerListener(org.apache.helix.ControllerChangeListener listener) {
}
@Override
public void addControllerMessageListener(MessageListener listener) {
}
@Override
public void addControllerMessageListener(org.apache.helix.MessageListener listener) {
}
@Override
public void setEnabledControlPipelineTypes(Set<Pipeline.Type> types) {
}
@Override
public boolean removeListener(PropertyKey key, Object listener) {
return false;
}
@Override
public HelixDataAccessor getHelixDataAccessor() {
return null;
}
@Override
public ConfigAccessor getConfigAccessor() {
return null;
}
@Override
public String getClusterName() {
return null;
}
@Override
public String getMetadataStoreConnectionString() {
return null;
}
@Override
public String getInstanceName() {
return null;
}
@Override
public String getSessionId() {
return null;
}
@Override
public long getLastNotificationTime() {
return 0;
}
@Override
public HelixAdmin getClusterManagmentTool() {
return null;
}
@Override
public ZkHelixPropertyStore<ZNRecord> getHelixPropertyStore() {
return null;
}
@Override
public ClusterMessagingService getMessagingService() {
return null;
}
@Override
public InstanceType getInstanceType() {
return null;
}
@Override
public String getVersion() {
return null;
}
@Override
public HelixManagerProperties getProperties() {
return null;
}
@Override
public StateMachineEngine getStateMachineEngine() {
return null;
}
@Override
public Long getSessionStartTime() {
return null;
}
@Override
public Optional<String> getSessionIdIfLead() {
return Optional.empty();
}
@Override
public boolean isLeader() {
return false;
}
@Override
public void startTimerTasks() {
}
@Override
public void stopTimerTasks() {
}
@Override
public void addPreConnectCallback(PreConnectCallback callback) {
}
@Override
public void setLiveInstanceInfoProvider(LiveInstanceInfoProvider liveInstanceInfoProvider) {
}
@Override
public ParticipantHealthReportCollector getHealthReportCollector() {
return null;
}
}

// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/cloud/event/TestCloudEventCallbackProperty.java
package org.apache.helix.cloud.event;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.HelixCloudProperty;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerProperty;
import org.apache.helix.cloud.event.helix.CloudEventCallbackProperty;
import org.apache.helix.cloud.event.helix.CloudEventCallbackProperty.HelixOperation;
import org.apache.helix.cloud.event.helix.CloudEventCallbackProperty.UserDefinedCallbackType;
import org.apache.helix.cloud.event.helix.HelixCloudEventListener;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.AfterTest;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestCloudEventCallbackProperty {
private HelixManager _helixManager;
private HelixCloudProperty _cloudProperty;
private final static String CLUSTER_NAME = "testCluster";
@BeforeClass
public void beforeClass() throws Exception {
// Set up Helix manager property: Helix Cloud Property
_cloudProperty = new HelixCloudProperty(new CloudConfig(new ZNRecord(CLUSTER_NAME)));
_cloudProperty.setCloudEventCallbackEnabled(true);
HelixManagerProperty.Builder managerPropertyBuilder = new HelixManagerProperty.Builder();
managerPropertyBuilder.setHelixCloudProperty(_cloudProperty);
// Build Helix manager property
HelixManagerProperty managerProperty = managerPropertyBuilder.build();
// Create Helix Manager
_helixManager = new MockCloudEventAwareHelixManager(managerProperty);
}
@AfterTest
public void afterTest() {
_helixManager.disconnect();
_cloudProperty.getCloudEventCallbackProperty()
.setHelixOperationEnabled(HelixOperation.ENABLE_DISABLE_INSTANCE, false);
_cloudProperty.getCloudEventCallbackProperty()
.setHelixOperationEnabled(HelixOperation.MAINTENANCE_MODE, false);
_cloudProperty.getCloudEventCallbackProperty()
.unregisterUserDefinedCallback(UserDefinedCallbackType.PRE_ON_PAUSE);
_cloudProperty.getCloudEventCallbackProperty()
.unregisterUserDefinedCallback(UserDefinedCallbackType.POST_ON_PAUSE);
_cloudProperty.getCloudEventCallbackProperty()
.unregisterUserDefinedCallback(UserDefinedCallbackType.PRE_ON_RESUME);
_cloudProperty.getCloudEventCallbackProperty()
.unregisterUserDefinedCallback(UserDefinedCallbackType.POST_ON_RESUME);
MockCloudEventCallbackImpl.triggeredOperation.clear();
}
@Test
public void testOptionalHelixOperation() throws Exception {
// Cloud event callback property
Map<String, String> paramMap = new HashMap<>();
paramMap.put(CloudEventCallbackProperty.UserArgsInputKey.CALLBACK_IMPL_CLASS_NAME,
MockCloudEventCallbackImpl.class.getCanonicalName());
paramMap.put(CloudEventCallbackProperty.UserArgsInputKey.CLOUD_EVENT_HANDLER_CLASS_NAME,
HelixTestCloudEventHandler.class.getCanonicalName());
CloudEventCallbackProperty property = new CloudEventCallbackProperty(paramMap);
property.setHelixOperationEnabled(HelixOperation.ENABLE_DISABLE_INSTANCE, true);
_cloudProperty.setCloudEventCallbackProperty(property);
_helixManager.connect();
// Manually trigger event
    ((CloudEventHandler) CloudEventHandlerFactory.getInstance(HelixTestCloudEventHandler.class.getCanonicalName()))
.performAction(HelixCloudEventListener.EventType.ON_PAUSE, null);
Assert.assertTrue(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_PAUSE_DISABLE_INSTANCE));
Assert.assertFalse(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_PAUSE_MAINTENANCE_MODE));
Assert.assertFalse(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_RESUME_MAINTENANCE_MODE));
Assert.assertFalse(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_RESUME_ENABLE_INSTANCE));
property.setHelixOperationEnabled(HelixOperation.MAINTENANCE_MODE, true);
MockCloudEventCallbackImpl.triggeredOperation.clear();
// Manually trigger event
    ((CloudEventHandler) CloudEventHandlerFactory.getInstance(HelixTestCloudEventHandler.class.getCanonicalName()))
.performAction(HelixCloudEventListener.EventType.ON_PAUSE, null);
Assert.assertTrue(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_PAUSE_DISABLE_INSTANCE));
Assert.assertTrue(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_PAUSE_MAINTENANCE_MODE));
Assert.assertFalse(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_RESUME_MAINTENANCE_MODE));
Assert.assertFalse(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_RESUME_ENABLE_INSTANCE));
MockCloudEventCallbackImpl.triggeredOperation.clear();
// Manually trigger event
((CloudEventHandler) CloudEventHandlerFactory.getInstance(HelixTestCloudEventHandler.class.getCanonicalName()))
.performAction(HelixCloudEventListener.EventType.ON_RESUME, null);
Assert.assertFalse(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_PAUSE_DISABLE_INSTANCE));
Assert.assertFalse(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_PAUSE_MAINTENANCE_MODE));
Assert.assertTrue(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_RESUME_ENABLE_INSTANCE));
Assert.assertTrue(
callbackTriggered(MockCloudEventCallbackImpl.OperationType.ON_RESUME_MAINTENANCE_MODE));
}
@Test
public void testUserDefinedCallback() throws Exception {
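    // Reset callback registrations and triggered-operation state left over from
    // earlier tests before wiring up the user-defined callbacks.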
afterTest();
// Cloud event callback property
CloudEventCallbackProperty property = new CloudEventCallbackProperty(Collections
.singletonMap(CloudEventCallbackProperty.UserArgsInputKey.CALLBACK_IMPL_CLASS_NAME,
MockCloudEventCallbackImpl.class.getCanonicalName()));
_cloudProperty.setCloudEventCallbackProperty(property);
_helixManager.connect();
property
.registerUserDefinedCallback(UserDefinedCallbackType.PRE_ON_PAUSE, (manager, eventInfo) -> {
MockCloudEventCallbackImpl.triggeredOperation
.add(MockCloudEventCallbackImpl.OperationType.PRE_ON_PAUSE);
});
property.registerUserDefinedCallback(UserDefinedCallbackType.POST_ON_PAUSE,
(manager, eventInfo) -> {
MockCloudEventCallbackImpl.triggeredOperation
.add(MockCloudEventCallbackImpl.OperationType.POST_ON_PAUSE);
});
property.registerUserDefinedCallback(UserDefinedCallbackType.PRE_ON_RESUME,
(manager, eventInfo) -> {
MockCloudEventCallbackImpl.triggeredOperation
.add(MockCloudEventCallbackImpl.OperationType.PRE_ON_RESUME);
});
property.registerUserDefinedCallback(UserDefinedCallbackType.POST_ON_RESUME,
(manager, eventInfo) -> {
MockCloudEventCallbackImpl.triggeredOperation
.add(MockCloudEventCallbackImpl.OperationType.POST_ON_RESUME);
});
// Manually trigger event
((CloudEventHandler) CloudEventHandlerFactory.getInstance(CloudEventHandler.class.getCanonicalName()))
.performAction(HelixCloudEventListener.EventType.ON_PAUSE, null);
Assert.assertTrue(callbackTriggered(MockCloudEventCallbackImpl.OperationType.PRE_ON_PAUSE));
Assert.assertTrue(callbackTriggered(MockCloudEventCallbackImpl.OperationType.POST_ON_PAUSE));
Assert.assertFalse(callbackTriggered(MockCloudEventCallbackImpl.OperationType.PRE_ON_RESUME));
Assert.assertFalse(callbackTriggered(MockCloudEventCallbackImpl.OperationType.POST_ON_RESUME));
MockCloudEventCallbackImpl.triggeredOperation.clear();
((CloudEventHandler) CloudEventHandlerFactory.getInstance(CloudEventHandler.class.getCanonicalName()))
.performAction(HelixCloudEventListener.EventType.ON_RESUME, null);
Assert.assertFalse(callbackTriggered(MockCloudEventCallbackImpl.OperationType.PRE_ON_PAUSE));
Assert.assertFalse(callbackTriggered(MockCloudEventCallbackImpl.OperationType.POST_ON_PAUSE));
Assert.assertTrue(callbackTriggered(MockCloudEventCallbackImpl.OperationType.PRE_ON_RESUME));
Assert.assertTrue(callbackTriggered(MockCloudEventCallbackImpl.OperationType.POST_ON_RESUME));
}
@Test
public void testUsingInvalidImplClassName() throws Exception {
// Cloud event callback property
CloudEventCallbackProperty property = new CloudEventCallbackProperty(Collections
.singletonMap(CloudEventCallbackProperty.UserArgsInputKey.CALLBACK_IMPL_CLASS_NAME,
"org.apache.helix.cloud.InvalidClassName"));
_cloudProperty.setCloudEventCallbackProperty(property);
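    // The invalid implementation class name must not break connect(); the test
    // passes as long as no exception propagates.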
_helixManager.connect();
}
@Test
public void testRegisterAndUnregister() throws Exception {
// Cloud event callback property
Map<String, String> paramMap = new HashMap<>();
paramMap.put(CloudEventCallbackProperty.UserArgsInputKey.CALLBACK_IMPL_CLASS_NAME,
MockCloudEventCallbackImpl.class.getCanonicalName());
paramMap.put(CloudEventCallbackProperty.UserArgsInputKey.CLOUD_EVENT_HANDLER_CLASS_NAME,
HelixTestCloudEventHandler.class.getCanonicalName());
CloudEventCallbackProperty property = new CloudEventCallbackProperty(paramMap);
property.setHelixOperationEnabled(HelixOperation.ENABLE_DISABLE_INSTANCE, true);
_cloudProperty.setCloudEventCallbackProperty(property);
_helixManager.connect();
Assert.assertTrue(HelixTestCloudEventHandler.anyListenerIsRegisterFlag);
_helixManager.disconnect();
Assert.assertFalse(HelixTestCloudEventHandler.anyListenerIsRegisterFlag);
}
@Test
public void testUsingInvalidHandlerClassName() throws Exception {
// Cloud event callback property
CloudEventCallbackProperty property = new CloudEventCallbackProperty(Collections
.singletonMap(CloudEventCallbackProperty.UserArgsInputKey.CLOUD_EVENT_HANDLER_CLASS_NAME,
"org.apache.helix.cloud.InvalidClassName"));
_cloudProperty.setCloudEventCallbackProperty(property);
try {
      _helixManager.connect();
    } catch (Exception ex) {
      Assert.assertEquals(ex.getClass(), java.lang.ClassNotFoundException.class);
    }
// Manually trigger event
((CloudEventHandler) CloudEventHandlerFactory.getInstance(CloudEventHandler.class.getCanonicalName()))
.performAction(HelixCloudEventListener.EventType.ON_PAUSE, null);
}
private boolean callbackTriggered(MockCloudEventCallbackImpl.OperationType type) {
return MockCloudEventCallbackImpl.triggeredOperation.contains(type);
}
}

// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestJobStateOnCreation.java
package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import com.google.common.collect.Sets;
import org.apache.helix.controller.dataproviders.WorkflowControllerDataProvider;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.WorkflowGenerator;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.Resource;
import org.apache.helix.model.ResourceAssignment;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestJobStateOnCreation extends TaskSynchronizedTestBase {
private static final String WORKFLOW_NAME = "testWorkflow";
private WorkflowControllerDataProvider _cache;
private IdealState _idealState;
private Resource _resource;
private CurrentStateOutput _currStateOutput;
@BeforeClass
public void beforeClass() throws Exception {
_cache = new WorkflowControllerDataProvider();
_idealState = new IdealState(WORKFLOW_NAME);
_resource = new Resource(WORKFLOW_NAME);
_currStateOutput = new CurrentStateOutput();
_participants = new MockParticipantManager[_numNodes];
_gSetupTool.addCluster(CLUSTER_NAME, true);
createManagers();
}
@Test
public void testJobStateOnCreation() {
Workflow.Builder builder = new Workflow.Builder(WORKFLOW_NAME);
JobConfig.Builder jobConfigBuilder = new JobConfig.Builder().setCommand(MockTask.TASK_COMMAND)
        .setTargetResource(WORKFLOW_NAME).setTargetPartitionStates(Sets.newHashSet("SLAVE", "MASTER"))
.setJobCommandConfigMap(WorkflowGenerator.DEFAULT_COMMAND_CONFIG);
String jobName = "job";
builder = builder.addJob(jobName, jobConfigBuilder);
Workflow workflow = builder.build();
WorkflowConfig workflowConfig = workflow.getWorkflowConfig();
JobConfig jobConfig = jobConfigBuilder.build();
workflowConfig.getRecord().merge(jobConfig.getRecord());
_cache.getJobConfigMap().put(WORKFLOW_NAME + "_" + jobName, jobConfig);
_cache.getWorkflowConfigMap().put(WORKFLOW_NAME, workflowConfig);
WorkflowRebalancer workflowRebalancer = new WorkflowRebalancer();
workflowRebalancer.init(_manager);
ResourceAssignment resourceAssignment = workflowRebalancer
.computeBestPossiblePartitionState(_cache, _idealState, _resource, _currStateOutput);
WorkflowContext workflowContext = _cache.getWorkflowContext(WORKFLOW_NAME);
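    // Right after a workflow is created, nothing has been scheduled yet, so the
    // first rebalance should leave every job in the NOT_STARTED state.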
Map<String, TaskState> jobStates = workflowContext.getJobStates();
for (String job : jobStates.keySet()) {
Assert.assertEquals(jobStates.get(job), TaskState.NOT_STARTED);
}
}
}

// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestTargetedTaskStateChange.java
package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.common.caches.TaskDataCache;
import org.apache.helix.controller.dataproviders.WorkflowControllerDataProvider;
import org.apache.helix.controller.stages.BestPossibleStateOutput;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Partition;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestTargetedTaskStateChange {
private static final String CLUSTER_NAME = "TestCluster";
private static final String INSTANCE_PREFIX = "Instance_";
private static final int NUM_PARTICIPANTS = 3;
private static final String WORKFLOW_NAME = "TestWorkflow";
private static final String JOB_NAME = "TestJob";
private static final String PARTITION_NAME = "0";
private static final String TARGET_RESOURCES = "TestDB";
private Map<String, LiveInstance> _liveInstances;
private Map<String, InstanceConfig> _instanceConfigs;
private ClusterConfig _clusterConfig;
private AssignableInstanceManager _assignableInstanceManager;
@BeforeClass
public void beforeClass() {
// Populate live instances and their corresponding instance configs
_liveInstances = new HashMap<>();
_instanceConfigs = new HashMap<>();
_clusterConfig = new ClusterConfig(CLUSTER_NAME);
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
String instanceName = INSTANCE_PREFIX + i;
LiveInstance liveInstance = new LiveInstance(instanceName);
InstanceConfig instanceConfig = new InstanceConfig(instanceName);
_liveInstances.put(instanceName, liveInstance);
_instanceConfigs.put(instanceName, instanceConfig);
}
_assignableInstanceManager = new AssignableInstanceManager();
}
  /**
   * This test checks the controller's behavior when the same task has current
   * states on two different instances.
   * Scenario:
   * Instance0: Slave, Instance1: Master, Instance2: Slave
   * CurrentState: Instance0: Running, Instance1: Running
   * Expected paMap: Instance0 -> Dropped
   */
@Test
public void testTwoRunningCurrentStates() {
MockTestInformation mock = new MockTestInformation();
when(mock._cache.getWorkflowConfig(WORKFLOW_NAME)).thenReturn(mock._workflowConfig);
when(mock._cache.getJobConfig(JOB_NAME)).thenReturn(mock._jobConfig);
when(mock._cache.getTaskDataCache()).thenReturn(mock._taskDataCache);
when(mock._cache.getJobContext(JOB_NAME)).thenReturn(mock._jobContext);
when(mock._cache.getIdealStates()).thenReturn(mock._idealStates);
when(mock._cache.getEnabledLiveInstances()).thenReturn(_liveInstances.keySet());
when(mock._cache.getInstanceConfigMap()).thenReturn(_instanceConfigs);
when(mock._cache.getClusterConfig()).thenReturn(_clusterConfig);
when(mock._taskDataCache.getRuntimeJobDag(WORKFLOW_NAME)).thenReturn(mock._runtimeJobDag);
_assignableInstanceManager.buildAssignableInstances(_clusterConfig, mock._taskDataCache,
_liveInstances, _instanceConfigs);
when(mock._cache.getAssignableInstanceManager()).thenReturn(_assignableInstanceManager);
when(mock._cache.getExistsLiveInstanceOrCurrentStateOrMessageChange()).thenReturn(true);
Set<String> inflightJobDag = new HashSet<>();
inflightJobDag.add(JOB_NAME);
when(mock._taskDataCache.getRuntimeJobDag(WORKFLOW_NAME).getInflightJobList())
.thenReturn(inflightJobDag);
WorkflowDispatcher workflowDispatcher = new WorkflowDispatcher();
workflowDispatcher.updateCache(mock._cache);
BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
workflowDispatcher.updateWorkflowStatus(WORKFLOW_NAME, mock._workflowConfig,
mock._workflowContext, mock._currentStateOutput, bestPossibleStateOutput);
Partition taskPartition = new Partition(JOB_NAME + "_" + PARTITION_NAME);
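    // The task targets the MASTER replica, so Instance_1 keeps the running task
    // and the redundant current state on Instance_0 should be scheduled for DROPPED.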
Assert.assertEquals(TaskPartitionState.DROPPED.name(), bestPossibleStateOutput
.getPartitionStateMap(JOB_NAME).getPartitionMap(taskPartition).get(INSTANCE_PREFIX + "0"));
}
  /**
   * This test checks the controller's behavior when only one instance has a
   * current state for the task.
   * Scenario:
   * Instance0: Slave, Instance1: Master, Instance2: Slave
   * CurrentState: Instance1: Running
   * Expected paMap: Instance1 -> Running
   */
@Test
public void testOneRunningOneNull() {
MockTestInformation mock = new MockTestInformation();
when(mock._cache.getWorkflowConfig(WORKFLOW_NAME)).thenReturn(mock._workflowConfig);
when(mock._cache.getJobConfig(JOB_NAME)).thenReturn(mock._jobConfig);
when(mock._cache.getTaskDataCache()).thenReturn(mock._taskDataCache);
when(mock._cache.getJobContext(JOB_NAME)).thenReturn(mock._jobContext);
when(mock._cache.getIdealStates()).thenReturn(mock._idealStates);
when(mock._cache.getEnabledLiveInstances()).thenReturn(_liveInstances.keySet());
when(mock._cache.getInstanceConfigMap()).thenReturn(_instanceConfigs);
when(mock._cache.getClusterConfig()).thenReturn(_clusterConfig);
when(mock._taskDataCache.getRuntimeJobDag(WORKFLOW_NAME)).thenReturn(mock._runtimeJobDag);
_assignableInstanceManager.buildAssignableInstances(_clusterConfig, mock._taskDataCache,
_liveInstances, _instanceConfigs);
when(mock._cache.getAssignableInstanceManager()).thenReturn(_assignableInstanceManager);
when(mock._cache.getExistsLiveInstanceOrCurrentStateOrMessageChange()).thenReturn(false);
Set<String> inflightJobDag = new HashSet<>();
inflightJobDag.add(JOB_NAME);
when(mock._taskDataCache.getRuntimeJobDag(WORKFLOW_NAME).getInflightJobList())
.thenReturn(inflightJobDag);
BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
WorkflowDispatcher workflowDispatcher = new WorkflowDispatcher();
workflowDispatcher.updateCache(mock._cache);
workflowDispatcher.updateWorkflowStatus(WORKFLOW_NAME, mock._workflowConfig,
mock._workflowContext, mock._currentStateOutput2, bestPossibleStateOutput);
Partition taskPartition = new Partition(JOB_NAME + "_" + PARTITION_NAME);
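    // The only current state lives on the MASTER instance (Instance_1), so the
    // task should simply be kept RUNNING there.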
Assert.assertEquals(TaskPartitionState.RUNNING.name(), bestPossibleStateOutput
.getPartitionStateMap(JOB_NAME).getPartitionMap(taskPartition).get(INSTANCE_PREFIX + "1"));
}
private WorkflowConfig prepareWorkflowConfig() {
WorkflowConfig.Builder workflowConfigBuilder = new WorkflowConfig.Builder();
workflowConfigBuilder.setWorkflowId(WORKFLOW_NAME);
workflowConfigBuilder.setTerminable(false);
workflowConfigBuilder.setTargetState(TargetState.START);
workflowConfigBuilder.setJobQueue(true);
JobDag jobDag = new JobDag();
jobDag.addNode(JOB_NAME);
workflowConfigBuilder.setJobDag(jobDag);
return workflowConfigBuilder.build();
}
private JobConfig prepareJobConfig() {
JobConfig.Builder jobConfigBuilder = new JobConfig.Builder();
jobConfigBuilder.setWorkflow(WORKFLOW_NAME);
jobConfigBuilder.setCommand("TestCommand");
jobConfigBuilder.setTargetResource(TARGET_RESOURCES);
jobConfigBuilder.setJobId(JOB_NAME);
List<String> targetPartition = new ArrayList<>();
targetPartition.add(TARGET_RESOURCES + "_0");
jobConfigBuilder.setTargetPartitions(targetPartition);
Set<String> targetPartitionStates = new HashSet<>();
targetPartitionStates.add("MASTER");
List<TaskConfig> taskConfigs = new ArrayList<>();
TaskConfig.Builder taskConfigBuilder = new TaskConfig.Builder();
taskConfigBuilder.setTaskId("0");
taskConfigs.add(taskConfigBuilder.build());
jobConfigBuilder.setTargetPartitionStates(targetPartitionStates);
jobConfigBuilder.addTaskConfigs(taskConfigs);
JobConfig jobConfig = jobConfigBuilder.build();
return jobConfig;
}
private WorkflowContext prepareWorkflowContext() {
ZNRecord record = new ZNRecord(WORKFLOW_NAME);
record.setSimpleField(WorkflowContext.WorkflowContextProperties.StartTime.name(), "0");
record.setSimpleField(WorkflowContext.WorkflowContextProperties.NAME.name(), WORKFLOW_NAME);
record.setSimpleField(WorkflowContext.WorkflowContextProperties.STATE.name(),
TaskState.IN_PROGRESS.name());
Map<String, String> jobState = new HashMap<>();
jobState.put(JOB_NAME, TaskState.IN_PROGRESS.name());
record.setMapField(WorkflowContext.WorkflowContextProperties.JOB_STATES.name(), jobState);
return new WorkflowContext(record);
}
private JobContext prepareJobContext(String instance) {
ZNRecord record = new ZNRecord(JOB_NAME);
JobContext jobContext = new JobContext(record);
    jobContext.setStartTime(0L);
    jobContext.setName(JOB_NAME);
jobContext.setPartitionState(0, TaskPartitionState.RUNNING);
    jobContext.setAssignedParticipant(0, instance);
    jobContext.setPartitionTarget(0, TARGET_RESOURCES + "_0");
return jobContext;
}
private Map<String, IdealState> prepareIdealStates(String instance1, String instance2,
String instance3) {
ZNRecord record = new ZNRecord(JOB_NAME);
record.setSimpleField(IdealState.IdealStateProperty.NUM_PARTITIONS.name(), "1");
record.setSimpleField(IdealState.IdealStateProperty.EXTERNAL_VIEW_DISABLED.name(), "true");
record.setSimpleField(IdealState.IdealStateProperty.IDEAL_STATE_MODE.name(), "AUTO");
record.setSimpleField(IdealState.IdealStateProperty.REBALANCE_MODE.name(), "TASK");
record.setSimpleField(IdealState.IdealStateProperty.REPLICAS.name(), "1");
record.setSimpleField(IdealState.IdealStateProperty.STATE_MODEL_DEF_REF.name(), "Task");
record.setSimpleField(IdealState.IdealStateProperty.STATE_MODEL_FACTORY_NAME.name(), "DEFAULT");
record.setSimpleField(IdealState.IdealStateProperty.REBALANCER_CLASS_NAME.name(),
"org.apache.helix.task.JobRebalancer");
record.setMapField(JOB_NAME + "_" + PARTITION_NAME, new HashMap<>());
record.setListField(JOB_NAME + "_" + PARTITION_NAME, new ArrayList<>());
Map<String, IdealState> idealStates = new HashMap<>();
idealStates.put(JOB_NAME, new IdealState(record));
ZNRecord recordDB = new ZNRecord(TARGET_RESOURCES);
recordDB.setSimpleField(IdealState.IdealStateProperty.REPLICAS.name(), "3");
recordDB.setSimpleField(IdealState.IdealStateProperty.REBALANCE_MODE.name(), "FULL_AUTO");
    recordDB.setSimpleField(IdealState.IdealStateProperty.IDEAL_STATE_MODE.name(), "AUTO_REBALANCE");
    recordDB.setSimpleField(IdealState.IdealStateProperty.STATE_MODEL_DEF_REF.name(), "MasterSlave");
    recordDB.setSimpleField(IdealState.IdealStateProperty.REBALANCE_STRATEGY.name(),
        "org.apache.helix.controller.rebalancer.strategy.CrushEdRebalanceStrategy");
    recordDB.setSimpleField(IdealState.IdealStateProperty.REBALANCER_CLASS_NAME.name(),
        "org.apache.helix.controller.rebalancer.DelayedAutoRebalancer");
Map<String, String> mapping = new HashMap<>();
mapping.put(instance1, "MASTER");
mapping.put(instance2, "SLAVE");
mapping.put(instance3, "SLAVE");
recordDB.setMapField(TARGET_RESOURCES + "_0", mapping);
List<String> listField = new ArrayList<>();
listField.add(instance1);
listField.add(instance2);
listField.add(instance3);
recordDB.setListField(TARGET_RESOURCES + "_0", listField);
idealStates.put(TARGET_RESOURCES, new IdealState(recordDB));
return idealStates;
}
private CurrentStateOutput prepareCurrentState(String masterInstance, String slaveInstance,
String masterState, String slaveState) {
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
currentStateOutput.setResourceStateModelDef(JOB_NAME, "TASK");
currentStateOutput.setBucketSize(JOB_NAME, 0);
Partition taskPartition = new Partition(JOB_NAME + "_" + PARTITION_NAME);
currentStateOutput.setEndTime(JOB_NAME, taskPartition, masterInstance, 0L);
currentStateOutput.setEndTime(JOB_NAME, taskPartition, slaveInstance, 0L);
currentStateOutput.setCurrentState(JOB_NAME, taskPartition, masterInstance, masterState);
currentStateOutput.setCurrentState(JOB_NAME, taskPartition, slaveInstance, slaveState);
currentStateOutput.setInfo(JOB_NAME, taskPartition, masterInstance, "");
currentStateOutput.setInfo(JOB_NAME, taskPartition, slaveInstance, "");
currentStateOutput.setResourceStateModelDef(TARGET_RESOURCES, "MasterSlave");
currentStateOutput.setBucketSize(TARGET_RESOURCES, 0);
Partition dbPartition = new Partition(TARGET_RESOURCES + "_0");
currentStateOutput.setEndTime(TARGET_RESOURCES, dbPartition, masterInstance, 0L);
currentStateOutput.setCurrentState(TARGET_RESOURCES, dbPartition, masterInstance, "MASTER");
currentStateOutput.setInfo(TARGET_RESOURCES, dbPartition, masterInstance, "");
return currentStateOutput;
}
private CurrentStateOutput prepareCurrentState2(String masterInstance, String masterState) {
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
currentStateOutput.setResourceStateModelDef(JOB_NAME, "TASK");
currentStateOutput.setBucketSize(JOB_NAME, 0);
Partition taskPartition = new Partition(JOB_NAME + "_" + PARTITION_NAME);
currentStateOutput.setEndTime(JOB_NAME, taskPartition, masterInstance, 0L);
currentStateOutput.setCurrentState(JOB_NAME, taskPartition, masterInstance, masterState);
currentStateOutput.setInfo(JOB_NAME, taskPartition, masterInstance, "");
currentStateOutput.setResourceStateModelDef(TARGET_RESOURCES, "MasterSlave");
currentStateOutput.setBucketSize(TARGET_RESOURCES, 0);
Partition dbPartition = new Partition(TARGET_RESOURCES + "_0");
currentStateOutput.setEndTime(TARGET_RESOURCES, dbPartition, masterInstance, 0L);
currentStateOutput.setCurrentState(TARGET_RESOURCES, dbPartition, masterInstance, "MASTER");
currentStateOutput.setInfo(TARGET_RESOURCES, dbPartition, masterInstance, "");
return currentStateOutput;
}
private class MockTestInformation {
private static final String SLAVE_INSTANCE = INSTANCE_PREFIX + "0";
private static final String MASTER_INSTANCE = INSTANCE_PREFIX + "1";
private static final String SLAVE_INSTANCE_2 = INSTANCE_PREFIX + "2";
private WorkflowControllerDataProvider _cache = mock(WorkflowControllerDataProvider.class);
private WorkflowConfig _workflowConfig = prepareWorkflowConfig();
private WorkflowContext _workflowContext = prepareWorkflowContext();
private Map<String, IdealState> _idealStates =
prepareIdealStates(MASTER_INSTANCE, SLAVE_INSTANCE, SLAVE_INSTANCE_2);
private JobConfig _jobConfig = prepareJobConfig();
private JobContext _jobContext = prepareJobContext(SLAVE_INSTANCE);
private CurrentStateOutput _currentStateOutput = prepareCurrentState(MASTER_INSTANCE,
SLAVE_INSTANCE, TaskPartitionState.RUNNING.name(), TaskPartitionState.RUNNING.name());
private CurrentStateOutput _currentStateOutput2 =
prepareCurrentState2(MASTER_INSTANCE, TaskPartitionState.RUNNING.name());
private TaskDataCache _taskDataCache = mock(TaskDataCache.class);
private RuntimeJobDag _runtimeJobDag = mock(RuntimeJobDag.class);
MockTestInformation() {
}
}
}

// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestUpdatePreviousAssignedTaskStatusWithPendingMessage.java
package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.helix.controller.dataproviders.WorkflowControllerDataProvider;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * This test checks the scheduling decision for a task that has already been
 * assigned to an instance and has a pending state-transition message.
 */
public class TestUpdatePreviousAssignedTaskStatusWithPendingMessage {
private static final String WORKFLOW_NAME = "TestWorkflow";
private static final String INSTANCE_NAME = "TestInstance";
private static final String JOB_NAME = "TestJob";
private static final String PARTITION_NAME = "0";
private static final String TARGET_RESOURCES = "TestDB";
private static final int PARTITION_ID = 0;
/**
* Scenario:
* JobState = TIMING_OUT
* Task State: Context= INIT, CurrentState = INIT
* Pending Message: FromState = INIT, ToState = RUNNING
*/
@Test
public void testTaskWithPendingMessageWhileJobTimingOut() {
JobDispatcher jobDispatcher = new JobDispatcher();
// Preparing the inputs
Map<String, SortedSet<Integer>> currentInstanceToTaskAssignments = new HashMap<>();
SortedSet<Integer> tasks = new TreeSet<>();
tasks.add(PARTITION_ID);
currentInstanceToTaskAssignments.put(INSTANCE_NAME, tasks);
Map<Integer, AbstractTaskDispatcher.PartitionAssignment> paMap = new TreeMap<>();
CurrentStateOutput currentStateOutput = prepareCurrentState(TaskPartitionState.INIT,
TaskPartitionState.INIT, TaskPartitionState.RUNNING);
JobContext jobContext = prepareJobContext(TaskPartitionState.INIT);
JobConfig jobConfig = prepareJobConfig();
Map<String, Set<Integer>> tasksToDrop = new HashMap<>();
tasksToDrop.put(INSTANCE_NAME, new HashSet<>());
WorkflowControllerDataProvider cache = new WorkflowControllerDataProvider();
jobDispatcher.updatePreviousAssignedTasksStatus(currentInstanceToTaskAssignments,
new HashSet<>(), JOB_NAME, currentStateOutput, jobContext, jobConfig, TaskState.TIMING_OUT,
new HashMap<>(), new HashSet<>(), paMap, TargetState.STOP, new HashSet<>(), cache,
tasksToDrop);
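    // With the job TIMING_OUT and an INIT->RUNNING message still pending, the
    // dispatcher should not issue a conflicting transition; the task is held in INIT.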
Assert.assertEquals(paMap.get(0)._state, TaskPartitionState.INIT.name());
}
/**
* Scenario:
* JobState = IN_PROGRESS
* Task State: Context= RUNNING, CurrentState = RUNNING
* Pending Message: FromState = RUNNING, ToState = DROPPED
*/
@Test
public void testTaskWithPendingMessage() {
JobDispatcher jobDispatcher = new JobDispatcher();
// Preparing the inputs
Map<String, SortedSet<Integer>> currentInstanceToTaskAssignments = new HashMap<>();
SortedSet<Integer> tasks = new TreeSet<>();
tasks.add(PARTITION_ID);
currentInstanceToTaskAssignments.put(INSTANCE_NAME, tasks);
Map<Integer, AbstractTaskDispatcher.PartitionAssignment> paMap = new TreeMap<>();
CurrentStateOutput currentStateOutput = prepareCurrentState(TaskPartitionState.RUNNING,
TaskPartitionState.RUNNING, TaskPartitionState.DROPPED);
JobContext jobContext = prepareJobContext(TaskPartitionState.RUNNING);
JobConfig jobConfig = prepareJobConfig();
Map<String, Set<Integer>> tasksToDrop = new HashMap<>();
tasksToDrop.put(INSTANCE_NAME, new HashSet<>());
WorkflowControllerDataProvider cache = new WorkflowControllerDataProvider();
jobDispatcher.updatePreviousAssignedTasksStatus(currentInstanceToTaskAssignments,
new HashSet<>(), JOB_NAME, currentStateOutput, jobContext, jobConfig, TaskState.IN_PROGRESS,
new HashMap<>(), new HashSet<>(), paMap, TargetState.START, new HashSet<>(), cache,
tasksToDrop);
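    // The pending RUNNING->DROPPED message means the drop is already in flight,
    // so the scheduling decision should remain DROPPED.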
Assert.assertEquals(paMap.get(0)._state, TaskPartitionState.DROPPED.name());
}
private JobConfig prepareJobConfig() {
JobConfig.Builder jobConfigBuilder = new JobConfig.Builder();
jobConfigBuilder.setWorkflow(WORKFLOW_NAME);
jobConfigBuilder.setCommand("TestCommand");
jobConfigBuilder.setJobId(JOB_NAME);
List<String> targetPartition = new ArrayList<>();
jobConfigBuilder.setTargetPartitions(targetPartition);
List<TaskConfig> taskConfigs = new ArrayList<>();
TaskConfig.Builder taskConfigBuilder = new TaskConfig.Builder();
taskConfigBuilder.setTaskId("0");
taskConfigs.add(taskConfigBuilder.build());
jobConfigBuilder.addTaskConfigs(taskConfigs);
return jobConfigBuilder.build();
}
private JobContext prepareJobContext(TaskPartitionState taskPartitionState) {
ZNRecord record = new ZNRecord(JOB_NAME);
JobContext jobContext = new JobContext(record);
    jobContext.setStartTime(0L);
    jobContext.setName(JOB_NAME);
jobContext.setPartitionState(PARTITION_ID, taskPartitionState);
jobContext.setPartitionTarget(PARTITION_ID, TARGET_RESOURCES + "_0");
return jobContext;
}
private CurrentStateOutput prepareCurrentState(TaskPartitionState currentState,
TaskPartitionState messageFromState, TaskPartitionState messageToState) {
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
currentStateOutput.setResourceStateModelDef(JOB_NAME, "TASK");
Partition taskPartition = new Partition(JOB_NAME + "_" + PARTITION_NAME);
currentStateOutput.setCurrentState(JOB_NAME, taskPartition, INSTANCE_NAME, currentState.name());
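    // Model an in-flight state transition: the controller should observe this
    // pending message instead of issuing a conflicting one.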
Message message = new Message(Message.MessageType.STATE_TRANSITION, "123456789");
message.setFromState(messageFromState.name());
message.setToState(messageToState.name());
currentStateOutput.setPendingMessage(JOB_NAME, taskPartition, INSTANCE_NAME, message);
return currentStateOutput;
}
}

// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestDropTerminalTasksUponReset.java
package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.Partition;
import org.apache.helix.model.ResourceAssignment;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestDropTerminalTasksUponReset {
  /**
   * Unit test verifying that every task partition whose requested state is
   * DROPPED gets added to tasksToDrop, regardless of its current state.
   */
@Test
public void testDropAllTerminalTasksUponReset() {
Random random = new Random();
String jobName = "job";
String nodeName = "localhost";
int numTasks = 10;
// Create an Iterable of LiveInstances
Collection<String> liveInstances = new HashSet<>();
liveInstances.add("localhost");
// Create a dummy ResourceAssignment
ResourceAssignment prevAssignment = new ResourceAssignment(jobName);
// Create allTaskPartitions
Set<Integer> allTaskPartitions = new HashSet<>();
// Create a mock CurrentStateOutput
CurrentStateOutput currentStateOutput = mock(CurrentStateOutput.class);
// Generate a CurrentStateMap
Map<Partition, Map<String, String>> currentStateMap = new HashMap<>();
when(currentStateOutput.getCurrentStateMap(jobName)).thenReturn(currentStateMap);
for (int i = 0; i < numTasks; i++) {
allTaskPartitions.add(i);
Partition task = new Partition(jobName + "_" + i);
currentStateMap.put(task, new HashMap<>());
// Pick some random currentState between COMPLETED and TASK_ERROR
String currentState = (random.nextBoolean()) ? TaskPartitionState.COMPLETED.name()
: TaskPartitionState.TASK_ERROR.name();
// First half of the tasks to be dropped on each instance
if (i < numTasks / 2) {
// requested state is DROPPED
currentStateMap.get(task).put("localhost", currentState);
when(currentStateOutput.getRequestedState(jobName, task, nodeName))
.thenReturn(TaskPartitionState.DROPPED.name());
} else {
// requested state is nothing
when(currentStateOutput.getRequestedState(jobName, task, nodeName)).thenReturn(null);
}
}
// Create an empty tasksToDrop
Map<String, Set<Integer>> tasksToDrop = new HashMap<>();
// Call the static method we are testing
JobDispatcher.getCurrentInstanceToTaskAssignments(liveInstances, currentStateOutput, jobName, tasksToDrop);
// Check that tasksToDrop has (numTasks / 2) partitions as we intended regardless of what the
// current states of the tasks were
Assert.assertEquals(numTasks / 2, tasksToDrop.get(nodeName).size());
}
}

// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestAssignableInstanceManager.java
package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.common.caches.TaskDataCache;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.task.assigner.AssignableInstance;
import org.apache.helix.task.assigner.TaskAssignResult;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestAssignableInstanceManager {
private static final int NUM_PARTICIPANTS = 3;
private static final int NUM_JOBS = 3;
private static final int NUM_TASKS = 3;
private static final String CLUSTER_NAME = "TestCluster_0";
private static final String INSTANCE_PREFIX = "Instance_";
private static final String JOB_PREFIX = "Job_";
private static final String TASK_PREFIX = "Task_";
private ClusterConfig _clusterConfig;
private MockTaskDataCache _taskDataCache;
private AssignableInstanceManager _assignableInstanceManager;
private Map<String, LiveInstance> _liveInstances;
private Map<String, InstanceConfig> _instanceConfigs;
private Set<String> _taskIDs; // To keep track of what tasks were created
@BeforeClass
public void beforeClass() {
System.out.println(
"START " + this.getClass().getSimpleName() + " at " + new Date(System.currentTimeMillis()));
_clusterConfig = new ClusterConfig(CLUSTER_NAME);
_taskDataCache = new MockTaskDataCache(CLUSTER_NAME);
_liveInstances = new HashMap<>();
_instanceConfigs = new HashMap<>();
_taskIDs = new HashSet<>();
// Populate live instances and their corresponding instance configs
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
String instanceName = INSTANCE_PREFIX + i;
LiveInstance liveInstance = new LiveInstance(instanceName);
InstanceConfig instanceConfig = new InstanceConfig(instanceName);
_liveInstances.put(instanceName, liveInstance);
_instanceConfigs.put(instanceName, instanceConfig);
}
// Populate taskDataCache with JobConfigs and JobContexts
for (int i = 0; i < NUM_JOBS; i++) {
String jobName = JOB_PREFIX + i;
// Create a JobConfig
JobConfig.Builder jobConfigBuilder = new JobConfig.Builder();
      List<TaskConfig> taskConfigs = new ArrayList<>();
      Set<String> jobTaskIDs = new HashSet<>(); // task IDs belonging to this job only
for (int j = 0; j < NUM_TASKS; j++) {
String taskID = jobName + "_" + TASK_PREFIX + j;
TaskConfig.Builder taskConfigBuilder = new TaskConfig.Builder();
taskConfigBuilder.setTaskId(taskID);
        jobTaskIDs.add(taskID);
        _taskIDs.add(taskID); // keep the global record of every task created
taskConfigs.add(taskConfigBuilder.build());
}
jobConfigBuilder.setJobId(jobName);
jobConfigBuilder.addTaskConfigs(taskConfigs);
jobConfigBuilder.setCommand("MOCK");
jobConfigBuilder.setWorkflow("WORKFLOW");
_taskDataCache.addJobConfig(jobName, jobConfigBuilder.build());
// Create a JobContext
ZNRecord znRecord = new ZNRecord(JOB_PREFIX + "context_" + i);
      JobContext jobContext = new MockJobContext(znRecord, _liveInstances, jobTaskIDs);
      _taskDataCache.addJobContext(jobName, jobContext);
}
// Create an AssignableInstanceManager and build
_assignableInstanceManager = new AssignableInstanceManager();
_assignableInstanceManager.buildAssignableInstances(_clusterConfig, _taskDataCache,
_liveInstances, _instanceConfigs);
}
@Test
public void testGetAssignableInstanceMap() {
Map<String, AssignableInstance> assignableInstanceMap =
_assignableInstanceManager.getAssignableInstanceMap();
for (String liveInstance : _liveInstances.keySet()) {
Assert.assertTrue(assignableInstanceMap.containsKey(liveInstance));
}
}
@Test(dependsOnMethods = "testGetAssignableInstanceMap")
public void testGetTaskAssignResultMap() {
Map<String, TaskAssignResult> taskAssignResultMap =
_assignableInstanceManager.getTaskAssignResultMap();
for (String taskID : _taskIDs) {
Assert.assertTrue(taskAssignResultMap.containsKey(taskID));
}
}
@Test(dependsOnMethods = "testGetTaskAssignResultMap")
public void testUpdateAssignableInstances() {
Map<String, LiveInstance> newLiveInstances = new HashMap<>();
Map<String, InstanceConfig> newInstanceConfigs = new HashMap<>();
// A brand new set of LiveInstances
for (int i = NUM_PARTICIPANTS; i < NUM_PARTICIPANTS + 3; i++) {
String instanceName = INSTANCE_PREFIX + i;
newLiveInstances.put(instanceName, new LiveInstance(instanceName));
newInstanceConfigs.put(instanceName, new InstanceConfig(instanceName));
}
_assignableInstanceManager.updateAssignableInstances(_clusterConfig, newLiveInstances,
newInstanceConfigs);
// Check that the assignable instance map contains new instances and there are no
// TaskAssignResults due to previous live instances being removed
Assert.assertEquals(_assignableInstanceManager.getTaskAssignResultMap().size(), 0);
Assert.assertEquals(_assignableInstanceManager.getAssignableInstanceMap().size(),
newLiveInstances.size());
for (String instance : newLiveInstances.keySet()) {
Assert
.assertTrue(_assignableInstanceManager.getAssignableInstanceMap().containsKey(instance));
}
}
public class MockTaskDataCache extends TaskDataCache {
private Map<String, JobConfig> _jobConfigMap;
private Map<String, WorkflowConfig> _workflowConfigMap;
private Map<String, JobContext> _jobContextMap;
private Map<String, WorkflowContext> _workflowContextMap;
public MockTaskDataCache(String clusterName) {
super(clusterName);
_jobConfigMap = new HashMap<>();
_workflowConfigMap = new HashMap<>();
_jobContextMap = new HashMap<>();
_workflowContextMap = new HashMap<>();
}
public void addJobConfig(String jobName, JobConfig jobConfig) {
_jobConfigMap.put(jobName, jobConfig);
}
public void addJobContext(String jobName, JobContext jobContext) {
_jobContextMap.put(jobName, jobContext);
}
public void addWorkflowConfig(String workflowName, WorkflowConfig workflowConfig) {
_workflowConfigMap.put(workflowName, workflowConfig);
}
public void addWorkflowContext(String workflowName, WorkflowContext workflowContext) {
_workflowContextMap.put(workflowName, workflowContext);
}
@Override
public JobContext getJobContext(String jobName) {
return _jobContextMap.get(jobName);
}
@Override
public Map<String, JobConfig> getJobConfigMap() {
return _jobConfigMap;
}
@Override
public Map<String, WorkflowConfig> getWorkflowConfigMap() {
return _workflowConfigMap;
}
public Map<String, JobContext> getJobContextMap() {
return _jobContextMap;
}
public Map<String, WorkflowContext> getWorkflowContextMap() {
return _workflowContextMap;
}
}
public class MockJobContext extends JobContext {
private Set<Integer> _taskPartitionSet;
private Map<Integer, TaskPartitionState> _taskPartitionStateMap;
private Map<Integer, String> _partitionToTaskIDMap;
private Map<Integer, String> _taskToInstanceMap;
public MockJobContext(ZNRecord record, Map<String, LiveInstance> liveInstanceMap,
Set<String> taskIDs) {
super(record);
_taskPartitionSet = new HashSet<>();
_taskPartitionStateMap = new HashMap<>();
_partitionToTaskIDMap = new HashMap<>();
_taskToInstanceMap = new HashMap<>();
List<String> taskIDList = new ArrayList<>(taskIDs);
for (int i = 0; i < taskIDList.size(); i++) {
_taskPartitionSet.add(i);
_taskPartitionStateMap.put(i, TaskPartitionState.RUNNING);
_partitionToTaskIDMap.put(i, taskIDList.get(i));
String someInstance = liveInstanceMap.keySet().iterator().next();
_taskToInstanceMap.put(i, someInstance);
}
}
@Override
public Set<Integer> getPartitionSet() {
return _taskPartitionSet;
}
@Override
public TaskPartitionState getPartitionState(int p) {
return _taskPartitionStateMap.get(p);
}
@Override
public String getAssignedParticipant(int p) {
return _taskToInstanceMap.get(p);
}
@Override
public String getTaskIdForPartition(int p) {
return _partitionToTaskIDMap.get(p);
}
}
}

// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestTaskDriver.java
package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.ConfigAccessor;
import org.apache.helix.integration.task.TaskTestBase;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestTaskDriver extends TaskTestBase {
// Use a thread pool size that's different from the default value for test
private static final int TEST_THREAD_POOL_SIZE = TaskConstants.DEFAULT_TASK_THREAD_POOL_SIZE + 1;
private static final String NON_EXISTENT_INSTANCE_NAME = "NON_EXISTENT_INSTANCE_NAME";
private TaskDriver _taskDriver;
private ConfigAccessor _configAccessor;
@BeforeClass
public void beforeClass() throws Exception {
super.beforeClass();
_taskDriver = new TaskDriver(_controller);
_configAccessor = _controller.getConfigAccessor();
}
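  // A sketch of how Helix resolves the task thread pool size, which these tests
  // exercise: the per-instance target (InstanceConfig) takes priority over the
  // global target (ClusterConfig), which in turn falls back to the default size.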
@Test
public void testSetTargetTaskThreadPoolSize() {
String validInstanceName = _participants[0].getInstanceName();
_taskDriver.setTargetTaskThreadPoolSize(validInstanceName, TEST_THREAD_POOL_SIZE);
InstanceConfig instanceConfig =
_configAccessor.getInstanceConfig(CLUSTER_NAME, validInstanceName);
Assert.assertEquals(instanceConfig.getTargetTaskThreadPoolSize(), TEST_THREAD_POOL_SIZE);
}
@Test(dependsOnMethods = "testSetTargetTaskThreadPoolSize", expectedExceptions = IllegalArgumentException.class)
public void testSetTargetTaskThreadPoolSizeWrongInstanceName() {
_taskDriver.setTargetTaskThreadPoolSize(NON_EXISTENT_INSTANCE_NAME, TEST_THREAD_POOL_SIZE);
}
@Test(dependsOnMethods = "testSetTargetTaskThreadPoolSizeWrongInstanceName")
public void testGetTargetTaskThreadPoolSize() {
String validInstanceName = _participants[0].getInstanceName();
Assert.assertEquals(_taskDriver.getTargetTaskThreadPoolSize(validInstanceName),
TEST_THREAD_POOL_SIZE);
}
@Test(dependsOnMethods = "testGetTargetTaskThreadPoolSize", expectedExceptions = IllegalArgumentException.class)
public void testGetTargetTaskThreadPoolSizeWrongInstanceName() {
_taskDriver.getTargetTaskThreadPoolSize(NON_EXISTENT_INSTANCE_NAME);
}
@Test(dependsOnMethods = "testGetTargetTaskThreadPoolSizeWrongInstanceName")
public void testSetGlobalTargetTaskThreadPoolSize() {
_taskDriver.setGlobalTargetTaskThreadPoolSize(TEST_THREAD_POOL_SIZE);
ClusterConfig clusterConfig = _configAccessor.getClusterConfig(CLUSTER_NAME);
Assert.assertEquals(clusterConfig.getGlobalTargetTaskThreadPoolSize(), TEST_THREAD_POOL_SIZE);
}
@Test(dependsOnMethods = "testSetGlobalTargetTaskThreadPoolSize")
public void testGetGlobalTargetTaskThreadPoolSize() {
Assert.assertEquals(_taskDriver.getGlobalTargetTaskThreadPoolSize(), TEST_THREAD_POOL_SIZE);
}
@Test(dependsOnMethods = "testGetGlobalTargetTaskThreadPoolSize")
public void testGetCurrentTaskThreadPoolSize() {
String validInstanceName = _participants[0].getInstanceName();
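    // The participants connected before the target sizes were changed, and the
    // thread pool is only rebuilt on reconnect, so the live instance still
    // reports the default size.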
Assert.assertEquals(_taskDriver.getCurrentTaskThreadPoolSize(validInstanceName),
TaskConstants.DEFAULT_TASK_THREAD_POOL_SIZE);
}
@Test(dependsOnMethods = "testGetCurrentTaskThreadPoolSize", expectedExceptions = IllegalArgumentException.class)
public void testGetCurrentTaskThreadPoolSizeWrongInstanceName() {
_taskDriver.getCurrentTaskThreadPoolSize(NON_EXISTENT_INSTANCE_NAME);
}
}
| 9,832 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestTaskCreateThrottling.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.List;
import org.apache.helix.HelixException;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.TaskTestBase;
import org.apache.helix.integration.task.TaskTestUtil;
import org.apache.helix.integration.task.WorkflowGenerator;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestTaskCreateThrottling extends TaskTestBase {
@BeforeClass
public void beforeClass() throws Exception {
setSingleTestEnvironment();
super.beforeClass();
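// Lower the driver's package-private config-count limit so the throttling path is easy to hit.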
_driver._configsLimitation = 10;
}
@Test
public void testTaskCreatingThrottle() {
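// Build a workflow with one more job than the configured limit; starting it should be rejected.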
Workflow flow = WorkflowGenerator.generateDefaultRepeatedJobWorkflowBuilder("hugeWorkflow",
(int) _driver._configsLimitation + 1).build();
try {
_driver.start(flow);
Assert.fail("Creating a huge workflow contains more jobs than expected should fail.");
} catch (HelixException e) {
// expected
}
}
@Test(dependsOnMethods = "testTaskCreatingThrottle")
public void testEnqueueJobsThrottle() throws InterruptedException {
List<String> jobs = new ArrayList<>();
// Use a short name for testing
JobQueue.Builder builder = TaskTestUtil.buildJobQueue("Q");
builder.setCapacity(Integer.MAX_VALUE);
JobConfig.Builder jobBuilder =
new JobConfig.Builder().setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB)
.setCommand(MockTask.TASK_COMMAND).setMaxAttemptsPerTask(2)
.setJobCommandConfigMap(WorkflowGenerator.DEFAULT_COMMAND_CONFIG).setExpiry(1L);
for (int i = 0; i < _driver._configsLimitation - 5; i++) {
builder.enqueueJob("J" + i, jobBuilder);
jobs.add("J" + i);
}
JobQueue jobQueue = builder.build();
// Check that enqueueing a number of jobs below the threshold succeeds.
_driver.start(jobQueue);
_driver.stop(jobQueue.getName());
_driver.pollForWorkflowState(jobQueue.getName(), TaskState.STOPPED);
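// With the queue stopped, keep enqueueing until the total job count exceeds the limit;
// enqueueJob should then throw.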
try {
for (int i = 0; i < _driver._configsLimitation; i++) {
_driver.enqueueJob(jobQueue.getName(), "EJ" + i, jobBuilder);
jobs.add("EJ" + i);
}
Assert.fail("Enqueuing a huge number of jobs should fail.");
} catch (HelixException e) {
// expected
}
for (String job : jobs) {
try {
_driver.deleteJob(jobQueue.getName(), job);
} catch (Exception e) {
// OK
}
}
_driver.delete(jobQueue.getName());
}
}
| 9,833 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TaskSynchronizedTestBase.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.HelixException;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.TestHelper;
import org.apache.helix.common.ZkTestBase;
import org.apache.helix.controller.rebalancer.strategy.CrushEdRebalanceStrategy;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.WorkflowGenerator;
import org.apache.helix.mock.statemodel.MockTaskStateModelFactory;
import org.apache.helix.model.IdealState;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.participant.statemachine.StateModelFactory;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.ClusterVerifiers.BestPossibleExternalViewVerifier;
import org.apache.helix.tools.ClusterVerifiers.ZkHelixClusterVerifier;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
public class TaskSynchronizedTestBase extends ZkTestBase {
protected int _numNodes = 5;
protected int _startPort = 12918;
protected int _numPartitions = 20;
protected int _numReplicas = 3;
protected int _numDbs = 1;
protected Boolean _partitionVary = true;
protected Boolean _instanceGroupTag = false;
protected ClusterControllerManager _controller;
protected HelixManager _manager;
protected TaskDriver _driver;
protected List<String> _testDbs = new ArrayList<>();
protected final String MASTER_SLAVE_STATE_MODEL = "MasterSlave";
protected final String CLUSTER_NAME = CLUSTER_PREFIX + "_" + getShortClassName();
protected MockParticipantManager[] _participants;
protected ZkHelixClusterVerifier _clusterVerifier;
@BeforeClass
public void beforeClass() throws Exception {
super.beforeClass();
_participants = new MockParticipantManager[_numNodes];
_gSetupTool.addCluster(CLUSTER_NAME, true);
setupParticipants();
setupDBs();
startParticipants();
createManagers();
_clusterVerifier =
new BestPossibleExternalViewVerifier.Builder(CLUSTER_NAME).setZkClient(_gZkClient)
.setWaitTillVerify(TestHelper.DEFAULT_REBALANCE_PROCESSING_WAIT_TIME)
.build();
}
@AfterClass
public void afterClass() throws Exception {
if (_controller != null && _controller.isConnected()) {
_controller.syncStop();
}
if (_manager != null && _manager.isConnected()) {
_manager.disconnect();
}
stopParticipants();
deleteCluster(CLUSTER_NAME);
}
protected void setupDBs() {
setupDBs(_gSetupTool);
}
protected void setupDBs(ClusterSetup clusterSetup) {
// Set up target db
if (_numDbs > 1) {
for (int i = 0; i < _numDbs; i++) {
int varyNum = _partitionVary ? 10 * i : 0;
String db = WorkflowGenerator.DEFAULT_TGT_DB + i;
clusterSetup.addResourceToCluster(CLUSTER_NAME, db, _numPartitions + varyNum,
MASTER_SLAVE_STATE_MODEL, IdealState.RebalanceMode.FULL_AUTO.toString(),
CrushEdRebalanceStrategy.class.getName());
clusterSetup.rebalanceStorageCluster(CLUSTER_NAME, db, _numReplicas);
_testDbs.add(db);
}
} else {
if (_instanceGroupTag) {
clusterSetup
.addResourceToCluster(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB, _numPartitions,
"OnlineOffline", IdealState.RebalanceMode.FULL_AUTO.name(),
CrushEdRebalanceStrategy.class.getName());
IdealState idealState = clusterSetup.getClusterManagementTool()
.getResourceIdealState(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB);
idealState.setInstanceGroupTag("TESTTAG0");
clusterSetup.getClusterManagementTool().setResourceIdealState(CLUSTER_NAME,
WorkflowGenerator.DEFAULT_TGT_DB, idealState);
} else {
clusterSetup.addResourceToCluster(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB,
_numPartitions, MASTER_SLAVE_STATE_MODEL, IdealState.RebalanceMode.FULL_AUTO.name(),
CrushEdRebalanceStrategy.class.getName());
}
clusterSetup.rebalanceStorageCluster(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB,
_numReplicas);
}
}
protected void setupParticipants() {
setupParticipants(_gSetupTool);
}
protected void setupParticipants(ClusterSetup setupTool) {
_participants = new MockParticipantManager[_numNodes];
for (int i = 0; i < _numNodes; i++) {
String storageNodeName = PARTICIPANT_PREFIX + "_" + (_startPort + i);
setupTool.addInstanceToCluster(CLUSTER_NAME, storageNodeName);
if (_instanceGroupTag) {
setupTool.addInstanceTag(CLUSTER_NAME, storageNodeName, "TESTTAG" + i);
}
}
}
protected void startParticipants() {
startParticipants(ZK_ADDR, _numNodes);
}
protected void startParticipants(String zkAddr) {
startParticipants(zkAddr, _numNodes);
}
protected void startParticipants(int numNodes) {
for (int i = 0; i < numNodes; i++) {
startParticipant(ZK_ADDR, i);
}
}
protected void startParticipants(String zkAddr, int numNodes) {
for (int i = 0; i < numNodes; i++) {
startParticipant(zkAddr, i);
}
}
protected void startParticipant(int i) {
startParticipant(ZK_ADDR, i);
}
protected void startParticipant(String zkAddr, int i) {
if (_participants[i] != null) {
stopParticipant(i);
}
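// Map the mock task command to a factory so the participant can create MockTask instances.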
Map<String, TaskFactory> taskFactoryReg = new HashMap<>();
taskFactoryReg.put(MockTask.TASK_COMMAND, MockTask::new);
String instanceName = PARTICIPANT_PREFIX + "_" + (_startPort + i);
_participants[i] = new MockParticipantManager(zkAddr, CLUSTER_NAME, instanceName);
// Register a Task state model factory.
StateMachineEngine stateMachine = _participants[i].getStateMachineEngine();
stateMachine.registerStateModelFactory(TaskConstants.STATE_MODEL_NAME,
new TaskStateModelFactory(_participants[i], taskFactoryReg));
_participants[i].syncStart();
}
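// Illustrative sketch (not part of the test setup): a subclass could register extra task
// commands the same way before syncStart(). The "ReindexTask" command name below is
// hypothetical; only MockTask.TASK_COMMAND is actually registered above.
//   taskFactoryReg.put("ReindexTask", context -> new MockTask(context));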
protected void stopParticipants() {
for (int i = 0; i < _numNodes; i++) {
stopParticipant(i);
}
}
protected void stopParticipant(int i) {
if (_participants.length <= i) {
throw new HelixException(String.format(
"Can't stop participant %s; only %s participants were set up.", i,
_participants.length));
}
if (_participants[i] != null) {
if (_participants[i].isConnected()) {
_participants[i].syncStop();
}
// Shutdown the state model factories to close all threads.
StateMachineEngine stateMachine = _participants[i].getStateMachineEngine();
if (stateMachine != null) {
StateModelFactory<?> stateModelFactory =
stateMachine.getStateModelFactory(TaskConstants.STATE_MODEL_NAME);
if (stateModelFactory instanceof TaskStateModelFactory) {
((TaskStateModelFactory) stateModelFactory).shutdownNow();
}
if (stateModelFactory instanceof MockTaskStateModelFactory) {
((MockTaskStateModelFactory) stateModelFactory).shutdownNow();
}
}
}
}
protected void createManagers() throws Exception {
createManagers(ZK_ADDR, CLUSTER_NAME);
}
protected void createManagers(String zkAddr, String clusterName) throws Exception {
_manager = HelixManagerFactory.getZKHelixManager(clusterName, "Admin",
InstanceType.ADMINISTRATOR, zkAddr);
_manager.connect();
_driver = new TaskDriver(_manager);
}
public void setSingleTestEnvironment() {
_numDbs = 1;
_numNodes = 1;
_numPartitions = 1;
_numReplicas = 1;
}
}
| 9,834 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestCleanExpiredJobs.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashSet;
import java.util.Set;
import org.apache.helix.HelixException;
import org.apache.helix.TestHelper;
import org.apache.helix.controller.dataproviders.WorkflowControllerDataProvider;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.TaskTestUtil;
import org.apache.helix.integration.task.WorkflowGenerator;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestCleanExpiredJobs extends TaskSynchronizedTestBase {
private WorkflowControllerDataProvider _cache;
@BeforeClass
public void beforeClass() throws Exception {
setSingleTestEnvironment();
super.beforeClass();
}
@Test
public void testCleanExpiredJobs() throws Exception {
String queue = TestHelper.getTestMethodName();
JobQueue.Builder builder = TaskTestUtil.buildJobQueue(queue);
JobConfig.Builder jobBuilder =
new JobConfig.Builder().setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB)
.setCommand(MockTask.TASK_COMMAND).setMaxAttemptsPerTask(2)
.setJobCommandConfigMap(WorkflowGenerator.DEFAULT_COMMAND_CONFIG).setExpiry(1L);
long startTime = System.currentTimeMillis();
for (int i = 0; i < 8; i++) {
builder.enqueueJob("JOB" + i, jobBuilder);
}
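// Write contexts directly so all eight jobs appear COMPLETED at startTime; their 1 ms
// expiry has therefore already elapsed.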
for (int i = 0; i < 8; i++) {
TaskUtil.setJobContext(_manager, TaskUtil.getNamespacedJobName(queue, "JOB" + i),
TaskTestUtil.buildJobContext(startTime, startTime, TaskPartitionState.COMPLETED));
}
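// Give JOB4 and JOB5 a much later finish time so their expiry has not elapsed yet.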
for (int i = 4; i < 6; i++) {
TaskUtil.setJobContext(_manager, TaskUtil.getNamespacedJobName(queue, "JOB" + i),
TaskTestUtil
.buildJobContext(startTime, startTime + 100000, TaskPartitionState.COMPLETED));
}
WorkflowContext workflowContext = TaskTestUtil
.buildWorkflowContext(queue, TaskState.IN_PROGRESS, null, TaskState.COMPLETED,
TaskState.FAILED, TaskState.ABORTED, TaskState.COMPLETED, TaskState.COMPLETED,
TaskState.COMPLETED, TaskState.IN_PROGRESS, TaskState.NOT_STARTED);
Set<String> jobsLeft = new HashSet<>();
jobsLeft.add(TaskUtil.getNamespacedJobName(queue, "JOB" + 1));
jobsLeft.add(TaskUtil.getNamespacedJobName(queue, "JOB" + 2));
jobsLeft.add(TaskUtil.getNamespacedJobName(queue, "JOB" + 4));
jobsLeft.add(TaskUtil.getNamespacedJobName(queue, "JOB" + 5));
jobsLeft.add(TaskUtil.getNamespacedJobName(queue, "JOB" + 6));
jobsLeft.add(TaskUtil.getNamespacedJobName(queue, "JOB" + 7));
_driver.start(builder.build());
_cache = TaskTestUtil.buildDataProvider(_manager.getHelixDataAccessor(), CLUSTER_NAME);
TaskUtil.setWorkflowContext(_manager, queue, workflowContext);
TaskTestUtil.calculateTaskSchedulingStage(_cache, _manager);
Thread.sleep(500);
WorkflowConfig workflowConfig = _driver.getWorkflowConfig(queue);
Assert.assertEquals(workflowConfig.getJobDag().getAllNodes(), jobsLeft);
_cache.requireFullRefresh();
_cache.refresh(_manager.getHelixDataAccessor());
TaskTestUtil.calculateTaskSchedulingStage(_cache, _manager);
Thread.sleep(500);
workflowContext = _driver.getWorkflowContext(queue);
Assert.assertTrue(workflowContext.getLastJobPurgeTime() > startTime
&& workflowContext.getLastJobPurgeTime() < System.currentTimeMillis());
}
@Test
void testNotCleanJobsDueToParentFail() throws Exception {
String queue = TestHelper.getTestMethodName();
JobQueue.Builder builder = TaskTestUtil.buildJobQueue(queue);
JobConfig.Builder jobBuilder =
new JobConfig.Builder().setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB)
.setCommand(MockTask.TASK_COMMAND).setMaxAttemptsPerTask(2)
.setJobCommandConfigMap(WorkflowGenerator.DEFAULT_COMMAND_CONFIG).setExpiry(1L);
long startTime = System.currentTimeMillis();
builder.enqueueJob("JOB0", jobBuilder);
builder.enqueueJob("JOB1", jobBuilder);
builder.addParentChildDependency("JOB0", "JOB1");
TaskUtil.setJobContext(_manager, TaskUtil.getNamespacedJobName(queue, "JOB0"),
TaskTestUtil.buildJobContext(startTime, startTime, TaskPartitionState.COMPLETED));
WorkflowContext workflowContext = TaskTestUtil
.buildWorkflowContext(queue, TaskState.IN_PROGRESS, null, TaskState.FAILED,
TaskState.FAILED);
_driver.start(builder.build());
_cache = TaskTestUtil.buildDataProvider(_manager.getHelixDataAccessor(), CLUSTER_NAME);
TaskUtil.setWorkflowContext(_manager, queue, workflowContext);
TaskTestUtil.calculateTaskSchedulingStage(_cache, _manager);
WorkflowConfig workflowConfig = _driver.getWorkflowConfig(queue);
Assert.assertEquals(workflowConfig.getJobDag().getAllNodes().size(), 2);
}
@Test
void testNotCleanJobsThroughEnqueueJob() throws Exception {
int capacity = 5;
String queue = TestHelper.getTestMethodName();
JobQueue.Builder builder = TaskTestUtil.buildJobQueue(queue, capacity);
JobConfig.Builder jobBuilder =
new JobConfig.Builder().setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB)
.setCommand(MockTask.TASK_COMMAND).setMaxAttemptsPerTask(2)
.setJobCommandConfigMap(WorkflowGenerator.DEFAULT_COMMAND_CONFIG).setExpiry(1L);
long startTime = System.currentTimeMillis();
for (int i = 0; i < capacity; i++) {
builder.enqueueJob("JOB" + i, jobBuilder);
}
_driver.start(builder.build());
try {
// should fail here since the queue is full.
_driver.enqueueJob(queue, "JOB" + capacity, jobBuilder);
Assert.fail("Queue is not full.");
} catch (HelixException e) {
Assert.assertTrue(e.getMessage().contains("queue " + queue + " is full"));
}
for (int i = 0; i < capacity; i++) {
TaskUtil.setJobContext(_manager, TaskUtil.getNamespacedJobName(queue, "JOB" + i),
TaskTestUtil.buildJobContext(startTime, startTime, TaskPartitionState.COMPLETED));
}
WorkflowContext workflowContext = TaskTestUtil
.buildWorkflowContext(queue, TaskState.IN_PROGRESS, null, TaskState.COMPLETED,
TaskState.COMPLETED, TaskState.FAILED, TaskState.IN_PROGRESS);
TaskUtil.setWorkflowContext(_manager, queue, workflowContext);
_driver.enqueueJob(queue, "JOB" + capacity, jobBuilder);
WorkflowConfig workflowConfig = _driver.getWorkflowConfig(queue);
Assert.assertEquals(workflowConfig.getJobDag().getAllNodes().size(), capacity - 1);
}
}
| 9,835 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestScheduleDelayJobs.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.TestHelper;
import org.apache.helix.controller.dataproviders.WorkflowControllerDataProvider;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.TaskTestUtil;
import org.apache.helix.integration.task.WorkflowGenerator;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestScheduleDelayJobs extends TaskSynchronizedTestBase {
private TestRebalancer _testRebalancer = new TestRebalancer();
private WorkflowControllerDataProvider _cache;
@BeforeClass
public void beforeClass() throws Exception {
setSingleTestEnvironment();
super.beforeClass();
}
@Test
public void testScheduleDelayTime() throws Exception {
String workflowName = TestHelper.getTestMethodName();
Workflow.Builder builder = new Workflow.Builder(workflowName);
JobConfig.Builder jobBuilder =
new JobConfig.Builder().setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB)
.setCommand(MockTask.TASK_COMMAND).setMaxAttemptsPerTask(2)
.setJobCommandConfigMap(WorkflowGenerator.DEFAULT_COMMAND_CONFIG);
builder.addParentChildDependency("JOB0", "JOB1");
builder.addJob("JOB0", jobBuilder);
builder.addJob("JOB1", jobBuilder.setExecutionDelay(10000L));
WorkflowContext workflowContext = TaskTestUtil
.buildWorkflowContext(workflowName, TaskState.IN_PROGRESS, null, TaskState.COMPLETED,
TaskState.NOT_STARTED);
_driver.start(builder.build());
_cache = TaskTestUtil.buildDataProvider(_manager.getHelixDataAccessor(), CLUSTER_NAME);
long currentTime = System.currentTimeMillis();
TaskUtil.setWorkflowContext(_manager, workflowName, workflowContext);
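// Run the scheduling stage twice to let the pipeline settle, so JOB1's delayed start is
// registered with the rebalance scheduler.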
TaskTestUtil.calculateTaskSchedulingStage(_cache, _manager);
TaskTestUtil.calculateTaskSchedulingStage(_cache, _manager);
Assert.assertTrue(_testRebalancer.getRebalanceTime(workflowName) - currentTime >= 10000L);
}
@Test
public void testScheduleStartTime() throws Exception {
String workflowName = TestHelper.getTestMethodName();
Workflow.Builder builder = new Workflow.Builder(workflowName);
JobConfig.Builder jobBuilder =
new JobConfig.Builder().setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB)
.setCommand(MockTask.TASK_COMMAND).setMaxAttemptsPerTask(2)
.setJobCommandConfigMap(WorkflowGenerator.DEFAULT_COMMAND_CONFIG);
long currentTime = System.currentTimeMillis() + 10000L;
builder.addParentChildDependency("JOB0", "JOB2");
builder.addParentChildDependency("JOB1", "JOB2");
builder.addJob("JOB0", jobBuilder);
builder.addJob("JOB1", jobBuilder);
builder.addJob("JOB2", jobBuilder.setExecutionStart(currentTime));
WorkflowContext workflowContext = TaskTestUtil
.buildWorkflowContext(workflowName, TaskState.IN_PROGRESS, null, TaskState.COMPLETED,
TaskState.COMPLETED, TaskState.NOT_STARTED);
_driver.start(builder.build());
_cache = TaskTestUtil.buildDataProvider(_manager.getHelixDataAccessor(), CLUSTER_NAME);
TaskUtil.setWorkflowContext(_manager, workflowName, workflowContext);
TaskTestUtil.calculateTaskSchedulingStage(_cache, _manager);
TaskTestUtil.calculateTaskSchedulingStage(_cache, _manager);
Assert.assertEquals(_testRebalancer.getRebalanceTime(workflowName), currentTime);
}
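// Exposes the rebalance time recorded in the rebalance scheduler, which is shared (static)
// across rebalancer instances, including the one driven by calculateTaskSchedulingStage.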
private class TestRebalancer extends WorkflowRebalancer {
public long getRebalanceTime(String workflow) {
return _rebalanceScheduler.getRebalanceTime(workflow);
}
}
}
| 9,836 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestTaskUtil.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.controller.dataproviders.WorkflowControllerDataProvider;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.integration.task.TaskTestBase;
import org.apache.helix.integration.task.TaskTestUtil;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestTaskUtil extends TaskTestBase {
// This value has to be different from the default value to verify correctness
private static final int TEST_TARGET_TASK_THREAD_POOL_SIZE =
TaskConstants.DEFAULT_TASK_THREAD_POOL_SIZE + 1;
@Test
public void testGetExpiredJobsFromCache() {
String workflowName = "TEST_WORKFLOW";
JobQueue.Builder queueBuilder = TaskTestUtil.buildJobQueue(workflowName);
JobConfig.Builder jobBuilder_0 =
new JobConfig.Builder().setJobId("Job_0").setTargetResource("1").setCommand("1")
.setExpiry(1L);
JobConfig.Builder jobBuilder_1 =
new JobConfig.Builder().setJobId("Job_1").setTargetResource("1").setCommand("1")
.setExpiry(1L);
JobConfig.Builder jobBuilder_2 =
new JobConfig.Builder().setJobId("Job_2").setTargetResource("1").setCommand("1")
.setExpiry(1L);
JobConfig.Builder jobBuilder_3 =
new JobConfig.Builder().setJobId("Job_3").setTargetResource("1").setCommand("1")
.setExpiry(1L);
Workflow jobQueue =
queueBuilder.enqueueJob("Job_0", jobBuilder_0).enqueueJob("Job_1", jobBuilder_1)
.enqueueJob("Job_2", jobBuilder_2).enqueueJob("Job_3", jobBuilder_3).build();
WorkflowContext workflowContext = mock(WorkflowContext.class);
Map<String, TaskState> jobStates = new HashMap<>();
jobStates.put(workflowName + "_Job_0", TaskState.COMPLETED);
jobStates.put(workflowName + "_Job_1", TaskState.COMPLETED);
jobStates.put(workflowName + "_Job_2", TaskState.FAILED);
jobStates.put(workflowName + "_Job_3", TaskState.COMPLETED);
when(workflowContext.getJobStates()).thenReturn(jobStates);
JobConfig jobConfig = mock(JobConfig.class);
WorkflowControllerDataProvider workflowControllerDataProvider =
mock(WorkflowControllerDataProvider.class);
// Job_0's config is missing (dangling job), so it must be flagged for purging.
when(workflowControllerDataProvider.getJobConfig(workflowName + "_Job_0")).thenReturn(null);
when(workflowControllerDataProvider.getJobConfig(workflowName + "_Job_1"))
.thenReturn(jobConfig);
when(workflowControllerDataProvider.getJobConfig(workflowName + "_Job_2"))
.thenReturn(jobConfig);
when(workflowControllerDataProvider.getJobConfig(workflowName + "_Job_3"))
.thenReturn(jobConfig);
JobContext jobContext = mock(JobContext.class);
when(jobContext.getFinishTime()).thenReturn(System.currentTimeMillis());
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_1")).thenReturn(null);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_2"))
.thenReturn(jobContext);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_3"))
.thenReturn(jobContext);
Set<String> expectedJobs = new HashSet<>();
expectedJobs.add(workflowName + "_Job_0");
expectedJobs.add(workflowName + "_Job_3");
Assert.assertEquals(TaskUtil
.getExpiredJobsFromCache(workflowControllerDataProvider, jobQueue.getWorkflowConfig(),
workflowContext, _manager), expectedJobs);
}
@Test
public void testGetExpiredJobsFromCacheFailPropagation() {
String workflowName = "TEST_WORKFLOW_COMPLEX_DAG";
Workflow.Builder workflowBuilder = new Workflow.Builder(workflowName);
// Workflow Schematic (the addParentChildDependency calls below define the exact edges):
//                0
//              / | \
//             /  |  \
//            1   2   3
//           /|\ /|\ /
//          4 5 6 7 8 9
// (cross edges not drawn: 4 -> 8, 5 -> 9)
for (int i = 0; i < 10; i++) {
workflowBuilder.addJob("Job_" + i,
new JobConfig.Builder().setJobId("Job_" + i).setTargetResource("1").setCommand("1"));
}
workflowBuilder.addParentChildDependency("Job_0", "Job_1");
workflowBuilder.addParentChildDependency("Job_0", "Job_2");
workflowBuilder.addParentChildDependency("Job_0", "Job_3");
workflowBuilder.addParentChildDependency("Job_1", "Job_4");
workflowBuilder.addParentChildDependency("Job_1", "Job_5");
workflowBuilder.addParentChildDependency("Job_1", "Job_6");
workflowBuilder.addParentChildDependency("Job_2", "Job_7");
workflowBuilder.addParentChildDependency("Job_2", "Job_8");
workflowBuilder.addParentChildDependency("Job_2", "Job_9");
workflowBuilder.addParentChildDependency("Job_3", "Job_7");
workflowBuilder.addParentChildDependency("Job_4", "Job_8");
workflowBuilder.addParentChildDependency("Job_5", "Job_9");
Workflow workflow = workflowBuilder.build();
WorkflowContext workflowContext = mock(WorkflowContext.class);
Map<String, TaskState> jobStates = new HashMap<>();
jobStates.put(workflowName + "_Job_0", TaskState.FAILED);
jobStates.put(workflowName + "_Job_1", TaskState.FAILED);
jobStates.put(workflowName + "_Job_2", TaskState.TIMED_OUT);
jobStates.put(workflowName + "_Job_3", TaskState.IN_PROGRESS);
jobStates.put(workflowName + "_Job_4", TaskState.FAILED);
jobStates.put(workflowName + "_Job_5", TaskState.FAILED);
jobStates.put(workflowName + "_Job_6", TaskState.IN_PROGRESS);
jobStates.put(workflowName + "_Job_7", TaskState.FAILED);
jobStates.put(workflowName + "_Job_8", TaskState.FAILED);
jobStates.put(workflowName + "_Job_9", TaskState.IN_PROGRESS);
when(workflowContext.getJobStates()).thenReturn(jobStates);
JobConfig jobConfig = mock(JobConfig.class);
when(jobConfig.getTerminalStateExpiry()).thenReturn(1L);
WorkflowControllerDataProvider workflowControllerDataProvider =
mock(WorkflowControllerDataProvider.class);
for (int i = 0; i < 10; i++) {
when(workflowControllerDataProvider.getJobConfig(workflowName + "_Job_" + i))
.thenReturn(jobConfig);
}
JobContext inProgressJobContext = mock(JobContext.class);
JobContext failedJobContext = mock(JobContext.class);
when(failedJobContext.getFinishTime()).thenReturn(System.currentTimeMillis() - 1L);
when(inProgressJobContext.getFinishTime()).thenReturn((long) WorkflowContext.UNFINISHED);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_0"))
.thenReturn(failedJobContext);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_1")).thenReturn(null);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_2"))
.thenReturn(failedJobContext);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_3"))
.thenReturn(inProgressJobContext);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_4"))
.thenReturn(failedJobContext);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_5")).thenReturn(null);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_6"))
.thenReturn(inProgressJobContext);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_7")).thenReturn(null);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_8")).thenReturn(null);
when(workflowControllerDataProvider.getJobContext(workflowName + "_Job_9"))
.thenReturn(inProgressJobContext);
Set<String> expectedJobs = new HashSet<>();
expectedJobs.add(workflowName + "_Job_0");
expectedJobs.add(workflowName + "_Job_1");
expectedJobs.add(workflowName + "_Job_2");
expectedJobs.add(workflowName + "_Job_4");
expectedJobs.add(workflowName + "_Job_5");
expectedJobs.add(workflowName + "_Job_7");
expectedJobs.add(workflowName + "_Job_8");
Assert.assertEquals(TaskUtil
.getExpiredJobsFromCache(workflowControllerDataProvider, workflow.getWorkflowConfig(),
workflowContext, _manager), expectedJobs);
}
@Test
public void testGetTaskThreadPoolSize() {
MockParticipantManager anyParticipantManager = _participants[0];
InstanceConfig instanceConfig =
InstanceConfig.toInstanceConfig(anyParticipantManager.getInstanceName());
instanceConfig.setTargetTaskThreadPoolSize(TEST_TARGET_TASK_THREAD_POOL_SIZE);
anyParticipantManager.getConfigAccessor()
.setInstanceConfig(anyParticipantManager.getClusterName(),
anyParticipantManager.getInstanceName(), instanceConfig);
ClusterConfig clusterConfig = new ClusterConfig(anyParticipantManager.getClusterName());
clusterConfig.setGlobalTargetTaskThreadPoolSize(TEST_TARGET_TASK_THREAD_POOL_SIZE + 1);
anyParticipantManager.getConfigAccessor()
.setClusterConfig(anyParticipantManager.getClusterName(), clusterConfig);
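// The instance-level target is set one higher than the cluster-level one on purpose, to
// prove that the instance-level value takes precedence.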
Assert.assertEquals(TaskUtil.getTargetThreadPoolSize(anyParticipantManager.getZkClient(),
anyParticipantManager.getClusterName(), anyParticipantManager.getInstanceName()),
TEST_TARGET_TASK_THREAD_POOL_SIZE);
}
@Test(dependsOnMethods = "testGetTaskThreadPoolSize")
public void testGetTaskThreadPoolSizeInstanceConfigUndefined() {
MockParticipantManager anyParticipantManager = _participants[0];
InstanceConfig instanceConfig =
InstanceConfig.toInstanceConfig(anyParticipantManager.getInstanceName());
anyParticipantManager.getConfigAccessor()
.setInstanceConfig(anyParticipantManager.getClusterName(),
anyParticipantManager.getInstanceName(), instanceConfig);
ClusterConfig clusterConfig = new ClusterConfig(anyParticipantManager.getClusterName());
clusterConfig.setGlobalTargetTaskThreadPoolSize(TEST_TARGET_TASK_THREAD_POOL_SIZE);
anyParticipantManager.getConfigAccessor()
.setClusterConfig(anyParticipantManager.getClusterName(), clusterConfig);
Assert.assertEquals(TaskUtil.getTargetThreadPoolSize(anyParticipantManager.getZkClient(),
anyParticipantManager.getClusterName(), anyParticipantManager.getInstanceName()),
TEST_TARGET_TASK_THREAD_POOL_SIZE);
}
@Test(dependsOnMethods = "testGetTaskThreadPoolSizeInstanceConfigUndefined")
public void testGetTaskThreadPoolSizeInstanceConfigDoesNotExist() {
MockParticipantManager anyParticipantManager = _participants[0];
HelixDataAccessor helixDataAccessor = anyParticipantManager.getHelixDataAccessor();
helixDataAccessor.removeProperty(
helixDataAccessor.keyBuilder().instanceConfig(anyParticipantManager.getInstanceName()));
ClusterConfig clusterConfig = new ClusterConfig(anyParticipantManager.getClusterName());
clusterConfig.setGlobalTargetTaskThreadPoolSize(TEST_TARGET_TASK_THREAD_POOL_SIZE);
anyParticipantManager.getConfigAccessor()
.setClusterConfig(anyParticipantManager.getClusterName(), clusterConfig);
Assert.assertEquals(TaskUtil.getTargetThreadPoolSize(anyParticipantManager.getZkClient(),
anyParticipantManager.getClusterName(), anyParticipantManager.getInstanceName()),
TEST_TARGET_TASK_THREAD_POOL_SIZE);
}
@Test(dependsOnMethods = "testGetTaskThreadPoolSizeInstanceConfigDoesNotExist")
public void testGetTaskThreadPoolSizeClusterConfigUndefined() {
MockParticipantManager anyParticipantManager = _participants[0];
ClusterConfig clusterConfig = new ClusterConfig(anyParticipantManager.getClusterName());
anyParticipantManager.getConfigAccessor()
.setClusterConfig(anyParticipantManager.getClusterName(), clusterConfig);
Assert.assertEquals(TaskUtil.getTargetThreadPoolSize(anyParticipantManager.getZkClient(),
anyParticipantManager.getClusterName(), anyParticipantManager.getInstanceName()),
TaskConstants.DEFAULT_TASK_THREAD_POOL_SIZE);
}
@Test(dependsOnMethods = "testGetTaskThreadPoolSizeClusterConfigUndefined", expectedExceptions = HelixException.class)
public void testGetTaskThreadPoolSizeClusterConfigDoesNotExist() {
MockParticipantManager anyParticipantManager = _participants[0];
HelixDataAccessor helixDataAccessor = anyParticipantManager.getHelixDataAccessor();
helixDataAccessor.removeProperty(helixDataAccessor.keyBuilder().clusterConfig());
TaskUtil.getTargetThreadPoolSize(anyParticipantManager.getZkClient(),
anyParticipantManager.getClusterName(), anyParticipantManager.getInstanceName());
}
}
| 9,837 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestTaskStateModelFactory.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.HelixManager;
import org.apache.helix.SystemPropertyKeys;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.integration.task.TaskTestBase;
import org.apache.helix.manager.zk.ZKHelixManager;
import org.apache.helix.mock.MockManager;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.msdcommon.constant.MetadataStoreRoutingConstants;
import org.apache.helix.msdcommon.mock.MockMetadataStoreDirectoryServer;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.constant.RoutingDataReaderType;
import org.apache.helix.zookeeper.impl.client.FederatedZkClient;
import org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory;
import org.apache.helix.zookeeper.routing.RoutingDataManager;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.when;
public class TestTaskStateModelFactory extends TaskTestBase {
// This value has to be different from the default value to verify correctness
private static final int TEST_TARGET_TASK_THREAD_POOL_SIZE =
TaskConstants.DEFAULT_TASK_THREAD_POOL_SIZE + 1;
@Test
public void testZkClientCreationMultiZk() throws Exception {
MockParticipantManager anyParticipantManager = _participants[0];
InstanceConfig instanceConfig =
InstanceConfig.toInstanceConfig(anyParticipantManager.getInstanceName());
instanceConfig.setTargetTaskThreadPoolSize(TEST_TARGET_TASK_THREAD_POOL_SIZE);
anyParticipantManager.getConfigAccessor()
.setInstanceConfig(anyParticipantManager.getClusterName(),
anyParticipantManager.getInstanceName(), instanceConfig);
// Start a msds server
// TODO: Refactor all MSDS_SERVER_ENDPOINT creation in system property to one place.
// Any test that modifies MSDS_SERVER_ENDPOINT system property and accesses
// HttpRoutingDataReader (ex. TestMultiZkHelixJavaApis and this test) will cause the
// MSDS_SERVER_ENDPOINT system property to be recorded as final in HttpRoutingDataReader; that
// means any test class that satisfies the aforementioned condition and is executed first gets
// to "decide" the default msds endpoint. The only workaround is for all these test classes to
// use the same default msds endpoint.
final String msdsHostName = "localhost";
final int msdsPort = 11117;
final String msdsNamespace = "multiZkTest";
Map<String, Collection<String>> routingData = new HashMap<>();
routingData
.put(ZK_ADDR, Collections.singletonList("/" + anyParticipantManager.getClusterName()));
MockMetadataStoreDirectoryServer msds =
new MockMetadataStoreDirectoryServer(msdsHostName, msdsPort, msdsNamespace, routingData);
msds.startServer();
// Save previously-set system configs
String prevMultiZkEnabled = System.getProperty(SystemPropertyKeys.MULTI_ZK_ENABLED);
String prevMsdsServerEndpoint =
System.getProperty(MetadataStoreRoutingConstants.MSDS_SERVER_ENDPOINT_KEY);
// Turn on multiZk mode in System config
System.setProperty(SystemPropertyKeys.MULTI_ZK_ENABLED, "true");
// MSDS endpoint: http://localhost:11117/admin/v2/namespaces/multiZkTest
String testMSDSServerEndpointKey =
"http://" + msdsHostName + ":" + msdsPort + "/admin/v2/namespaces/" + msdsNamespace;
System.setProperty(MetadataStoreRoutingConstants.MSDS_SERVER_ENDPOINT_KEY,
testMSDSServerEndpointKey);
RoutingDataManager.getInstance().reset(true);
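// Reset cached routing data so the newly set MSDS endpoint is picked up.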
verifyThreadPoolSizeAndZkClientClass(anyParticipantManager, TEST_TARGET_TASK_THREAD_POOL_SIZE,
FederatedZkClient.class);
// Turn off multiZk mode in System config, and remove zkAddress
System.setProperty(SystemPropertyKeys.MULTI_ZK_ENABLED, "false");
ZKHelixManager participantManager = Mockito.spy(anyParticipantManager);
when(participantManager.getMetadataStoreConnectionString()).thenReturn(null);
verifyThreadPoolSizeAndZkClientClass(participantManager, TEST_TARGET_TASK_THREAD_POOL_SIZE,
FederatedZkClient.class);
// Test no connection config case
when(participantManager.getRealmAwareZkConnectionConfig()).thenReturn(null);
verifyThreadPoolSizeAndZkClientClass(participantManager, TEST_TARGET_TASK_THREAD_POOL_SIZE,
FederatedZkClient.class);
// Remove server endpoint key and use connection config to specify endpoint
System.clearProperty(SystemPropertyKeys.MSDS_SERVER_ENDPOINT_KEY);
RealmAwareZkClient.RealmAwareZkConnectionConfig connectionConfig =
new RealmAwareZkClient.RealmAwareZkConnectionConfig.Builder()
.setRealmMode(RealmAwareZkClient.RealmMode.MULTI_REALM)
.setRoutingDataSourceEndpoint(testMSDSServerEndpointKey)
.setRoutingDataSourceType(RoutingDataReaderType.HTTP.name()).build();
when(participantManager.getRealmAwareZkConnectionConfig()).thenReturn(connectionConfig);
verifyThreadPoolSizeAndZkClientClass(participantManager, TEST_TARGET_TASK_THREAD_POOL_SIZE,
FederatedZkClient.class);
// Restore system properties
if (prevMultiZkEnabled == null) {
System.clearProperty(SystemPropertyKeys.MULTI_ZK_ENABLED);
} else {
System.setProperty(SystemPropertyKeys.MULTI_ZK_ENABLED, prevMultiZkEnabled);
}
if (prevMsdsServerEndpoint == null) {
System.clearProperty(SystemPropertyKeys.MSDS_SERVER_ENDPOINT_KEY);
} else {
System.setProperty(SystemPropertyKeys.MSDS_SERVER_ENDPOINT_KEY, prevMsdsServerEndpoint);
}
msds.stopServer();
}
@Test(dependsOnMethods = "testZkClientCreationMultiZk")
public void testZkClientCreationSingleZk() {
MockParticipantManager anyParticipantManager = _participants[0];
// Save previously-set system configs
String prevMultiZkEnabled = System.getProperty(SystemPropertyKeys.MULTI_ZK_ENABLED);
// Turn off multiZk mode in System config
System.setProperty(SystemPropertyKeys.MULTI_ZK_ENABLED, "false");
verifyThreadPoolSizeAndZkClientClass(anyParticipantManager, TEST_TARGET_TASK_THREAD_POOL_SIZE,
SharedZkClientFactory.InnerSharedZkClient.class);
// Restore system properties
if (prevMultiZkEnabled == null) {
System.clearProperty(SystemPropertyKeys.MULTI_ZK_ENABLED);
} else {
System.setProperty(SystemPropertyKeys.MULTI_ZK_ENABLED, prevMultiZkEnabled);
}
}
@Test(dependsOnMethods = "testZkClientCreationSingleZk",
expectedExceptions = UnsupportedOperationException.class)
public void testZkClientCreationNonZKManager() {
TaskStateModelFactory.createZkClient(new MockManager());
}
private void verifyThreadPoolSizeAndZkClientClass(HelixManager helixManager, int threadPoolSize,
Class<?> zkClientClass) {
RealmAwareZkClient zkClient = TaskStateModelFactory.createZkClient(helixManager);
try {
Assert.assertEquals(TaskUtil.getTargetThreadPoolSize(zkClient, helixManager.getClusterName(),
helixManager.getInstanceName()), threadPoolSize);
Assert.assertEquals(zkClient.getClass(), zkClientClass);
} finally {
zkClient.close();
}
}
}
| 9,838 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestJobConfigValidation.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.testng.annotations.Test;
public class TestJobConfigValidation {
@Test public void testJobConfigValidation() {
new JobConfig.Builder().setCommand("Dummy").setNumberOfTasks(123).setWorkflow("Workflow")
.build();
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testJobConfigWithoutAnyTaskSet() {
new JobConfig.Builder().setWorkflow("Workflow").build();
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testJobConfigCommandWithoutNumOfTask() {
new JobConfig.Builder().setWorkflow("Workflow").setCommand("Dummy").build();
}
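// Illustrative sketch (an assumption, mirroring the builder pattern used elsewhere in
// these tests): a job is also valid without a job-level command when per-task configs
// carry their own commands, e.g.:
//   TaskConfig task = new TaskConfig.Builder().setTaskId("t0").setCommand("Dummy").build();
//   new JobConfig.Builder().setWorkflow("Workflow")
//       .addTaskConfigs(java.util.Collections.singletonList(task)).build();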
}
| 9,839 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestGetLastScheduledTaskExecInfo.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import org.apache.helix.TestHelper;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.TaskTestBase;
import org.apache.helix.integration.task.TaskTestUtil;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestGetLastScheduledTaskExecInfo extends TaskTestBase {
private static final String TASK_START_TIME_KEY = "START_TIME";
private static final long INVALID_TIMESTAMP = -1L;
private static final long SHORT_EXECUTION_TIME = 10L;
private static final long LONG_EXECUTION_TIME = 99999999L;
private static final long DELETE_DELAY = 30000L;
@BeforeClass
public void beforeClass() throws Exception {
setSingleTestEnvironment();
super.beforeClass();
}
@Test
public void testGetLastScheduledTaskExecInfo() throws Exception {
// Start new queue that has one job with long tasks and record start time of the tasks
String queueName = TestHelper.getTestMethodName();
// Create and start new queue that has one job with 5 tasks.
// Each task has a long execution time.
// Since NumConcurrentTasksPerInstance is equal to 2, here we wait until two tasks have
// been scheduled (expectedScheduledTime = 2).
List<Long> startTimesWithStuckTasks = setupTasks(queueName, 5, LONG_EXECUTION_TIME, 2);
// Wait till the job is in progress
_driver.pollForJobState(queueName, queueName + "_job_0", TaskState.IN_PROGRESS);
// First two must be -1 (two tasks are stuck), and API call must return the last value (most
// recent timestamp)
Assert.assertEquals(startTimesWithStuckTasks.get(0).longValue(), INVALID_TIMESTAMP);
Assert.assertEquals(startTimesWithStuckTasks.get(1).longValue(), INVALID_TIMESTAMP);
// Workflow will be stuck so its partition state will be Running
boolean hasQueueReachedDesiredState = TestHelper.verify(() -> {
Long lastScheduledTaskTs = _driver.getLastScheduledTaskTimestamp(queueName);
TaskExecutionInfo execInfo = _driver.getLastScheduledTaskExecutionInfo(queueName);
return (execInfo.getJobName().equals(queueName + "_job_0")
&& execInfo.getTaskPartitionState() == TaskPartitionState.RUNNING
&& execInfo.getStartTimeStamp().equals(lastScheduledTaskTs)
&& startTimesWithStuckTasks.get(4).equals(lastScheduledTaskTs));
}, TestHelper.WAIT_DURATION);
Assert.assertTrue(hasQueueReachedDesiredState);
// Stop and delete the queue
_driver.stop(queueName);
boolean isQueueStopped = TestHelper.verify(() -> {
WorkflowContext workflowContext = _driver.getWorkflowContext(queueName);
return workflowContext.getWorkflowState().equals(TaskState.STOPPED);
}, TestHelper.WAIT_DURATION);
Assert.assertTrue(isQueueStopped);
_driver.deleteAndWaitForCompletion(queueName, DELETE_DELAY);
// Start the new queue with new task configuration.
// Create and start new queue that has one job with 4 tasks.
// Each task has a short execution time. In the setupTasks we wait until all of the tasks have
// been scheduled (expectedScheduledTime = 4).
List<Long> startTimesFastTasks = setupTasks(queueName, 4, SHORT_EXECUTION_TIME, 4);
// Wait till the job is in progress or completed. Since the tasks have short execution time, we
// wait for either IN_PROGRESS or COMPLETED states
_driver.pollForJobState(queueName, queueName + "_job_0", TaskState.IN_PROGRESS,
TaskState.COMPLETED);
hasQueueReachedDesiredState = TestHelper.verify(() -> {
Long lastScheduledTaskTs = _driver.getLastScheduledTaskTimestamp(queueName);
TaskExecutionInfo execInfo = _driver.getLastScheduledTaskExecutionInfo(queueName);
return (execInfo.getJobName().equals(queueName + "_job_0")
&& execInfo.getTaskPartitionState() == TaskPartitionState.COMPLETED
&& execInfo.getStartTimeStamp().equals(lastScheduledTaskTs)
&& startTimesFastTasks.get(startTimesFastTasks.size() - 1).equals(lastScheduledTaskTs));
}, TestHelper.WAIT_DURATION);
Assert.assertTrue(hasQueueReachedDesiredState);
}
/**
* Helper method for gathering start times for all tasks. Returns start times in ascending
* order. Null start times are recorded as INVALID_TIMESTAMP (-1).
* @param jobQueueName name of the queue
* @param numTasks number of tasks to schedule
* @param taskTimeout execution duration of each task
* @param expectedScheduledTasks expected number of tasks that should be scheduled
* @return list of timestamps for all tasks in ascending order
* @throws Exception
*/
private List<Long> setupTasks(String jobQueueName, int numTasks, long taskTimeout,
int expectedScheduledTasks) throws Exception {
// Create a queue
JobQueue.Builder queueBuilder = TaskTestUtil.buildJobQueue(jobQueueName);
// Create and enqueue a job
JobConfig.Builder jobConfig = new JobConfig.Builder();
// Create tasks
List<TaskConfig> taskConfigs = new ArrayList<>();
for (int i = 0; i < numTasks; i++) {
taskConfigs
.add(new TaskConfig.Builder().setTaskId("task_" + i).setCommand(MockTask.TASK_COMMAND)
.addConfig(MockTask.JOB_DELAY, String.valueOf(taskTimeout)).build());
}
// Run up to 2 tasks at a time
jobConfig.addTaskConfigs(taskConfigs).setNumConcurrentTasksPerInstance(2);
queueBuilder.enqueueJob("job_0", jobConfig);
_driver.start(queueBuilder.build());
_driver.pollForWorkflowState(jobQueueName, TaskState.IN_PROGRESS);
boolean haveExpectedNumberOfTasksScheduled = TestHelper.verify(() -> {
int scheduleTask = 0;
WorkflowConfig workflowConfig =
TaskUtil.getWorkflowConfig(_manager.getHelixDataAccessor(), jobQueueName);
for (String job : workflowConfig.getJobDag().getAllNodes()) {
JobContext jobContext = _driver.getJobContext(job);
Set<Integer> allPartitions = jobContext.getPartitionSet();
for (Integer partition : allPartitions) {
String timestamp = jobContext.getMapField(partition).get(TASK_START_TIME_KEY);
if (timestamp != null) {
scheduleTask++;
}
}
}
return (scheduleTask == expectedScheduledTasks);
}, TestHelper.WAIT_DURATION);
Assert.assertTrue(haveExpectedNumberOfTasksScheduled);
// Pull jobContexts and look at the start times
List<Long> startTimes = new ArrayList<>();
WorkflowConfig workflowConfig =
TaskUtil.getWorkflowConfig(_manager.getHelixDataAccessor(), jobQueueName);
for (String job : workflowConfig.getJobDag().getAllNodes()) {
JobContext jobContext = _driver.getJobContext(job);
Set<Integer> allPartitions = jobContext.getPartitionSet();
for (Integer partition : allPartitions) {
String timestamp = jobContext.getMapField(partition).get(TASK_START_TIME_KEY);
if (timestamp == null) {
startTimes.add(INVALID_TIMESTAMP);
} else {
startTimes.add(Long.parseLong(timestamp));
}
}
}
Collections.sort(startTimes);
return startTimes;
}
}
| 9,840 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestCurrentInstanceToTaskAssignmentsWithPendingMessage.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import static org.mockito.Mockito.*;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.SortedSet;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestCurrentInstanceToTaskAssignmentsWithPendingMessage {
/**
* Unit test verifying that every task partition that has a current state or a pending
* message is included in the result of getCurrentInstanceToTaskAssignments.
*/
@Test
public void testCurrentInstanceToTaskAssignmentsWithPendingMessage() {
Random random = new Random();
String jobName = "job";
String nodeName = "localhost";
int numTasks = 100;
// Create the set of live instance names
Collection<String> liveInstances = new HashSet<>();
liveInstances.add(nodeName);
// Create allTaskPartitions
Set<Integer> allTaskPartitions = new HashSet<>();
// Create a mock CurrentStateOutput
CurrentStateOutput currentStateOutput = mock(CurrentStateOutput.class);
// Generate a CurrentStateMap and PendingMessageMap
Map<Partition, Map<String, String>> currentStateMap = new HashMap<>();
Map<Partition, Map<String, Message>> pendingMessageMap = new HashMap<>();
int tasksWithCurrentStateOnly = 0;
int tasksWithCurrentStateAndPendingMessage = 0;
int tasksWithPendingMessageOnly = 0;
List<String> states =
Arrays.asList(TaskPartitionState.INIT.name(), TaskPartitionState.RUNNING.name(),
TaskPartitionState.TIMED_OUT.name(), TaskPartitionState.TASK_ERROR.name(),
TaskPartitionState.COMPLETED.name(), TaskPartitionState.STOPPED.name(),
TaskPartitionState.TASK_ABORTED.name(), TaskPartitionState.DROPPED.name());
for (int i = 0; i < numTasks; i++) {
allTaskPartitions.add(i);
Partition task = new Partition(jobName + "_" + i);
currentStateMap.put(task, new HashMap<>());
pendingMessageMap.put(task, new HashMap<>());
String currentState = states.get(random.nextInt(states.size()));
Message message = new Message(Message.MessageType.STATE_TRANSITION, "12345");
message.setToState(states.get(random.nextInt(states.size())));
message.setFromState(states.get(random.nextInt(states.size())));
int randInt = random.nextInt(4);
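// Four equally likely cases: 0 = current state only, 1 = current state + pending message,
// 2 = pending message only, 3 = neither.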
if (randInt == 0) {
tasksWithCurrentStateOnly++;
currentStateMap.get(task).put(nodeName, currentState);
} else if (randInt == 1) {
tasksWithCurrentStateAndPendingMessage++;
currentStateMap.get(task).put(nodeName, currentState);
pendingMessageMap.get(task).put(nodeName, message);
} else if (randInt == 2) {
tasksWithPendingMessageOnly++;
pendingMessageMap.get(task).put(nodeName, message);
}
}
when(currentStateOutput.getCurrentStateMap(jobName)).thenReturn(currentStateMap);
when(currentStateOutput.getPendingMessageMap(jobName)).thenReturn(pendingMessageMap);
// Create an empty tasksToDrop
Map<String, Set<Integer>> tasksToDrop = new HashMap<>();
// Call the static method we are testing
Map<String, SortedSet<Integer>> result = JobDispatcher.getCurrentInstanceToTaskAssignments(
liveInstances, currentStateOutput, jobName, tasksToDrop);
Assert.assertEquals(result.get(nodeName).size(), (tasksWithCurrentStateOnly
+ tasksWithCurrentStateAndPendingMessage + tasksWithPendingMessageOnly));
}
}
| 9,841 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestWorkflowCreation.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.HelixException;
import org.apache.helix.TestHelper;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.TaskTestUtil;
import org.apache.helix.integration.task.WorkflowGenerator;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestWorkflowCreation extends TaskSynchronizedTestBase {
/**
* Test that submitting workflows of the same name throws an exception.
*/
@Test(expectedExceptions = HelixException.class)
public void testWorkflowCreationNoDuplicates() {
String queue = TestHelper.getTestMethodName();
JobQueue.Builder builder = TaskTestUtil.buildJobQueue(queue);
JobConfig.Builder jobBuilder =
new JobConfig.Builder().setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB)
.setCommand(MockTask.TASK_COMMAND).setMaxAttemptsPerTask(2)
.setJobCommandConfigMap(WorkflowGenerator.DEFAULT_COMMAND_CONFIG).setExpiry(1L);
for (int i = 0; i < 8; i++) {
builder.enqueueJob("JOB" + i, jobBuilder);
}
// First try
_driver.createQueue(builder.build());
Assert.assertNotNull(_driver.getWorkflowConfig(queue));
// Second try (this should throw an exception)
_driver.createQueue(builder.build());
}
}
| 9,842 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestAssignableInstanceManagerControllerSwitch.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.TestHelper;
import org.apache.helix.common.caches.TaskDataCache;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.TaskTestBase;
import org.apache.helix.integration.task.WorkflowGenerator;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.spectator.RoutingTableProvider;
import org.apache.helix.task.assigner.AssignableInstance;
import org.apache.helix.task.assigner.TaskAssignResult;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestAssignableInstanceManagerControllerSwitch extends TaskTestBase {
private int numJobs = 2;
private int numTasks = 3;
/**
 * Verifies that an AssignableInstanceManager rebuilt from scratch from the same cluster state
 * matches the original exactly, modeling the situation where there is a Controller switch and
 * AssignableInstanceManager is built back from scratch.
 * @throws InterruptedException
 */
@Test
public void testControllerSwitch() throws InterruptedException {
setupAndRunJobs();
Map<String, LiveInstance> liveInstanceMap = new HashMap<>();
Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
RoutingTableProvider routingTableProvider = new RoutingTableProvider(_manager);
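// Snapshot live instances and their instance configs, as a controller would see them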
Collection<LiveInstance> liveInstances = routingTableProvider.getLiveInstances();
for (LiveInstance liveInstance : liveInstances) {
String instanceName = liveInstance.getInstanceName();
liveInstanceMap.put(instanceName, liveInstance);
instanceConfigMap.put(instanceName,
_gSetupTool.getClusterManagementTool().getInstanceConfig(CLUSTER_NAME, instanceName));
}
// Get ClusterConfig
ClusterConfig clusterConfig = _manager.getConfigAccessor().getClusterConfig(CLUSTER_NAME);
// Initialize TaskDataCache
HelixDataAccessor accessor = _manager.getHelixDataAccessor();
TaskDataCache taskDataCache = new TaskDataCache(CLUSTER_NAME);
Map<String, ResourceConfig> resourceConfigMap =
accessor.getChildValuesMap(accessor.keyBuilder().resourceConfigs(), true);
// Wait for the job pipeline
Thread.sleep(1000);
taskDataCache.refresh(accessor, resourceConfigMap);
// Create prev manager and build
AssignableInstanceManager prevAssignableInstanceManager = new AssignableInstanceManager();
prevAssignableInstanceManager.buildAssignableInstances(clusterConfig, taskDataCache,
liveInstanceMap, instanceConfigMap);
Map<String, AssignableInstance> prevAssignableInstanceMap =
new HashMap<>(prevAssignableInstanceManager.getAssignableInstanceMap());
Map<String, TaskAssignResult> prevTaskAssignResultMap =
new HashMap<>(prevAssignableInstanceManager.getTaskAssignResultMap());
// Generate a new AssignableInstanceManager
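// A second refresh simulates the newly elected controller re-reading task state from ZooKeeper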
taskDataCache.refresh(accessor, resourceConfigMap);
AssignableInstanceManager newAssignableInstanceManager = new AssignableInstanceManager();
newAssignableInstanceManager.buildAssignableInstances(clusterConfig, taskDataCache,
liveInstanceMap, instanceConfigMap);
Map<String, AssignableInstance> newAssignableInstanceMap =
new HashMap<>(newAssignableInstanceManager.getAssignableInstanceMap());
Map<String, TaskAssignResult> newTaskAssignResultMap =
new HashMap<>(newAssignableInstanceManager.getTaskAssignResultMap());
// Compare prev and new - they should match up exactly
Assert.assertEquals(prevAssignableInstanceMap.size(), newAssignableInstanceMap.size());
Assert.assertEquals(prevTaskAssignResultMap.size(), newTaskAssignResultMap.size());
for (Map.Entry<String, AssignableInstance> assignableInstanceEntry : newAssignableInstanceMap
.entrySet()) {
String instance = assignableInstanceEntry.getKey();
Assert.assertEquals(prevAssignableInstanceMap.get(instance).getCurrentAssignments(),
assignableInstanceEntry.getValue().getCurrentAssignments());
Assert.assertEquals(prevAssignableInstanceMap.get(instance).getTotalCapacity(),
assignableInstanceEntry.getValue().getTotalCapacity());
Assert.assertEquals(prevAssignableInstanceMap.get(instance).getUsedCapacity(),
assignableInstanceEntry.getValue().getUsedCapacity());
}
for (Map.Entry<String, TaskAssignResult> taskAssignResultEntry : newTaskAssignResultMap
.entrySet()) {
String taskID = taskAssignResultEntry.getKey();
Assert.assertEquals(prevTaskAssignResultMap.get(taskID).toString(),
taskAssignResultEntry.getValue().toString());
}
// Shut down RoutingTableProvider so periodic update gets shut down
routingTableProvider.shutdown();
}
private void setupAndRunJobs() {
// Create a workflow with some long-running jobs in progress
String workflowName = TestHelper.getTestMethodName();
Workflow.Builder builder = new Workflow.Builder(workflowName);
for (int i = 0; i < numJobs; i++) {
List<TaskConfig> taskConfigs = new ArrayList<>();
for (int j = 0; j < numTasks; j++) {
String taskID = "JOB_" + i + "_TASK_" + j;
TaskConfig.Builder taskConfigBuilder = new TaskConfig.Builder();
taskConfigBuilder.setTaskId(taskID).setCommand(MockTask.TASK_COMMAND)
.addConfig(MockTask.JOB_DELAY, "120000");
taskConfigs.add(taskConfigBuilder.build());
}
String jobName = "JOB_" + i;
// Long-running job
JobConfig.Builder jobBuilder =
new JobConfig.Builder().setCommand(MockTask.TASK_COMMAND).setMaxAttemptsPerTask(10000)
.setJobCommandConfigMap(WorkflowGenerator.DEFAULT_COMMAND_CONFIG)
.addTaskConfigs(taskConfigs).setIgnoreDependentJobFailure(true)
.setFailureThreshold(100000)
.setJobCommandConfigMap(ImmutableMap.of(MockTask.JOB_DELAY, "120000"));
builder.addJob(jobName, jobBuilder);
}
// Start the workflow
_driver.start(builder.build());
}
}
| 9,843 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestFixedTargetedTaskAssignmentCalculator.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import org.apache.helix.common.caches.TaskDataCache;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Partition;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * This unit test makes sure that FixedTargetTaskAssignmentCalculator makes correct decisions
 * for targeted jobs.
 */
public class TestFixedTargetedTaskAssignmentCalculator {
private static final String CLUSTER_NAME = "TestCluster";
private static final String INSTANCE_PREFIX = "Instance_";
private static final int NUM_PARTICIPANTS = 3;
private static final String WORKFLOW_NAME = "TestWorkflow";
private static final String JOB_NAME = "TestJob";
private static final String PARTITION_NAME = "0";
private static final String TARGET_PARTITION_NAME = "0";
private static final int PARTITION_ID = 0;
private static final String TARGET_RESOURCES = "TestDB";
private Map<String, LiveInstance> _liveInstances;
private Map<String, InstanceConfig> _instanceConfigs;
private ClusterConfig _clusterConfig;
private AssignableInstanceManager _assignableInstanceManager;
@BeforeClass
public void beforeClass() {
// Populate live instances and their corresponding instance configs
_liveInstances = new HashMap<>();
_instanceConfigs = new HashMap<>();
_clusterConfig = new ClusterConfig(CLUSTER_NAME);
for (int i = 1; i <= NUM_PARTICIPANTS; i++) {
String instanceName = INSTANCE_PREFIX + i;
LiveInstance liveInstance = new LiveInstance(instanceName);
InstanceConfig instanceConfig = new InstanceConfig(instanceName);
_liveInstances.put(instanceName, liveInstance);
_instanceConfigs.put(instanceName, instanceConfig);
}
}
/**
* Test FixedTargetTaskAssignmentCalculator and make sure that if a job has been assigned
* before and target partition is still on the same instance and in RUNNING state,
* we do not make new assignment for that task.
*/
@Test
public void testFixedTargetTaskAssignmentCalculatorSameInstanceRunningTask() {
_assignableInstanceManager = new AssignableInstanceManager();
_assignableInstanceManager.buildAssignableInstances(_clusterConfig,
new TaskDataCache("CLUSTER_NAME"), _liveInstances, _instanceConfigs);
// Preparing the inputs
String masterInstance = INSTANCE_PREFIX + 1;
String slaveInstance1 = INSTANCE_PREFIX + 2;
String slaveInstance2 = INSTANCE_PREFIX + 3;
CurrentStateOutput currentStateOutput = prepareCurrentState(TaskPartitionState.RUNNING,
masterInstance, slaveInstance1, slaveInstance2);
List<String> instances =
new ArrayList<String>(Arrays.asList(masterInstance, slaveInstance1, slaveInstance2));
JobConfig jobConfig = prepareJobConfig();
JobContext jobContext = prepareJobContext(TaskPartitionState.RUNNING, masterInstance);
WorkflowConfig workflowConfig = prepareWorkflowConfig();
WorkflowContext workflowContext = prepareWorkflowContext();
Set<Integer> partitionSet = new HashSet<>(Collections.singletonList(PARTITION_ID));
Map<String, IdealState> idealStates =
prepareIdealStates(masterInstance, slaveInstance1, slaveInstance2);
TaskAssignmentCalculator taskAssignmentCal =
new FixedTargetTaskAssignmentCalculator(_assignableInstanceManager);
Map<String, SortedSet<Integer>> result =
taskAssignmentCal.getTaskAssignment(currentStateOutput, instances, jobConfig, jobContext,
workflowConfig, workflowContext, partitionSet, idealStates);
Assert.assertEquals(result.get(masterInstance).size(), 0);
Assert.assertEquals(result.get(slaveInstance1).size(), 0);
Assert.assertEquals(result.get(slaveInstance2).size(), 0);
}
/**
* Test FixedTargetTaskAssignmentCalculator and make sure that if a job has been assigned
* before and target partition is still on the same instance and in INIT state,
* we do not make new assignment for that task.
*/
@Test
public void testFixedTargetTaskAssignmentCalculatorSameInstanceInitTask() {
_assignableInstanceManager = new AssignableInstanceManager();
_assignableInstanceManager.buildAssignableInstances(_clusterConfig,
new TaskDataCache("CLUSTER_NAME"), _liveInstances, _instanceConfigs);
// Preparing the inputs
String masterInstance = INSTANCE_PREFIX + 1;
String slaveInstance1 = INSTANCE_PREFIX + 2;
String slaveInstance2 = INSTANCE_PREFIX + 3;
CurrentStateOutput currentStateOutput = prepareCurrentState(TaskPartitionState.INIT,
masterInstance, slaveInstance1, slaveInstance2);
List<String> instances =
new ArrayList<String>(Arrays.asList(masterInstance, slaveInstance1, slaveInstance2));
JobConfig jobConfig = prepareJobConfig();
JobContext jobContext = prepareJobContext(TaskPartitionState.INIT, masterInstance);
WorkflowConfig workflowConfig = prepareWorkflowConfig();
WorkflowContext workflowContext = prepareWorkflowContext();
Set<Integer> partitionSet = new HashSet<>(Collections.singletonList(PARTITION_ID));
Map<String, IdealState> idealStates =
prepareIdealStates(masterInstance, slaveInstance1, slaveInstance2);
TaskAssignmentCalculator taskAssignmentCal =
new FixedTargetTaskAssignmentCalculator(_assignableInstanceManager);
Map<String, SortedSet<Integer>> result =
taskAssignmentCal.getTaskAssignment(currentStateOutput, instances, jobConfig, jobContext,
workflowConfig, workflowContext, partitionSet, idealStates);
Assert.assertEquals(result.get(masterInstance).size(), 0);
Assert.assertEquals(result.get(slaveInstance1).size(), 0);
Assert.assertEquals(result.get(slaveInstance2).size(), 0);
}
/**
* Test FixedTargetTaskAssignmentCalculator and make sure that if a job has been assigned
* before and target partition has moved to another instance, controller assign the task to
* new/correct instance.
*/
@Test
public void testFixedTargetTaskAssignmentCalculatorDifferentInstance() {
_assignableInstanceManager = new AssignableInstanceManager();
_assignableInstanceManager.buildAssignableInstances(_clusterConfig,
new TaskDataCache("CLUSTER_NAME"), _liveInstances, _instanceConfigs);
// Preparing the inputs
String masterInstance = INSTANCE_PREFIX + 2;
String slaveInstance1 = INSTANCE_PREFIX + 1;
String slaveInstance2 = INSTANCE_PREFIX + 3;
CurrentStateOutput currentStateOutput = prepareCurrentState(TaskPartitionState.RUNNING,
masterInstance, slaveInstance1, slaveInstance2);
List<String> instances =
new ArrayList<String>(Arrays.asList(masterInstance, slaveInstance1, slaveInstance2));
JobConfig jobConfig = prepareJobConfig();
JobContext jobContext = prepareJobContext(TaskPartitionState.RUNNING, slaveInstance1);
WorkflowConfig workflowConfig = prepareWorkflowConfig();
WorkflowContext workflowContext = prepareWorkflowContext();
Set<Integer> partitionSet = new HashSet<>(Collections.singletonList(PARTITION_ID));
Map<String, IdealState> idealStates =
prepareIdealStates(masterInstance, slaveInstance1, slaveInstance2);
TaskAssignmentCalculator taskAssignmentCal =
new FixedTargetTaskAssignmentCalculator(_assignableInstanceManager);
Map<String, SortedSet<Integer>> result =
taskAssignmentCal.getTaskAssignment(currentStateOutput, instances, jobConfig, jobContext,
workflowConfig, workflowContext, partitionSet, idealStates);
Assert.assertEquals(result.get(slaveInstance1).size(), 0);
Assert.assertEquals(result.get(masterInstance).size(), 1);
Assert.assertEquals(result.get(slaveInstance2).size(), 0);
}
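/**
 * Builds a targeted JobConfig with a single task that targets the MASTER replicas of the
 * test DB.
 */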
private JobConfig prepareJobConfig() {
JobConfig.Builder jobConfigBuilder = new JobConfig.Builder();
jobConfigBuilder.setWorkflow(WORKFLOW_NAME);
jobConfigBuilder.setCommand("TestCommand");
jobConfigBuilder.setJobId(JOB_NAME);
jobConfigBuilder.setTargetResource(TARGET_RESOURCES);
List<String> targetPartition = new ArrayList<>();
jobConfigBuilder.setTargetPartitions(targetPartition);
Set<String> targetPartitionStates = new HashSet<>(Collections.singletonList("MASTER"));
jobConfigBuilder.setTargetPartitionStates(targetPartitionStates);
List<TaskConfig> taskConfigs = new ArrayList<>();
TaskConfig.Builder taskConfigBuilder = new TaskConfig.Builder();
taskConfigBuilder.setTaskId("0");
taskConfigs.add(taskConfigBuilder.build());
jobConfigBuilder.addTaskConfigs(taskConfigs);
return jobConfigBuilder.build();
}
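/**
 * Builds a JobContext whose single partition is in the given state and assigned to the given
 * instance, targeting TestDB_0.
 */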
private JobContext prepareJobContext(TaskPartitionState taskPartitionState, String instance) {
ZNRecord record = new ZNRecord(JOB_NAME);
JobContext jobContext = new JobContext(record);
jobContext.setStartTime(0L);
jobContext.setName(JOB_NAME);
jobContext.setPartitionState(PARTITION_ID, taskPartitionState);
jobContext.setPartitionTarget(PARTITION_ID, TARGET_RESOURCES + "_" + TARGET_PARTITION_NAME);
jobContext.setAssignedParticipant(PARTITION_ID, instance);
return jobContext;
}
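/**
 * Builds a CurrentStateOutput in which the task partition is in the given state on the master
 * instance and the target DB partition has one MASTER and two SLAVE replicas.
 */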
private CurrentStateOutput prepareCurrentState(TaskPartitionState taskCurrentState,
String masterInstance, String slaveInstance1, String slaveInstance2) {
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
currentStateOutput.setResourceStateModelDef(JOB_NAME, "TASK");
Partition taskPartition = new Partition(JOB_NAME + "_" + PARTITION_NAME);
currentStateOutput.setCurrentState(JOB_NAME, taskPartition, masterInstance,
taskCurrentState.name());
Partition dbPartition = new Partition(TARGET_RESOURCES + "_0");
currentStateOutput.setEndTime(TARGET_RESOURCES, dbPartition, masterInstance, 0L);
currentStateOutput.setCurrentState(TARGET_RESOURCES, dbPartition, masterInstance, "MASTER");
currentStateOutput.setCurrentState(TARGET_RESOURCES, dbPartition, slaveInstance1, "SLAVE");
currentStateOutput.setCurrentState(TARGET_RESOURCES, dbPartition, slaveInstance2, "SLAVE");
currentStateOutput.setInfo(TARGET_RESOURCES, dbPartition, masterInstance, "");
return currentStateOutput;
}
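/**
 * Builds a non-terminable job-queue WorkflowConfig whose DAG contains the single test job.
 */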
private WorkflowConfig prepareWorkflowConfig() {
WorkflowConfig.Builder workflowConfigBuilder = new WorkflowConfig.Builder();
workflowConfigBuilder.setWorkflowId(WORKFLOW_NAME);
workflowConfigBuilder.setTerminable(false);
workflowConfigBuilder.setTargetState(TargetState.START);
workflowConfigBuilder.setJobQueue(true);
JobDag jobDag = new JobDag();
jobDag.addNode(JOB_NAME);
workflowConfigBuilder.setJobDag(jobDag);
return workflowConfigBuilder.build();
}
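/**
 * Builds a WorkflowContext that marks both the workflow and the test job IN_PROGRESS.
 */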
private WorkflowContext prepareWorkflowContext() {
ZNRecord record = new ZNRecord(WORKFLOW_NAME);
record.setSimpleField(WorkflowContext.WorkflowContextProperties.StartTime.name(), "0");
record.setSimpleField(WorkflowContext.WorkflowContextProperties.NAME.name(), WORKFLOW_NAME);
record.setSimpleField(WorkflowContext.WorkflowContextProperties.STATE.name(),
TaskState.IN_PROGRESS.name());
Map<String, String> jobState = new HashMap<>();
jobState.put(JOB_NAME, TaskState.IN_PROGRESS.name());
record.setMapField(WorkflowContext.WorkflowContextProperties.JOB_STATES.name(), jobState);
return new WorkflowContext(record);
}
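/**
 * Builds a FULL_AUTO MasterSlave IdealState for the target DB that maps instance1 to MASTER
 * and the other two instances to SLAVE.
 */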
private Map<String, IdealState> prepareIdealStates(String instance1, String instance2,
String instance3) {
Map<String, IdealState> idealStates = new HashMap<>();
ZNRecord recordDB = new ZNRecord(TARGET_RESOURCES);
recordDB.setSimpleField(IdealState.IdealStateProperty.REPLICAS.name(), "3");
recordDB.setSimpleField(IdealState.IdealStateProperty.REBALANCE_MODE.name(), "FULL_AUTO");
recordDB.setSimpleField(IdealState.IdealStateProperty.IDEAL_STATE_MODE.name(),
"AUTO_REBALANCE");
recordDB.setSimpleField(IdealState.IdealStateProperty.STATE_MODEL_DEF_REF.name(),
"MasterSlave");
recordDB.setSimpleField(IdealState.IdealStateProperty.REBALANCE_STRATEGY.name(),
"org.apache.helix.controller.rebalancer.strategy.CrushEdRebalanceStrategy");
recordDB.setSimpleField(IdealState.IdealStateProperty.REBALANCER_CLASS_NAME.name(),
"org.apache.helix.controller.rebalancer.DelayedAutoRebalancer");
Map<String, String> mapping = new HashMap<>();
mapping.put(instance1, "MASTER");
mapping.put(instance2, "SLAVE");
mapping.put(instance3, "SLAVE");
recordDB.setMapField(TARGET_RESOURCES + "_0", mapping);
List<String> listField = new ArrayList<>();
listField.add(instance1);
listField.add(instance2);
listField.add(instance3);
recordDB.setListField(TARGET_RESOURCES + "_0", listField);
idealStates.put(TARGET_RESOURCES, new IdealState(recordDB));
return idealStates;
}
}
| 9,844 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestSemiAutoStateTransition.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.integration.task.TaskTestBase;
import org.apache.helix.integration.task.WorkflowGenerator;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.mock.participant.MockDelayMSStateModelFactory;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.participant.StateMachineEngine;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestSemiAutoStateTransition extends TaskTestBase {
protected HelixDataAccessor _accessor;
private PropertyKey.Builder _keyBuilder;
@BeforeClass
public void beforeClass() throws Exception {
_participants = new MockParticipantManager[_numNodes];
_numPartitions = 1;
_gSetupTool.addCluster(CLUSTER_NAME, true);
_accessor = new ZKHelixDataAccessor(CLUSTER_NAME, _baseAccessor);
_keyBuilder = _accessor.keyBuilder();
setupParticipants();
for (int i = 0; i < _numDbs; i++) {
String db = WorkflowGenerator.DEFAULT_TGT_DB + i;
_gSetupTool.addResourceToCluster(CLUSTER_NAME, db, _numPartitions, MASTER_SLAVE_STATE_MODEL,
IdealState.RebalanceMode.SEMI_AUTO.toString());
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, db, _numReplicas);
_testDbs.add(db);
}
startParticipants();
// start controller
String controllerName = CONTROLLER_PREFIX + "_0";
_controller = new ClusterControllerManager(ZK_ADDR, CLUSTER_NAME, controllerName);
_controller.syncStart();
Thread.sleep(2000L);
createManagers();
}
@Test
public void testOfflineToSecondTopState() throws Exception {
_participants[0].syncStop();
Thread.sleep(2000L);
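// With the first participant stopped, SEMI_AUTO should promote the next instance in the
// preference list to MASTER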
ExternalView externalView =
_accessor.getProperty(_keyBuilder.externalView(WorkflowGenerator.DEFAULT_TGT_DB + "0"));
Map<String, String> stateMap =
externalView.getStateMap(WorkflowGenerator.DEFAULT_TGT_DB + "0_0");
Assert.assertEquals("MASTER", stateMap.get(PARTICIPANT_PREFIX + "_" + (_startPort + 1)));
Assert.assertEquals("SLAVE", stateMap.get(PARTICIPANT_PREFIX + "_" + (_startPort + 2)));
String instanceName = PARTICIPANT_PREFIX + "_" + _startPort;
_participants[0] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
// use a state model that delays transitions so the rejoined replica stays OFFLINE
StateMachineEngine stateMach = _participants[0].getStateMachineEngine();
MockDelayMSStateModelFactory delayFactory =
new MockDelayMSStateModelFactory().setDelay(300000L);
stateMach.registerStateModelFactory(MASTER_SLAVE_STATE_MODEL, delayFactory);
_participants[0].syncStart();
Thread.sleep(2000L);
externalView =
_accessor.getProperty(_keyBuilder.externalView(WorkflowGenerator.DEFAULT_TGT_DB + "0"));
stateMap = externalView.getStateMap(WorkflowGenerator.DEFAULT_TGT_DB + "0_0");
Assert.assertEquals("OFFLINE", stateMap.get(PARTICIPANT_PREFIX + "_" + _startPort));
Assert.assertEquals("MASTER", stateMap.get(PARTICIPANT_PREFIX + "_" + (_startPort + 1)));
Assert.assertEquals("SLAVE", stateMap.get(PARTICIPANT_PREFIX + "_" + (_startPort + 2)));
}
}
| 9,845 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/TestGetSetUserContentStore.java | package org.apache.helix.task;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.TestHelper;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.integration.task.TaskTestBase;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.tools.ClusterSetup;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestGetSetUserContentStore extends TaskTestBase {
private static final String JOB_COMMAND = "DummyCommand";
private static final int NUM_JOB = 5;
private Map<String, String> _jobCommandMap;
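// Handshake latches: allTasksReady blocks the admin thread until every task has started, and
// adminReady blocks the tasks until the admin has written the user content they read back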
private final CountDownLatch allTasksReady = new CountDownLatch(NUM_JOB);
private final CountDownLatch adminReady = new CountDownLatch(1);
private enum TaskDumpResultKey {
WorkflowContent,
JobContent,
TaskContent
}
private class TaskRecord {
String workflowName;
String jobName;
String taskPartitionId;
TaskRecord(String workflow, String job, String task) {
workflowName = workflow;
jobName = job;
taskPartitionId = task;
}
}
@BeforeClass
public void beforeClass() throws Exception {
_participants = new MockParticipantManager[_numNodes];
String namespace = "/" + CLUSTER_NAME;
if (_gZkClient.exists(namespace)) {
_gZkClient.deleteRecursively(namespace);
}
// Setup cluster and instances
ClusterSetup setupTool = new ClusterSetup(ZK_ADDR);
setupTool.addCluster(CLUSTER_NAME, true);
for (int i = 0; i < _numNodes; i++) {
String storageNodeName = PARTICIPANT_PREFIX + "_" + (_startPort + i);
setupTool.addInstanceToCluster(CLUSTER_NAME, storageNodeName);
}
// start dummy participants
for (int i = 0; i < _numNodes; i++) {
final String instanceName = PARTICIPANT_PREFIX + "_" + (_startPort + i);
// Set task callbacks
Map<String, TaskFactory> taskFactoryReg = new HashMap<>();
TaskFactory shortTaskFactory = WriteTask::new;
taskFactoryReg.put("WriteTask", shortTaskFactory);
_participants[i] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
// Register a Task state model factory.
StateMachineEngine stateMachine = _participants[i].getStateMachineEngine();
stateMachine.registerStateModelFactory("Task",
new TaskStateModelFactory(_participants[i], taskFactoryReg));
_participants[i].syncStart();
}
// Start controller
String controllerName = CONTROLLER_PREFIX + "_0";
_controller = new ClusterControllerManager(ZK_ADDR, CLUSTER_NAME, controllerName);
_controller.syncStart();
// Start an admin connection
_manager = HelixManagerFactory.getZKHelixManager(CLUSTER_NAME, "Admin",
InstanceType.ADMINISTRATOR, ZK_ADDR);
_manager.connect();
_driver = new TaskDriver(_manager);
_jobCommandMap = new HashMap<>();
}
@Test
public void testGetUserContentStore() throws InterruptedException {
String workflowName = TestHelper.getTestMethodName();
Workflow.Builder workflowBuilder = new Workflow.Builder(workflowName);
WorkflowConfig.Builder configBuilder = new WorkflowConfig.Builder(workflowName);
configBuilder.setAllowOverlapJobAssignment(true);
workflowBuilder.setWorkflowConfig(configBuilder.build());
Map<String, TaskRecord> recordMap = new HashMap<>();
// Create 5 jobs with 1 WriteTask each
for (int i = 0; i < NUM_JOB; i++) {
List<TaskConfig> taskConfigs = new ArrayList<>();
taskConfigs.add(new TaskConfig("WriteTask", new HashMap<>()));
JobConfig.Builder jobConfigBuilder = new JobConfig.Builder().setCommand(JOB_COMMAND)
.addTaskConfigs(taskConfigs).setJobCommandConfigMap(_jobCommandMap);
String jobName = "JOB" + i;
String taskPartitionId = "0";
workflowBuilder.addJob(jobName, jobConfigBuilder);
recordMap.put(jobName, new TaskRecord(workflowName, jobName, taskPartitionId));
}
// Start the workflow and wait for all tasks started
_driver.start(workflowBuilder.build());
allTasksReady.await();
// add "workflow":"workflow" to the workflow's user content
_driver.addOrUpdateWorkflowUserContentMap(workflowName,
Collections.singletonMap(workflowName, workflowName));
for (TaskRecord rec : recordMap.values()) {
// add "job":"job" to the job's user content
String namespacedJobName = TaskUtil.getNamespacedJobName(rec.workflowName, rec.jobName);
_driver.addOrUpdateJobUserContentMap(rec.workflowName, rec.jobName,
Collections.singletonMap(namespacedJobName, namespacedJobName));
String namespacedTaskName =
TaskUtil.getNamespacedTaskName(namespacedJobName, rec.taskPartitionId);
// add "taskId":"taskId" to the task's user content
_driver.addOrUpdateTaskUserContentMap(rec.workflowName, rec.jobName, rec.taskPartitionId,
Collections.singletonMap(namespacedTaskName, namespacedTaskName));
}
adminReady.countDown();
_driver.pollForWorkflowState(workflowName, TaskState.COMPLETED);
// Aggregate key-value mappings in UserContentStore
for (TaskRecord rec : recordMap.values()) {
Assert.assertEquals(_driver.getWorkflowUserContentMap(rec.workflowName)
.get(TaskDumpResultKey.WorkflowContent.name()),
constructContentStoreResultString(rec.workflowName, rec.workflowName));
String namespacedJobName = TaskUtil.getNamespacedJobName(rec.workflowName, rec.jobName);
Assert.assertEquals(_driver.getJobUserContentMap(rec.workflowName, rec.jobName)
.get(TaskDumpResultKey.JobContent.name()),
constructContentStoreResultString(namespacedJobName, namespacedJobName));
String namespacedTaskName =
TaskUtil.getNamespacedTaskName(namespacedJobName, rec.taskPartitionId);
Assert.assertEquals(
_driver.getTaskUserContentMap(rec.workflowName, rec.jobName, rec.taskPartitionId)
.get(TaskDumpResultKey.TaskContent.name()),
constructContentStoreResultString(namespacedTaskName, namespacedTaskName));
}
}
/**
* A mock task that writes to UserContentStore. MockTask extends UserContentStore.
*/
private class WriteTask extends MockTask {
WriteTask(TaskCallbackContext context) {
super(context);
}
@Override
public TaskResult run() {
allTasksReady.countDown();
try {
adminReady.await();
} catch (Exception e) {
return new TaskResult(TaskResult.Status.FATAL_FAILED, e.getMessage());
}
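// Read back the user content the admin wrote at each scope and persist it under the dump
// keys so the main thread can verify it once the workflow completes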
String workflowStoreContent = constructContentStoreResultString(_workflowName, getUserContent(_workflowName, Scope.WORKFLOW));
String jobStoreContent = constructContentStoreResultString(_jobName, getUserContent(_jobName, Scope.JOB));
String taskStoreContent = constructContentStoreResultString(_taskName, getUserContent(_taskName, Scope.TASK));
putUserContent(TaskDumpResultKey.WorkflowContent.name(), workflowStoreContent, Scope.WORKFLOW);
putUserContent(TaskDumpResultKey.JobContent.name(), jobStoreContent, Scope.JOB);
putUserContent(TaskDumpResultKey.TaskContent.name(), taskStoreContent, Scope.TASK);
return new TaskResult(TaskResult.Status.COMPLETED, "");
}
}
private static String constructContentStoreResultString(String key, String value) {
return String.format("%s::%s", key, value);
}
}
| 9,846 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/assigner/TestAssignableInstance.java | package org.apache.helix.task.assigner;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.task.TaskConfig;
import org.apache.helix.task.TaskConstants;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.testng.collections.Maps;
public class TestAssignableInstance extends AssignerTestBase {
@Test
public void testInvalidInitialization() {
try {
AssignableInstance ai = new AssignableInstance(null, null, null);
Assert.fail("Expecting IllegalArgumentException");
} catch (IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains("cannot be null"));
}
try {
ClusterConfig clusterConfig = new ClusterConfig("testCluster");
InstanceConfig instanceConfig = new InstanceConfig("instance");
LiveInstance liveInstance = new LiveInstance("another-instance");
AssignableInstance ai = new AssignableInstance(clusterConfig, instanceConfig, liveInstance);
Assert.fail("Expecting IllegalArgumentException");
} catch (IllegalArgumentException e) {
Assert.assertTrue(e.getMessage().contains("don't match"));
}
}
@Test
public void testInitializationWithQuotaUnset() {
int expectedCurrentTaskThreadPoolSize = 100;
LiveInstance liveInstance = createLiveInstance(null, null);
liveInstance.setCurrentTaskThreadPoolSize(expectedCurrentTaskThreadPoolSize);
// Initialize AssignableInstance with neither resource capacity nor quota ratio provided
AssignableInstance ai = new AssignableInstance(createClusterConfig(null, null, false),
new InstanceConfig(testInstanceName), liveInstance);
Assert.assertEquals(ai.getUsedCapacity().size(), 1);
Assert.assertEquals(
(int) ai.getUsedCapacity().get(LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name())
.get(AssignableInstance.DEFAULT_QUOTA_TYPE), 0);
Assert.assertEquals(
(int) ai.getTotalCapacity().get(LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name())
.get(AssignableInstance.DEFAULT_QUOTA_TYPE), expectedCurrentTaskThreadPoolSize);
Assert.assertEquals(ai.getCurrentAssignments().size(), 0);
}
@Test
public void testInitializationWithOnlyCapacity() {
// Initialize AssignableInstance with only resource capacity provided
AssignableInstance ai = new AssignableInstance(createClusterConfig(null, null, false),
new InstanceConfig(testInstanceName),
createLiveInstance(testResourceTypes, testResourceCapacity));
Assert.assertEquals(ai.getTotalCapacity().size(), testResourceTypes.length);
Assert.assertEquals(ai.getUsedCapacity().size(), testResourceTypes.length);
for (int i = 0; i < testResourceTypes.length; i++) {
Assert.assertEquals(ai.getTotalCapacity().get(testResourceTypes[i]).size(), 1);
Assert.assertEquals(ai.getUsedCapacity().get(testResourceTypes[i]).size(), 1);
Assert.assertEquals(ai.getTotalCapacity().get(testResourceTypes[i])
.get(AssignableInstance.DEFAULT_QUOTA_TYPE), Integer.valueOf(testResourceCapacity[i]));
Assert.assertEquals(
ai.getUsedCapacity().get(testResourceTypes[i]).get(AssignableInstance.DEFAULT_QUOTA_TYPE),
Integer.valueOf(0));
}
}
@Test
public void testInitializationWithOnlyQuotaType() {
int expectedCurrentTaskThreadPoolSize = 100;
LiveInstance liveInstance = createLiveInstance(null, null);
liveInstance.setCurrentTaskThreadPoolSize(expectedCurrentTaskThreadPoolSize);
// Initialize AssignableInstance with only quota type provided
AssignableInstance ai =
new AssignableInstance(createClusterConfig(testQuotaTypes, testQuotaRatio, false),
new InstanceConfig(testInstanceName), liveInstance);
Assert.assertEquals(ai.getTotalCapacity().size(), 1);
Assert.assertEquals(ai.getUsedCapacity().size(), 1);
Assert.assertEquals(
ai.getTotalCapacity().get(LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name()).size(),
testQuotaTypes.length);
Assert.assertEquals(
ai.getUsedCapacity().get(LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name()).size(),
testQuotaTypes.length);
Assert.assertEquals(
ai.getTotalCapacity().get(LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name()),
calculateExpectedQuotaPerType(expectedCurrentTaskThreadPoolSize, testQuotaTypes,
testQuotaRatio));
Assert.assertEquals(ai.getCurrentAssignments().size(), 0);
}
@Test
public void testInitializationWithQuotaAndCapacity() {
// Initialize AssignableInstance with both capacity and quota type provided
AssignableInstance ai =
new AssignableInstance(createClusterConfig(testQuotaTypes, testQuotaRatio, false),
new InstanceConfig(testInstanceName),
createLiveInstance(testResourceTypes, testResourceCapacity));
Map<String, Integer> usedResourcePerType =
createResourceQuotaPerTypeMap(testQuotaTypes, new int[] {
0, 0, 0
});
for (int i = 0; i < testResourceTypes.length; i++) {
Assert.assertEquals(ai.getTotalCapacity().get(testResourceTypes[i]),
calculateExpectedQuotaPerType(Integer.valueOf(testResourceCapacity[i]), testQuotaTypes,
testQuotaRatio));
Assert.assertEquals(ai.getUsedCapacity().get(testResourceTypes[i]), usedResourcePerType);
}
}
@Test
public void testAssignableInstanceUpdateConfigs() {
AssignableInstance ai =
new AssignableInstance(createClusterConfig(testQuotaTypes, testQuotaRatio, false),
new InstanceConfig(testInstanceName),
createLiveInstance(testResourceTypes, testResourceCapacity));
String[] newResources = new String[] {
"Resource2", "Resource3", "Resource4"
};
String[] newResourceCapacities = new String[] {
"100", "150", "50"
};
String[] newTypes = new String[] {
"Type3", "Type4", "Type5", "Type6"
};
String[] newTypeRatio = new String[] {
"20", "40", "25", "25"
};
LiveInstance newLiveInstance = createLiveInstance(newResources, newResourceCapacities);
ClusterConfig newClusterConfig = createClusterConfig(newTypes, newTypeRatio, false);
ai.updateConfigs(newClusterConfig, null, newLiveInstance);
Assert.assertEquals(ai.getUsedCapacity().size(), newResourceCapacities.length);
Assert.assertEquals(ai.getTotalCapacity().size(), newResourceCapacities.length);
for (int i = 0; i < newResources.length; i++) {
Assert.assertEquals(ai.getTotalCapacity().get(newResources[i]), calculateExpectedQuotaPerType(
Integer.valueOf(newResourceCapacities[i]), newTypes, newTypeRatio));
Assert.assertEquals(ai.getUsedCapacity().get(newResources[i]),
createResourceQuotaPerTypeMap(newTypes, new int[] {
0, 0, 0, 0
}));
}
}
@Test
public void testNormalTryAssign() {
int testCurrentTaskThreadPoolSize = 100;
LiveInstance liveInstance = createLiveInstance(null, null);
liveInstance.setCurrentTaskThreadPoolSize(testCurrentTaskThreadPoolSize);
AssignableInstance ai = new AssignableInstance(createClusterConfig(null, null, true),
new InstanceConfig(testInstanceName), liveInstance);
// When nothing is configured, we should use default quota type to assign
Map<String, TaskAssignResult> results = new HashMap<>();
for (int i = 0; i < testCurrentTaskThreadPoolSize; i++) {
String taskId = Integer.toString(i);
TaskConfig task = new TaskConfig("", null, taskId, null);
TaskAssignResult result = ai.tryAssign(task, AssignableInstance.DEFAULT_QUOTA_TYPE);
Assert.assertTrue(result.isSuccessful());
ai.assign(result);
results.put(taskId, result);
}
// We are out of quota now and we should not be able to assign
String taskId = "TaskCannotAssign";
TaskConfig task = new TaskConfig("", null, taskId, null);
TaskAssignResult result = ai.tryAssign(task, AssignableInstance.DEFAULT_QUOTA_TYPE);
Assert.assertFalse(result.isSuccessful());
Assert.assertEquals(result.getFailureReason(),
TaskAssignResult.FailureReason.INSUFFICIENT_QUOTA);
try {
ai.assign(result);
Assert.fail("Expecting IllegalStateException");
} catch (IllegalStateException e) {
// OK
}
// After releasing 1 task, we should be able to schedule
ai.release(results.get("1").getTaskConfig(), AssignableInstance.DEFAULT_QUOTA_TYPE);
result = ai.tryAssign(task, AssignableInstance.DEFAULT_QUOTA_TYPE);
Assert.assertTrue(result.isSuccessful());
// release all tasks, check remaining resources
for (TaskAssignResult rst : results.values()) {
ai.release(rst.getTaskConfig(), AssignableInstance.DEFAULT_QUOTA_TYPE);
}
Assert.assertEquals(
(int) ai.getUsedCapacity().get(LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name())
.get(AssignableInstance.DEFAULT_QUOTA_TYPE),
0);
}
@Test
public void testTryAssignFailure() {
AssignableInstance ai =
new AssignableInstance(createClusterConfig(testQuotaTypes, testQuotaRatio, false),
new InstanceConfig(testInstanceName),
createLiveInstance(testResourceTypes, testResourceCapacity));
// No such resource type
String taskId = "testTask";
TaskConfig task = new TaskConfig("", null, taskId, "");
TaskAssignResult result = ai.tryAssign(task, AssignableInstance.DEFAULT_QUOTA_TYPE);
Assert.assertFalse(result.isSuccessful());
Assert.assertEquals(result.getFailureReason(),
TaskAssignResult.FailureReason.NO_SUCH_RESOURCE_TYPE);
ai.updateConfigs(null, null, createLiveInstance(new String[] {
LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name()
}, new String[] {
"1"
}));
ai.updateConfigs(createClusterConfig(testQuotaTypes, testQuotaRatio, true), null, null);
result = ai.tryAssign(task, AssignableInstance.DEFAULT_QUOTA_TYPE);
Assert.assertTrue(result.isSuccessful());
ai.assign(result);
try {
ai.assign(result);
Assert.fail("Expecting IllegalArgumentException");
} catch (IllegalStateException e) {
// OK
}
// Duplicate assignment
result = ai.tryAssign(task, AssignableInstance.DEFAULT_QUOTA_TYPE);
Assert.assertFalse(result.isSuccessful());
Assert.assertEquals(result.getFailureReason(),
TaskAssignResult.FailureReason.TASK_ALREADY_ASSIGNED);
// Insufficient quota
ai.release(task, AssignableInstance.DEFAULT_QUOTA_TYPE);
ai.updateConfigs(null, null, createLiveInstance(new String[] {
LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name()
}, new String[] {
"0"
}));
result = ai.tryAssign(task, AssignableInstance.DEFAULT_QUOTA_TYPE);
Assert.assertFalse(result.isSuccessful());
Assert.assertEquals(result.getFailureReason(),
TaskAssignResult.FailureReason.INSUFFICIENT_QUOTA);
}
@Test
public void testRestoreTaskAssignResult() {
AssignableInstance ai =
new AssignableInstance(createClusterConfig(testQuotaTypes, testQuotaRatio, true),
new InstanceConfig(testInstanceName), createLiveInstance(new String[] {
LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name()
}, new String[] {
"40"
}));
Map<String, TaskConfig> currentAssignments = new HashMap<>();
TaskConfig supportedTask = new TaskConfig("", null, "supportedTask", "");
currentAssignments.put("supportedTask", supportedTask);
TaskConfig unsupportedTask = new TaskConfig("", null, "unsupportedTask", "");
currentAssignments.put("unsupportedTask", unsupportedTask);
Map<String, TaskAssignResult> results = Maps.newHashMap();
for (Map.Entry<String, TaskConfig> entry : currentAssignments.entrySet()) {
String taskID = entry.getKey();
TaskConfig taskConfig = entry.getValue();
String quotaType = (taskID.equals("supportedTask")) ? AssignableInstance.DEFAULT_QUOTA_TYPE
: "UnsupportedQuotaType";
// Restore TaskAssignResult
TaskAssignResult taskAssignResult = ai.restoreTaskAssignResult(taskID, taskConfig, quotaType);
if (taskAssignResult.isSuccessful()) {
results.put(taskID, taskAssignResult);
}
}
for (TaskAssignResult rst : results.values()) {
Assert.assertTrue(rst.isSuccessful());
Assert.assertEquals(rst.getAssignableInstance(), ai);
}
Assert.assertEquals(ai.getCurrentAssignments().size(), 2);
// The expected value for the following should be 2, not 1 because the unsupported task should
// also have been assigned as a DEFAULT task
Assert.assertEquals(
(int) ai.getUsedCapacity().get(LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name())
.get(AssignableInstance.DEFAULT_QUOTA_TYPE),
2);
}
private Map<String, Integer> createResourceQuotaPerTypeMap(String[] types, int[] quotas) {
Map<String, Integer> ret = new HashMap<>();
for (int i = 0; i < types.length; i++) {
ret.put(types[i], quotas[i]);
}
return ret;
}
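/**
 * Computes the per-type quota the tests expect from AssignableInstance:
 * round(capacity * typeRatio / sumOfRatios) for each quota type.
 */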
private Map<String, Integer> calculateExpectedQuotaPerType(int capacity, String[] quotaTypes,
String[] quotaRatios) {
Integer totalQuota = 0;
Map<String, Integer> expectedQuotaPerType = new HashMap<>();
for (String ratio : quotaRatios) {
totalQuota += Integer.valueOf(ratio);
}
for (int i = 0; i < quotaRatios.length; i++) {
expectedQuotaPerType.put(quotaTypes[i],
Math.round((float) capacity * Integer.valueOf(quotaRatios[i]) / totalQuota));
}
return expectedQuotaPerType;
}
}
| 9,847 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/assigner/AssignerTestBase.java | package org.apache.helix.task.assigner;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.LiveInstance;
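/**
 * Shared fixtures for the assigner unit tests: canned instance names, resource types and
 * capacities, and quota types and ratios.
 */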
/* package */ class AssignerTestBase {
public static final String testClusterName = "testCluster";
static final String testInstanceName = "testInstance";
static final String[] testResourceTypes = new String[] {"Resource1", "Resource2", "Resource3"};
static final String[] testResourceCapacity = new String[] {"20", "50", "100"};
static final String[] testQuotaTypes = new String[] {"Type1", "Type2", "Type3"};
static final String[] testQuotaRatio = new String[] {"50", "30", "20"};
private static final String defaultQuotaRatio = "100";
/* package */ LiveInstance createLiveInstance(String[] resourceTypes, String[] resourceCapacity) {
return createLiveInstance(resourceTypes, resourceCapacity, testInstanceName);
}
/* package */ LiveInstance createLiveInstance(String[] resourceTypes, String[] resourceCapacity,
String instanceName) {
LiveInstance li = new LiveInstance(instanceName);
if (resourceCapacity != null && resourceTypes != null) {
Map<String, String> resMap = new HashMap<>();
for (int i = 0; i < resourceCapacity.length; i++) {
resMap.put(resourceTypes[i], resourceCapacity[i]);
}
li.setResourceCapacityMap(resMap);
}
return li;
}
/* package */ ClusterConfig createClusterConfig(String[] quotaTypes, String[] quotaRatio,
boolean addDefaultQuota) {
ClusterConfig clusterConfig = new ClusterConfig(testClusterName);
if (quotaTypes != null && quotaRatio != null) {
for (int i = 0; i < quotaTypes.length; i++) {
clusterConfig.setTaskQuotaRatio(quotaTypes[i], quotaRatio[i]);
}
}
if (addDefaultQuota) {
clusterConfig.setTaskQuotaRatio(AssignableInstance.DEFAULT_QUOTA_TYPE, defaultQuotaRatio);
}
return clusterConfig;
}
}
| 9,848 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task | Create_ds/helix/helix-core/src/test/java/org/apache/helix/task/assigner/TestThreadCountBasedTaskAssigner.java | package org.apache.helix.task.assigner;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import org.apache.helix.common.caches.TaskDataCache;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.task.AssignableInstanceManager;
import org.apache.helix.task.TaskConfig;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestThreadCountBasedTaskAssigner extends AssignerTestBase {
@Test
public void testSuccessfulAssignment() {
TaskAssigner assigner = new ThreadCountBasedTaskAssigner();
int taskCountPerType = 150;
int instanceCount = 20;
int threadCount = 50;
AssignableInstanceManager assignableInstanceManager =
createAssignableInstanceManager(instanceCount, threadCount);
for (String quotaType : testQuotaTypes) {
// Create tasks
List<TaskConfig> tasks = createTaskConfigs(taskCountPerType);
// Assign
Map<String, TaskAssignResult> results = assigner.assignTasks(assignableInstanceManager,
assignableInstanceManager.getAssignableInstanceNames(), tasks, quotaType);
// Check success
assertAssignmentResults(results.values(), true);
// Check evenness
for (AssignableInstance instance : assignableInstanceManager.getAssignableInstanceMap()
.values()) {
int assignedCount = instance.getUsedCapacity()
.get(LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name()).get(quotaType);
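// An even assignment gives each instance either floor or ceil of
// taskCountPerType / instanceCount tasks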
Assert.assertTrue(assignedCount <= taskCountPerType / instanceCount + 1
&& assignedCount >= taskCountPerType / instanceCount);
}
}
}
@Test
public void testAssignmentFailureNoInstance() {
TaskAssigner assigner = new ThreadCountBasedTaskAssigner();
int taskCount = 10;
List<TaskConfig> tasks = createTaskConfigs(taskCount);
AssignableInstanceManager assignableInstanceManager = new AssignableInstanceManager();
Map<String, TaskAssignResult> results = assigner.assignTasks(assignableInstanceManager,
assignableInstanceManager.getAssignableInstanceNames(), tasks, "Dummy");
Assert.assertEquals(results.size(), taskCount);
for (TaskAssignResult result : results.values()) {
Assert.assertFalse(result.isSuccessful());
Assert.assertNull(result.getAssignableInstance());
Assert.assertEquals(result.getFailureReason(),
TaskAssignResult.FailureReason.INSUFFICIENT_QUOTA);
}
}
@Test
public void testAssignmentFailureNoTask() {
TaskAssigner assigner = new ThreadCountBasedTaskAssigner();
AssignableInstanceManager assignableInstanceManager = createAssignableInstanceManager(1, 10);
Map<String, TaskAssignResult> results = assigner.assignTasks(assignableInstanceManager,
assignableInstanceManager.getAssignableInstanceNames(),
Collections.<TaskConfig> emptyList(), AssignableInstance.DEFAULT_QUOTA_TYPE);
Assert.assertTrue(results.isEmpty());
}
@Test
public void testAssignmentFailureInsufficientQuota() {
TaskAssigner assigner = new ThreadCountBasedTaskAssigner();
// 10 * Type1 quota
AssignableInstanceManager assignableInstanceManager = createAssignableInstanceManager(2, 10);
List<TaskConfig> tasks = createTaskConfigs(20);
Map<String, TaskAssignResult> results = assigner.assignTasks(assignableInstanceManager,
assignableInstanceManager.getAssignableInstanceNames(), tasks, testQuotaTypes[0]);
int successCnt = 0;
int failCnt = 0;
for (TaskAssignResult rst : results.values()) {
if (rst.isSuccessful()) {
successCnt += 1;
} else {
failCnt += 1;
Assert.assertEquals(rst.getFailureReason(),
TaskAssignResult.FailureReason.INSUFFICIENT_QUOTA);
}
}
Assert.assertEquals(successCnt, 10);
Assert.assertEquals(failCnt, 10);
}
@Test
public void testAssignmentFailureDuplicatedTask() {
TaskAssigner assigner = new ThreadCountBasedTaskAssigner();
AssignableInstanceManager assignableInstanceManager = createAssignableInstanceManager(1, 20);
List<TaskConfig> tasks = createTaskConfigs(10, false);
// Duplicate all tasks
tasks.addAll(createTaskConfigs(10, false));
Collections.shuffle(tasks);
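// The assigner is expected to collapse duplicate task IDs, so only the 10 unique tasks
// should get results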
Map<String, TaskAssignResult> results = assigner.assignTasks(assignableInstanceManager,
assignableInstanceManager.getAssignableInstanceNames(), tasks, testQuotaTypes[0]);
Assert.assertEquals(results.size(), 10);
assertAssignmentResults(results.values(), true);
}
@Test(enabled = false, description = "Not enabling profiling tests")
public void testAssignerProfiling() {
int instanceCount = 1000;
int taskCount = 50000;
for (int batchSize : new int[] {10000, 5000, 2000, 1000, 500, 100}) {
System.out.println("testing batch size: " + batchSize);
profileAssigner(batchSize, instanceCount, taskCount);
}
}
@Test
public void testAssignmentToGivenInstances() {
int totalNumberOfInstances = 10;
int eligibleNumberOfInstances = 5;
String instanceNameFormat = "instance-%s";
TaskAssigner assigner = new ThreadCountBasedTaskAssigner();
AssignableInstanceManager assignableInstanceManager = createAssignableInstanceManager(10, 20);
List<TaskConfig> tasks = createTaskConfigs(100, false);
Set<String> eligibleInstances = new HashSet<>();
// Add only eligible number of instances
for (int i = 0; i < eligibleNumberOfInstances; i++) {
eligibleInstances.add(String.format(instanceNameFormat, i));
}
Map<String, TaskAssignResult> result = assigner.assignTasks(assignableInstanceManager,
eligibleInstances, tasks, testQuotaTypes[0]);
for (int i = 0; i < totalNumberOfInstances; i++) {
String instance = String.format(instanceNameFormat, i);
boolean isAssignmentEmpty = assignableInstanceManager.getAssignableInstance(instance)
.getCurrentAssignments().isEmpty();
// Check that assignment only took place to eligible number of instances and that assignment
// did not happen to non-eligible AssignableInstances
if (i < eligibleNumberOfInstances) {
// Must have tasks assigned to these instances
Assert.assertFalse(isAssignmentEmpty);
} else {
// These instances should have no tasks assigned to them
Assert.assertTrue(isAssignmentEmpty);
}
}
}
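  /**
   * Repeatedly runs the assigner against a fresh pool of instances and prints the
   * average wall-clock time per trial; used by the disabled profiling test above.
   */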
private void profileAssigner(int assignBatchSize, int instanceCount, int taskCount) {
    int trials = 100;
long totalTime = 0;
    for (int i = 0; i < trials; i++) {
TaskAssigner assigner = new ThreadCountBasedTaskAssigner();
      // Each instance advertises 100 threads, so total capacity comfortably exceeds taskCount
AssignableInstanceManager assignableInstanceManager =
createAssignableInstanceManager(instanceCount, 100);
List<TaskConfig> tasks = createTaskConfigs(taskCount);
List<Map<String, TaskAssignResult>> allResults = new ArrayList<>();
// Assign
long start = System.currentTimeMillis();
for (int j = 0; j < taskCount / assignBatchSize; j++) {
allResults.add(assigner.assignTasks(assignableInstanceManager,
assignableInstanceManager.getAssignableInstanceNames(),
tasks.subList(j * assignBatchSize, (j + 1) * assignBatchSize), testQuotaTypes[0]));
}
long duration = System.currentTimeMillis() - start;
totalTime += duration;
// Validate
for (Map<String, TaskAssignResult> results : allResults) {
for (TaskAssignResult rst : results.values()) {
Assert.assertTrue(rst.isSuccessful());
}
}
}
System.out.println("Average time: " + totalTime / trail + "ms");
}
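  /**
   * Asserts that every {@link TaskAssignResult} matches the expected success flag.
   */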
private void assertAssignmentResults(Iterable<TaskAssignResult> results, boolean expected) {
for (TaskAssignResult rst : results) {
Assert.assertEquals(rst.isSuccessful(), expected);
}
}
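  /**
   * Creates {@code count} task configs with random UUIDs as task ids.
   */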
private List<TaskConfig> createTaskConfigs(int count) {
return createTaskConfigs(count, true);
}
private List<TaskConfig> createTaskConfigs(int count, boolean randomID) {
List<TaskConfig> tasks = new ArrayList<>();
for (int i = 0; i < count; i++) {
TaskConfig task =
new TaskConfig(null, null, randomID ? UUID.randomUUID().toString() : "task-" + i, null);
tasks.add(task);
}
return tasks;
}
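  /**
   * Builds an AssignableInstanceManager backed by {@code count} live instances, each
   * advertising {@code threadCount} task-execution threads and the shared test quota config.
   */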
private AssignableInstanceManager createAssignableInstanceManager(int count, int threadCount) {
AssignableInstanceManager assignableInstanceManager = new AssignableInstanceManager();
ClusterConfig clusterConfig = createClusterConfig(testQuotaTypes, testQuotaRatio, false);
String instanceNameFormat = "instance-%s";
Map<String, LiveInstance> liveInstanceMap = new HashMap<>();
Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
for (int i = 0; i < count; i++) {
String instanceName = String.format(instanceNameFormat, i);
liveInstanceMap.put(instanceName, createLiveInstance(
new String[] { LiveInstance.InstanceResourceType.TASK_EXEC_THREAD.name() },
new String[] { Integer.toString(threadCount) }, instanceName));
instanceConfigMap.put(instanceName, new InstanceConfig(instanceName));
}
assignableInstanceManager
.buildAssignableInstances(clusterConfig, new TaskDataCache(testClusterName),
liveInstanceMap, instanceConfigMap);
return assignableInstanceManager;
}
}
| 9,849 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestRESTConfig.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.HelixException;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
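/**
 * Unit tests for {@link RESTConfig}: resolving a per-instance base URL from the
 * customized health URL template.
 */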
public class TestRESTConfig {
@Test
public void testGetBaseUrlValid() {
ZNRecord record = new ZNRecord("test");
record.setSimpleField(RESTConfig.SimpleFields.CUSTOMIZED_HEALTH_URL.name(), "http://*:8080");
RESTConfig restConfig = new RESTConfig(record);
Assert.assertEquals(restConfig.getBaseUrl("instance0"), "http://instance0:8080");
Assert.assertEquals(restConfig.getBaseUrl("instance1_9090"), "http://instance1:8080");
}
@Test(expectedExceptions = HelixException.class)
public void testGetBaseUrlInvalid() {
ZNRecord record = new ZNRecord("test");
record.setSimpleField(RESTConfig.SimpleFields.CUSTOMIZED_HEALTH_URL.name(), "http://foo:8080");
RESTConfig restConfig = new RESTConfig(record);
restConfig.getBaseUrl("instance0");
}
}
| 9,850 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestIdealState.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.TestHelper;
import org.apache.helix.model.IdealState.IdealStateModeProperty;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.testng.Assert;
import org.testng.annotations.Test;
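/**
 * Unit tests for {@link IdealState}: instance-set lookup per rebalance mode, replica
 * validation, and compatibility between the deprecated ideal-state modes and
 * {@link RebalanceMode}.
 */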
@SuppressWarnings("deprecation")
public class TestIdealState {
@Test
public void testGetInstanceSet() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String testName = className + "_" + methodName;
System.out.println("START " + testName + " at " + new Date(System.currentTimeMillis()));
IdealState idealState = new IdealState("idealState");
idealState.getRecord().setListField("TestDB_0", Arrays.asList("node_1", "node_2"));
Map<String, String> instanceState = new HashMap<String, String>();
instanceState.put("node_3", "MASTER");
instanceState.put("node_4", "SLAVE");
idealState.getRecord().setMapField("TestDB_1", instanceState);
// test SEMI_AUTO mode
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
Set<String> instances = idealState.getInstanceSet("TestDB_0");
// System.out.println("instances: " + instances);
Assert.assertEquals(instances.size(), 2, "Should contain node_1 and node_2");
Assert.assertTrue(instances.contains("node_1"), "Should contain node_1 and node_2");
Assert.assertTrue(instances.contains("node_2"), "Should contain node_1 and node_2");
instances = idealState.getInstanceSet("TestDB_nonExist_auto");
Assert.assertEquals(instances, Collections.emptySet(), "Should get empty set");
// test CUSTOMIZED mode
idealState.setRebalanceMode(RebalanceMode.CUSTOMIZED);
instances = idealState.getInstanceSet("TestDB_1");
// System.out.println("instances: " + instances);
Assert.assertEquals(instances.size(), 2, "Should contain node_3 and node_4");
Assert.assertTrue(instances.contains("node_3"), "Should contain node_3 and node_4");
Assert.assertTrue(instances.contains("node_4"), "Should contain node_3 and node_4");
instances = idealState.getInstanceSet("TestDB_nonExist_custom");
Assert.assertEquals(instances, Collections.emptySet(), "Should get empty set");
System.out.println("END " + testName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testReplicas() {
IdealState idealState = new IdealState("test-db");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(4);
idealState.setStateModelDefRef("MasterSlave");
idealState.setReplicas("" + 2);
List<String> preferenceList = new ArrayList<String>();
preferenceList.add("node_0");
idealState.getRecord().setListField("test-db_0", preferenceList);
Assert.assertFalse(idealState.isValid(),
"should fail since replicas not equals to preference-list size");
preferenceList.add("node_1");
idealState.getRecord().setListField("test-db_0", preferenceList);
Assert.assertTrue(idealState.isValid(),
"should pass since replicas equals to preference-list size");
}
@Test
public void testFullAutoModeCompatibility() {
IdealState idealStateOld = new IdealState("old-test-db");
idealStateOld.setIdealStateMode(IdealStateModeProperty.AUTO_REBALANCE.toString());
Assert.assertEquals(idealStateOld.getRebalanceMode(), RebalanceMode.FULL_AUTO);
Assert.assertEquals(idealStateOld.getIdealStateMode(), IdealStateModeProperty.AUTO_REBALANCE);
IdealState idealStateNew = new IdealState("new-test-db");
idealStateNew.setRebalanceMode(RebalanceMode.FULL_AUTO);
Assert.assertEquals(idealStateNew.getIdealStateMode(), IdealStateModeProperty.AUTO_REBALANCE);
Assert.assertEquals(idealStateNew.getRebalanceMode(), RebalanceMode.FULL_AUTO);
}
@Test
public void testSemiAutoModeCompatibility() {
IdealState idealStateOld = new IdealState("old-test-db");
idealStateOld.setIdealStateMode(IdealStateModeProperty.AUTO.toString());
Assert.assertEquals(idealStateOld.getRebalanceMode(), RebalanceMode.SEMI_AUTO);
Assert.assertEquals(idealStateOld.getIdealStateMode(), IdealStateModeProperty.AUTO);
IdealState idealStateNew = new IdealState("new-test-db");
idealStateNew.setRebalanceMode(RebalanceMode.SEMI_AUTO);
Assert.assertEquals(idealStateNew.getIdealStateMode(), IdealStateModeProperty.AUTO);
Assert.assertEquals(idealStateNew.getRebalanceMode(), RebalanceMode.SEMI_AUTO);
}
@Test
public void testCustomizedModeCompatibility() {
IdealState idealStateOld = new IdealState("old-test-db");
idealStateOld.setIdealStateMode(IdealStateModeProperty.CUSTOMIZED.toString());
Assert.assertEquals(idealStateOld.getRebalanceMode(), RebalanceMode.CUSTOMIZED);
Assert.assertEquals(idealStateOld.getIdealStateMode(), IdealStateModeProperty.CUSTOMIZED);
IdealState idealStateNew = new IdealState("new-test-db");
idealStateNew.setRebalanceMode(RebalanceMode.CUSTOMIZED);
Assert.assertEquals(idealStateNew.getIdealStateMode(), IdealStateModeProperty.CUSTOMIZED);
Assert.assertEquals(idealStateNew.getRebalanceMode(), RebalanceMode.CUSTOMIZED);
}
}
| 9,851 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestClusterTopologyConfig.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Iterator;
import org.apache.helix.HelixException;
import org.apache.helix.controller.rebalancer.topology.Topology;
import org.testng.Assert;
import org.testng.annotations.Test;
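/**
 * Unit tests for {@link ClusterTopologyConfig#createFromClusterConfig(ClusterConfig)},
 * covering non-topology-aware clusters, valid topology strings, and an invalid fault zone.
 */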
public class TestClusterTopologyConfig {
@Test
public void testClusterNonTopologyAware() {
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setTopologyAwareEnabled(false);
ClusterTopologyConfig clusterTopologyConfig = ClusterTopologyConfig.createFromClusterConfig(testConfig);
Assert.assertEquals(clusterTopologyConfig.getEndNodeType(), Topology.Types.INSTANCE.name());
Assert.assertEquals(clusterTopologyConfig.getFaultZoneType(), Topology.Types.INSTANCE.name());
Assert.assertTrue(clusterTopologyConfig.getTopologyKeyDefaultValue().isEmpty());
}
@Test
public void testClusterValidTopology() {
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setTopologyAwareEnabled(true);
testConfig.setTopology("/zone/instance");
// no fault zone setup
ClusterTopologyConfig clusterTopologyConfig = ClusterTopologyConfig.createFromClusterConfig(testConfig);
Assert.assertEquals(clusterTopologyConfig.getEndNodeType(), "instance");
Assert.assertEquals(clusterTopologyConfig.getFaultZoneType(), "instance");
Assert.assertEquals(clusterTopologyConfig.getTopologyKeyDefaultValue().size(), 2);
// with fault zone
testConfig.setFaultZoneType("zone");
testConfig.setTopology(" /zone/instance ");
clusterTopologyConfig = ClusterTopologyConfig.createFromClusterConfig(testConfig);
Assert.assertEquals(clusterTopologyConfig.getEndNodeType(), "instance");
Assert.assertEquals(clusterTopologyConfig.getFaultZoneType(), "zone");
Assert.assertEquals(clusterTopologyConfig.getTopologyKeyDefaultValue().size(), 2);
String[] keys = new String[] {"zone", "instance"};
Iterator<String> itr = clusterTopologyConfig.getTopologyKeyDefaultValue().keySet().iterator();
for (String k : keys) {
Assert.assertEquals(k, itr.next());
}
testConfig.setTopology("/rack/zone/instance");
clusterTopologyConfig = ClusterTopologyConfig.createFromClusterConfig(testConfig);
Assert.assertEquals(clusterTopologyConfig.getEndNodeType(), "instance");
Assert.assertEquals(clusterTopologyConfig.getFaultZoneType(), "zone");
Assert.assertEquals(clusterTopologyConfig.getTopologyKeyDefaultValue().size(), 3);
keys = new String[] {"rack", "zone", "instance"};
itr = clusterTopologyConfig.getTopologyKeyDefaultValue().keySet().iterator();
for (String k : keys) {
Assert.assertEquals(k, itr.next());
}
}
@Test(expectedExceptions = HelixException.class)
public void testClusterInvalidTopology() {
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setTopologyAwareEnabled(true);
testConfig.setTopology("/zone/instance");
testConfig.setFaultZoneType("rack");
ClusterTopologyConfig.createFromClusterConfig(testConfig);
}
}
| 9,852 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestInstanceConfig.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.constants.InstanceConstants;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
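/**
 * Unit tests for {@link InstanceConfig}: validation, domain parsing, enable/disable
 * metadata, capacity maps, task thread pool size, and the builder.
 */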
public class TestInstanceConfig {
@Test
public void testNotCheckingHostPortExistence() {
InstanceConfig config = new InstanceConfig("node_0");
Assert.assertTrue(config.isValid(),
"HELIX-65: should not check host/port existence for instance-config");
}
@Test
public void testGetParsedDomain() {
InstanceConfig instanceConfig = new InstanceConfig(new ZNRecord("id"));
instanceConfig
.setDomain("cluster=myCluster,zone=myZone1,rack=myRack,host=hostname,instance=instance001");
Map<String, String> parsedDomain = instanceConfig.getDomainAsMap();
Assert.assertEquals(parsedDomain.size(), 5);
Assert.assertEquals(parsedDomain.get("zone"), "myZone1");
}
@Test
public void testSetInstanceEnableWithReason() {
InstanceConfig instanceConfig = new InstanceConfig(new ZNRecord("id"));
instanceConfig.setInstanceEnabled(true);
instanceConfig.setInstanceDisabledReason("NoShowReason");
instanceConfig.setInstanceDisabledType(InstanceConstants.InstanceDisabledType.USER_OPERATION);
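    // Disabled reason/type set while the instance is enabled should not be persisted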
Assert.assertEquals(instanceConfig.getRecord().getSimpleFields()
.get(InstanceConfig.InstanceConfigProperty.HELIX_ENABLED.toString()), "true");
Assert.assertEquals(instanceConfig.getRecord().getSimpleFields()
.get(InstanceConfig.InstanceConfigProperty.HELIX_DISABLED_REASON.toString()), null);
Assert.assertEquals(instanceConfig.getRecord().getSimpleFields()
.get(InstanceConfig.InstanceConfigProperty.HELIX_DISABLED_TYPE.toString()), null);
instanceConfig.setInstanceEnabled(false);
String reasonCode = "ReasonCode";
instanceConfig.setInstanceDisabledReason(reasonCode);
instanceConfig.setInstanceDisabledType(InstanceConstants.InstanceDisabledType.USER_OPERATION);
Assert.assertEquals(instanceConfig.getRecord().getSimpleFields()
.get(InstanceConfig.InstanceConfigProperty.HELIX_ENABLED.toString()), "false");
Assert.assertEquals(instanceConfig.getRecord().getSimpleFields()
.get(InstanceConfig.InstanceConfigProperty.HELIX_DISABLED_REASON.toString()), reasonCode);
Assert.assertEquals(instanceConfig.getInstanceDisabledReason(), reasonCode);
Assert.assertEquals(instanceConfig.getInstanceDisabledType(),
InstanceConstants.InstanceDisabledType.USER_OPERATION.toString());
}
@Test
public void testGetParsedDomainEmptyDomain() {
InstanceConfig instanceConfig = new InstanceConfig(new ZNRecord("id"));
Map<String, String> parsedDomain = instanceConfig.getDomainAsMap();
Assert.assertTrue(parsedDomain.isEmpty());
}
@Test
public void testGetInstanceCapacityMap() {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1,
"item2", 2,
"item3", 3);
Map<String, String> capacityDataMapString = ImmutableMap.of("item1", "1",
"item2", "2",
"item3", "3");
ZNRecord rec = new ZNRecord("testId");
rec.setMapField(InstanceConfig.InstanceConfigProperty.INSTANCE_CAPACITY_MAP.name(), capacityDataMapString);
InstanceConfig testConfig = new InstanceConfig(rec);
Assert.assertTrue(testConfig.getInstanceCapacityMap().equals(capacityDataMap));
}
@Test
public void testGetInstanceCapacityMapEmpty() {
InstanceConfig testConfig = new InstanceConfig("testId");
Assert.assertTrue(testConfig.getInstanceCapacityMap().equals(Collections.emptyMap()));
}
@Test
public void testSetInstanceCapacityMap() {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1,
"item2", 2,
"item3", 3);
Map<String, String> capacityDataMapString =
ImmutableMap.of("item1", "1", "item2", "2", "item3", "3");
InstanceConfig testConfig = new InstanceConfig("testConfig");
testConfig.setInstanceCapacityMap(capacityDataMap);
Assert.assertEquals(testConfig.getRecord().getMapField(InstanceConfig.InstanceConfigProperty.
INSTANCE_CAPACITY_MAP.name()), capacityDataMapString);
    // Setting an empty map is allowed; it clears the instance capacity map in the InstanceConfig
testConfig.setInstanceCapacityMap(Collections.emptyMap());
Assert.assertEquals(testConfig.getRecord().getMapField(InstanceConfig.InstanceConfigProperty.
INSTANCE_CAPACITY_MAP.name()), Collections.emptyMap());
    // Setting null is allowed; it removes the instance capacity map field from the InstanceConfig
testConfig.setInstanceCapacityMap(null);
Assert.assertTrue(testConfig.getRecord().getMapField(InstanceConfig.InstanceConfigProperty.
INSTANCE_CAPACITY_MAP.name()) == null);
}
@Test(expectedExceptions = IllegalArgumentException.class,
expectedExceptionsMessageRegExp = "Capacity Data contains a negative value: item3 = -3")
public void testSetInstanceCapacityMapInvalid() {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1,
"item2", 2,
"item3", -3);
InstanceConfig testConfig = new InstanceConfig("testConfig");
testConfig.setInstanceCapacityMap(capacityDataMap);
}
@Test
public void testGetTargetTaskThreadPoolSize() {
InstanceConfig testConfig = new InstanceConfig("testConfig");
testConfig.getRecord().setIntField(
InstanceConfig.InstanceConfigProperty.TARGET_TASK_THREAD_POOL_SIZE.name(), 100);
Assert.assertEquals(testConfig.getTargetTaskThreadPoolSize(), 100);
}
@Test
public void testSetTargetTaskThreadPoolSize() {
InstanceConfig testConfig = new InstanceConfig("testConfig");
testConfig.setTargetTaskThreadPoolSize(100);
Assert.assertEquals(testConfig.getRecord()
.getIntField(InstanceConfig.InstanceConfigProperty.TARGET_TASK_THREAD_POOL_SIZE.name(), -1),
100);
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testSetTargetTaskThreadPoolSizeIllegalArgument() {
InstanceConfig testConfig = new InstanceConfig("testConfig");
testConfig.setTargetTaskThreadPoolSize(-1);
}
@Test
public void testInstanceConfigBuilder() {
Map<String, String> instanceInfoMap = new HashMap<>();
instanceInfoMap.put("CAGE", "H");
Map<String, Integer> capacityDataMap = ImmutableMap.of("weight1", 1);
InstanceConfig instanceConfig =
new InstanceConfig.Builder().setHostName("testHost").setPort("1234").setDomain("foo=bar")
.setWeight(100).setInstanceEnabled(true).addTag("tag1").addTag("tag2")
.setInstanceEnabled(false).setInstanceInfoMap(instanceInfoMap)
.addInstanceInfo("CAGE", "G").addInstanceInfo("CABINET", "30")
.setInstanceCapacityMap(capacityDataMap).build("instance1");
Assert.assertEquals(instanceConfig.getId(), "instance1");
Assert.assertEquals(instanceConfig.getHostName(), "testHost");
Assert.assertEquals(instanceConfig.getPort(), "1234");
Assert.assertEquals(instanceConfig.getDomainAsString(), "foo=bar");
Assert.assertEquals(instanceConfig.getWeight(), 100);
Assert.assertTrue(instanceConfig.getTags().contains("tag1"));
Assert.assertTrue(instanceConfig.getTags().contains("tag2"));
Assert.assertFalse(instanceConfig.getInstanceEnabled());
Assert.assertEquals(instanceConfig.getInstanceInfoMap().get("CAGE"), "H");
Assert.assertEquals(instanceConfig.getInstanceInfoMap().get("CABINET"), "30");
Assert.assertEquals(instanceConfig.getInstanceCapacityMap().get("weight1"), Integer.valueOf(1));
}
}
| 9,853 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestClusterConfig.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.controller.rebalancer.constraint.MockAbnormalStateResolver;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.apache.helix.model.ClusterConfig.GlobalRebalancePreferenceKey.EVENNESS;
import static org.apache.helix.model.ClusterConfig.GlobalRebalancePreferenceKey.LESS_MOVEMENT;
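/**
 * Unit tests for {@link ClusterConfig}: capacity keys, task thread pool size, global
 * rebalance preferences, default capacity/weight maps, maintenance and purge timeouts,
 * and abnormal-state resolver configuration.
 */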
public class TestClusterConfig {
@Test
public void testGetCapacityKeys() {
List<String> keys = ImmutableList.of("CPU", "MEMORY", "Random");
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.getRecord()
.setListField(ClusterConfig.ClusterConfigProperty.INSTANCE_CAPACITY_KEYS.name(), keys);
Assert.assertEquals(testConfig.getInstanceCapacityKeys(), keys);
}
@Test
public void testGetCapacityKeysEmpty() {
ClusterConfig testConfig = new ClusterConfig("testId");
Assert.assertEquals(testConfig.getInstanceCapacityKeys(), Collections.emptyList());
}
@Test
public void testSetCapacityKeys() {
List<String> keys = ImmutableList.of("CPU", "MEMORY", "Random");
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setInstanceCapacityKeys(keys);
Assert.assertEquals(keys, testConfig.getRecord()
.getListField(ClusterConfig.ClusterConfigProperty.INSTANCE_CAPACITY_KEYS.name()));
testConfig.setInstanceCapacityKeys(Collections.emptyList());
Assert.assertEquals(testConfig.getRecord()
.getListField(ClusterConfig.ClusterConfigProperty.INSTANCE_CAPACITY_KEYS.name()),
Collections.emptyList());
testConfig.setInstanceCapacityKeys(null);
Assert.assertTrue(testConfig.getRecord()
.getListField(ClusterConfig.ClusterConfigProperty.INSTANCE_CAPACITY_KEYS.name()) == null);
}
@Test
public void testGetGlobalTargetTaskThreadPoolSize() {
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.getRecord().setIntField(
ClusterConfig.ClusterConfigProperty.GLOBAL_TARGET_TASK_THREAD_POOL_SIZE.name(), 100);
Assert.assertEquals(testConfig.getGlobalTargetTaskThreadPoolSize(), 100);
}
@Test
public void testSetGlobalTargetTaskThreadPoolSize() {
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setGlobalTargetTaskThreadPoolSize(100);
Assert.assertEquals(testConfig.getRecord().getIntField(
ClusterConfig.ClusterConfigProperty.GLOBAL_TARGET_TASK_THREAD_POOL_SIZE.name(), -1), 100);
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testSetGlobalTargetTaskThreadPoolSizeIllegalArgument() {
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setGlobalTargetTaskThreadPoolSize(-1);
}
@Test
public void testGetRebalancePreference() {
Map<ClusterConfig.GlobalRebalancePreferenceKey, Integer> preference = new HashMap<>();
preference.put(EVENNESS, 5);
preference.put(LESS_MOVEMENT, 3);
Map<String, String> mapFieldData = new HashMap<>();
for (ClusterConfig.GlobalRebalancePreferenceKey key : preference.keySet()) {
mapFieldData.put(key.name(), String.valueOf(preference.get(key)));
}
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.getRecord()
.setMapField(ClusterConfig.ClusterConfigProperty.REBALANCE_PREFERENCE.name(), mapFieldData);
Assert.assertEquals(testConfig.getGlobalRebalancePreference(), preference);
}
@Test
public void testGetRebalancePreferenceDefault() {
ClusterConfig testConfig = new ClusterConfig("testId");
Assert.assertEquals(testConfig.getGlobalRebalancePreference(),
ClusterConfig.DEFAULT_GLOBAL_REBALANCE_PREFERENCE);
}
@Test
public void testGetRebalancePreferenceMissingKey() {
ClusterConfig testConfig = new ClusterConfig("testId");
Map<String, String> preference = new HashMap<>();
preference.put(EVENNESS.name(), String.valueOf(5));
testConfig.getRecord()
.setMapField(ClusterConfig.ClusterConfigProperty.REBALANCE_PREFERENCE.name(), preference);
Assert.assertEquals(testConfig.getGlobalRebalancePreference(), Collections.emptyMap());
}
@Test
public void testSetRebalancePreference() {
Map<ClusterConfig.GlobalRebalancePreferenceKey, Integer> preference = new HashMap<>();
preference.put(EVENNESS, 5);
preference.put(LESS_MOVEMENT, 3);
Map<String, String> mapFieldData = new HashMap<>();
for (ClusterConfig.GlobalRebalancePreferenceKey key : preference.keySet()) {
mapFieldData.put(key.name(), String.valueOf(preference.get(key)));
}
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setGlobalRebalancePreference(preference);
Assert.assertEquals(testConfig.getRecord()
.getMapField(ClusterConfig.ClusterConfigProperty.REBALANCE_PREFERENCE.name()),
mapFieldData);
testConfig.setGlobalRebalancePreference(Collections.emptyMap());
Assert.assertEquals(testConfig.getRecord()
.getMapField(ClusterConfig.ClusterConfigProperty.REBALANCE_PREFERENCE.name()),
Collections.emptyMap());
testConfig.setGlobalRebalancePreference(null);
Assert.assertTrue(testConfig.getRecord()
.getMapField(ClusterConfig.ClusterConfigProperty.REBALANCE_PREFERENCE.name()) == null);
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testSetRebalancePreferenceInvalidNumber() {
Map<ClusterConfig.GlobalRebalancePreferenceKey, Integer> preference = new HashMap<>();
preference.put(EVENNESS, -1);
preference.put(LESS_MOVEMENT, 3);
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setGlobalRebalancePreference(preference);
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testSetRebalancePreferenceMissingKey() {
Map<ClusterConfig.GlobalRebalancePreferenceKey, Integer> preference = new HashMap<>();
preference.put(EVENNESS, 1);
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setGlobalRebalancePreference(preference);
}
@Test
public void testGetInstanceCapacityMap() {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1, "item2", 2, "item3", 3);
Map<String, String> capacityDataMapString =
ImmutableMap.of("item1", "1", "item2", "2", "item3", "3");
ZNRecord rec = new ZNRecord("testId");
rec.setMapField(ClusterConfig.ClusterConfigProperty.DEFAULT_INSTANCE_CAPACITY_MAP.name(),
capacityDataMapString);
ClusterConfig testConfig = new ClusterConfig(rec);
Assert.assertTrue(testConfig.getDefaultInstanceCapacityMap().equals(capacityDataMap));
}
@Test
public void testGetInstanceCapacityMapEmpty() {
ClusterConfig testConfig = new ClusterConfig("testId");
Assert.assertTrue(testConfig.getDefaultInstanceCapacityMap().equals(Collections.emptyMap()));
}
@Test
public void testSetInstanceCapacityMap() {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1, "item2", 2, "item3", 3);
Map<String, String> capacityDataMapString =
ImmutableMap.of("item1", "1", "item2", "2", "item3", "3");
ClusterConfig testConfig = new ClusterConfig("testConfig");
testConfig.setDefaultInstanceCapacityMap(capacityDataMap);
Assert.assertEquals(testConfig.getRecord().getMapField(ClusterConfig.ClusterConfigProperty.
DEFAULT_INSTANCE_CAPACITY_MAP.name()), capacityDataMapString);
    // Setting an empty map is allowed; it clears the default instance capacity values
testConfig.setDefaultInstanceCapacityMap(Collections.emptyMap());
Assert.assertEquals(testConfig.getRecord().getMapField(ClusterConfig.ClusterConfigProperty.
DEFAULT_INSTANCE_CAPACITY_MAP.name()), Collections.emptyMap());
testConfig.setDefaultInstanceCapacityMap(null);
Assert.assertTrue(testConfig.getRecord().getMapField(ClusterConfig.ClusterConfigProperty.
DEFAULT_INSTANCE_CAPACITY_MAP.name()) == null);
}
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Default capacity data contains a negative value: item3 = -3")
public void testSetInstanceCapacityMapInvalid() {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1, "item2", 2, "item3", -3);
ClusterConfig testConfig = new ClusterConfig("testConfig");
testConfig.setDefaultInstanceCapacityMap(capacityDataMap);
}
@Test
public void testGetPartitionWeightMap() {
Map<String, Integer> weightDataMap = ImmutableMap.of("item1", 1, "item2", 2, "item3", 3);
Map<String, String> weightDataMapString =
ImmutableMap.of("item1", "1", "item2", "2", "item3", "3");
ZNRecord rec = new ZNRecord("testId");
rec.setMapField(ClusterConfig.ClusterConfigProperty.DEFAULT_PARTITION_WEIGHT_MAP.name(),
weightDataMapString);
ClusterConfig testConfig = new ClusterConfig(rec);
Assert.assertTrue(testConfig.getDefaultPartitionWeightMap().equals(weightDataMap));
}
@Test
public void testGetPartitionWeightMapEmpty() {
ClusterConfig testConfig = new ClusterConfig("testId");
Assert.assertTrue(testConfig.getDefaultPartitionWeightMap().equals(Collections.emptyMap()));
}
@Test
public void testSetPartitionWeightMap() {
Map<String, Integer> weightDataMap = ImmutableMap.of("item1", 1, "item2", 2, "item3", 3);
Map<String, String> weightDataMapString =
ImmutableMap.of("item1", "1", "item2", "2", "item3", "3");
ClusterConfig testConfig = new ClusterConfig("testConfig");
testConfig.setDefaultPartitionWeightMap(weightDataMap);
Assert.assertEquals(testConfig.getRecord().getMapField(ClusterConfig.ClusterConfigProperty.
DEFAULT_PARTITION_WEIGHT_MAP.name()), weightDataMapString);
    // Setting an empty map is allowed; it clears the default partition weight values
testConfig.setDefaultPartitionWeightMap(Collections.emptyMap());
Assert.assertEquals(testConfig.getRecord().getMapField(ClusterConfig.ClusterConfigProperty.
DEFAULT_PARTITION_WEIGHT_MAP.name()), Collections.emptyMap());
testConfig.setDefaultPartitionWeightMap(null);
Assert.assertTrue(testConfig.getRecord().getMapField(ClusterConfig.ClusterConfigProperty.
DEFAULT_PARTITION_WEIGHT_MAP.name()) == null);
}
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Default capacity data contains a negative value: item3 = -3")
public void testSetPartitionWeightMapInvalid() {
Map<String, Integer> weightDataMap = ImmutableMap.of("item1", 1, "item2", 2, "item3", -3);
ClusterConfig testConfig = new ClusterConfig("testConfig");
testConfig.setDefaultPartitionWeightMap(weightDataMap);
}
@Test
public void testAsyncGlobalRebalanceOption() {
ClusterConfig testConfig = new ClusterConfig("testConfig");
// Default value is true.
Assert.assertEquals(testConfig.isGlobalRebalanceAsyncModeEnabled(), true);
// Test get the option
testConfig.getRecord()
.setBooleanField(ClusterConfig.ClusterConfigProperty.GLOBAL_REBALANCE_ASYNC_MODE.name(),
false);
Assert.assertEquals(testConfig.isGlobalRebalanceAsyncModeEnabled(), false);
// Test set the option
testConfig.setGlobalRebalanceAsyncMode(true);
Assert.assertEquals(testConfig.getRecord()
.getBooleanField(ClusterConfig.ClusterConfigProperty.GLOBAL_REBALANCE_ASYNC_MODE.name(),
false), true);
}
@Test
public void testGetOfflineNodeTimeOutForMaintenanceMode() {
ClusterConfig testConfig = new ClusterConfig("testId");
Assert.assertEquals(testConfig.getOfflineNodeTimeOutForMaintenanceMode(), -1);
testConfig.getRecord()
.setLongField(ClusterConfig.ClusterConfigProperty.OFFLINE_NODE_TIME_OUT_FOR_MAINTENANCE_MODE
.name(),
10000L);
Assert.assertEquals(testConfig.getOfflineNodeTimeOutForMaintenanceMode(), 10000L);
}
@Test
public void testSetOfflineNodeTimeOutForMaintenanceMode() {
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setOfflineNodeTimeOutForMaintenanceMode(10000L);
Assert.assertEquals(testConfig.getRecord()
.getLongField(ClusterConfig.ClusterConfigProperty.OFFLINE_NODE_TIME_OUT_FOR_MAINTENANCE_MODE
.name(),
-1), 10000L);
}
@Test
public void testGetOfflineNodeTimeOutForPurge() {
ClusterConfig testConfig = new ClusterConfig("testId");
Assert.assertEquals(testConfig.getOfflineDurationForPurge(), -1);
testConfig.getRecord()
.setLongField(ClusterConfig.ClusterConfigProperty.OFFLINE_DURATION_FOR_PURGE_MS
.name(),
10000L);
Assert.assertEquals(testConfig.getOfflineDurationForPurge(), 10000L);
}
@Test
public void testSetOfflineNodeTimeOutForPurge() {
ClusterConfig testConfig = new ClusterConfig("testId");
testConfig.setOfflineDurationForPurge(10000L);
Assert.assertEquals(testConfig.getRecord()
.getLongField(ClusterConfig.ClusterConfigProperty.OFFLINE_DURATION_FOR_PURGE_MS
.name(),
-1), 10000L);
}
@Test
public void testAbnormalStatesResolverConfig() {
ClusterConfig testConfig = new ClusterConfig("testConfig");
// Default value is empty
Assert.assertEquals(testConfig.getAbnormalStateResolverMap(), Collections.EMPTY_MAP);
// Test set
Map<String, String> resolverMap =
ImmutableMap.of(MasterSlaveSMD.name, MockAbnormalStateResolver.class.getName());
testConfig.setAbnormalStateResolverMap(resolverMap);
Assert.assertEquals(testConfig.getAbnormalStateResolverMap(), resolverMap);
// Test empty the map
testConfig.setAbnormalStateResolverMap(Collections.emptyMap());
Assert.assertEquals(testConfig.getAbnormalStateResolverMap(), Collections.EMPTY_MAP);
testConfig.setAbnormalStateResolverMap(null);
Assert.assertTrue(testConfig.getRecord()
.getMapField(ClusterConfig.ClusterConfigProperty.ABNORMAL_STATES_RESOLVER_MAP.name())
== null);
}
@Test
public void testSetInvalidAbnormalStatesResolverConfig() {
ClusterConfig testConfig = new ClusterConfig("testConfig");
Map<String, String> resolverMap = new HashMap<>();
resolverMap.put(null, MockAbnormalStateResolver.class.getName());
trySetInvalidAbnormalStatesResolverMap(testConfig, resolverMap);
resolverMap.clear();
resolverMap.put("", MockAbnormalStateResolver.class.getName());
trySetInvalidAbnormalStatesResolverMap(testConfig, resolverMap);
resolverMap.clear();
resolverMap.put(MasterSlaveSMD.name, null);
trySetInvalidAbnormalStatesResolverMap(testConfig, resolverMap);
resolverMap.clear();
resolverMap.put(MasterSlaveSMD.name, "");
trySetInvalidAbnormalStatesResolverMap(testConfig, resolverMap);
}
@Test
public void testGetLastOnDemandRebalanceTimestamp() {
ClusterConfig testConfig = new ClusterConfig("testConfig");
Assert.assertEquals(testConfig.getLastOnDemandRebalanceTimestamp(), -1L);
testConfig.getRecord()
.setLongField(ClusterConfig.ClusterConfigProperty.LAST_ON_DEMAND_REBALANCE_TIMESTAMP.name(),
10000L);
Assert.assertEquals(testConfig.getLastOnDemandRebalanceTimestamp(), 10000L);
}
@Test
public void testSetLastOnDemandRebalanceTimestamp() {
ClusterConfig testConfig = new ClusterConfig("testConfig");
testConfig.setLastOnDemandRebalanceTimestamp(10000L);
Assert.assertEquals(testConfig.getRecord()
.getLongField(ClusterConfig.ClusterConfigProperty.LAST_ON_DEMAND_REBALANCE_TIMESTAMP.name(),
-1), 10000L);
}
private void trySetInvalidAbnormalStatesResolverMap(ClusterConfig testConfig,
Map<String, String> resolverMap) {
try {
testConfig.setAbnormalStateResolverMap(resolverMap);
Assert.fail("Invalid resolver setup shall fail.");
} catch (IllegalArgumentException ex) {
// expected
}
}
}
| 9,854 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestStateModelValidity.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Lists;
import org.apache.helix.HelixDefinedState;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.model.StateModelDefinition.StateModelDefinitionProperty;
import org.apache.helix.tools.StateModelConfigGenerator;
import org.testng.Assert;
import org.testng.annotations.Test;
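/**
 * Unit tests for {@link StateModelDefinition} validity checks: required DROPPED state,
 * reachability from the initial state, valid transition endpoints, and loop detection.
 */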
public class TestStateModelValidity {
/**
* Ensure that state models that we know to be good pass validation
*/
@Test
public void testValidModels() {
StateModelDefinition masterSlave =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave());
Assert.assertTrue(masterSlave.isValid());
StateModelDefinition leaderStandby =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForLeaderStandby());
Assert.assertTrue(leaderStandby.isValid());
StateModelDefinition onlineOffline =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForOnlineOffline());
Assert.assertTrue(onlineOffline.isValid());
}
/**
   * Ensure that validation fails if the DROPPED state is not specified
*/
@Test
public void testNoDroppedState() {
StateModelDefinition stateModel =
new StateModelDefinition.Builder("stateModel").initialState("OFFLINE").addState("OFFLINE")
.addState("MASTER").addState("SLAVE").addTransition("OFFLINE", "SLAVE")
.addTransition("SLAVE", "MASTER").addTransition("MASTER", "SLAVE")
.addTransition("SLAVE", "OFFLINE").build();
Assert.assertFalse(stateModel.isValid());
}
/**
* Ensure that Helix can catch when a state doesn't have a path to DROPPED
*/
@Test
public void testNoPathToDropped() {
StateModelDefinition stateModel =
new StateModelDefinition.Builder("stateModel").initialState("OFFLINE").addState("OFFLINE")
.addState("MASTER").addState("SLAVE").addState("DROPPED")
.addTransition("OFFLINE", "SLAVE").addTransition("SLAVE", "MASTER")
.addTransition("SLAVE", "OFFLINE").addTransition("OFFLINE", "DROPPED").build();
Assert.assertFalse(stateModel.isValid());
// now see that adding MASTER-DROPPED fixes the problem
stateModel =
new StateModelDefinition.Builder("stateModel").initialState("OFFLINE").addState("OFFLINE")
.addState("MASTER").addState("SLAVE").addState("DROPPED")
.addTransition("OFFLINE", "SLAVE").addTransition("SLAVE", "MASTER")
.addTransition("SLAVE", "OFFLINE").addTransition("OFFLINE", "DROPPED")
.addTransition("MASTER", "DROPPED").build();
Assert.assertTrue(stateModel.isValid());
}
/**
   * The initial state must itself be declared as a state; otherwise validation should fail
*/
@Test
public void testInitialStateIsNotState() {
StateModelDefinition stateModel =
new StateModelDefinition.Builder("stateModel").initialState("OFFLINE").addState("MASTER")
.addState("SLAVE").addState("DROPPED").addTransition("OFFLINE", "SLAVE")
.addTransition("SLAVE", "MASTER").addTransition("SLAVE", "OFFLINE")
.addTransition("OFFLINE", "DROPPED").addTransition("MASTER", "SLAVE").build();
Assert.assertFalse(stateModel.isValid());
}
/**
* There should be an initial state, otherwise instantiation should fail
*/
@Test
public void testNoInitialState() {
try {
new StateModelDefinition.Builder("stateModel").addState("OFFLINE").addState("MASTER")
.addState("SLAVE").addState("DROPPED").addTransition("OFFLINE", "SLAVE")
.addTransition("SLAVE", "MASTER").addTransition("SLAVE", "OFFLINE")
.addTransition("OFFLINE", "DROPPED").addTransition("MASTER", "SLAVE").build();
Assert.fail("StateModelDefinition creation should fail if no initial state");
} catch (IllegalArgumentException e) {
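      // expected: building a definition without an initial state must fail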
}
}
/**
   * SRC and DEST in a transition SRC-DEST must both be valid states
*/
@Test
public void testTransitionsWithInvalidStates() {
// invalid to state
StateModelDefinition stateModel =
new StateModelDefinition.Builder("stateModel").initialState("OFFLINE").addState("OFFLINE")
.addState("MASTER").addState("SLAVE").addState("DROPPED")
.addTransition("OFFLINE", "SLAVE").addTransition("SLAVE", "MASTER")
.addTransition("SLAVE", "OFFLINE").addTransition("OFFLINE", "DROPPED")
.addTransition("MASTER", "SLAVE").addTransition("OFFLINE", "INVALID").build();
Assert.assertFalse(stateModel.isValid());
// invalid from state
stateModel =
new StateModelDefinition.Builder("stateModel").initialState("OFFLINE").addState("OFFLINE")
.addState("MASTER").addState("SLAVE").addState("DROPPED")
.addTransition("OFFLINE", "SLAVE").addTransition("SLAVE", "MASTER")
.addTransition("SLAVE", "OFFLINE").addTransition("OFFLINE", "DROPPED")
.addTransition("MASTER", "SLAVE").addTransition("INVALID", "MASTER").build();
Assert.assertFalse(stateModel.isValid());
}
/**
   * Every state must be reachable from the initial state; otherwise validation should fail
*/
@Test
public void testUnreachableState() {
StateModelDefinition stateModel =
new StateModelDefinition.Builder("stateModel").initialState("OFFLINE").addState("OFFLINE")
.addState("MASTER").addState("SLAVE").addState("DROPPED")
.addTransition("OFFLINE", "SLAVE").addTransition("SLAVE", "OFFLINE")
.addTransition("OFFLINE", "DROPPED").addTransition("MASTER", "SLAVE")
.addTransition("MASTER", "DROPPED").build();
Assert.assertFalse(stateModel.isValid());
}
/**
* The validator should fail on any detected infinite loops
*/
@Test
public void testLoopInStateModel() {
// create an infinite loop ONE --> TWO --> ONE
ZNRecord record = new ZNRecord("MasterSlave");
record.setSimpleField(StateModelDefinitionProperty.INITIAL_STATE.toString(), "OFFLINE");
List<String> statePriorityList =
Lists.newArrayList("ONE", "TWO", "THREE", "OFFLINE", "DROPPED", "ERROR");
record.setListField(StateModelDefinitionProperty.STATE_PRIORITY_LIST.toString(),
statePriorityList);
for (String state : statePriorityList) {
String key = state + ".meta";
Map<String, String> metadata = new HashMap<String, String>();
metadata.put("count", "-1");
record.setMapField(key, metadata);
}
for (String state : statePriorityList) {
String key = state + ".next";
if (state.equals("ONE")) {
Map<String, String> metadata = new HashMap<String, String>();
metadata.put("THREE", "TWO");
metadata.put("TWO", "TWO");
metadata.put("OFFLINE", "OFFLINE");
metadata.put("DROPPED", "DROPPED");
record.setMapField(key, metadata);
} else if (state.equals("TWO")) {
Map<String, String> metadata = new HashMap<String, String>();
metadata.put("THREE", "ONE");
metadata.put("OFFLINE", "OFFLINE");
metadata.put("DROPPED", "OFFLINE");
record.setMapField(key, metadata);
} else if (state.equals("THREE")) {
Map<String, String> metadata = new HashMap<String, String>();
metadata.put("OFFLINE", "OFFLINE");
metadata.put("DROPPED", "OFFLINE");
record.setMapField(key, metadata);
} else if (state.equals("OFFLINE")) {
Map<String, String> metadata = new HashMap<String, String>();
metadata.put("ONE", "ONE");
metadata.put("TWO", "TWO");
metadata.put("THREE", "THREE");
metadata.put("DROPPED", "DROPPED");
record.setMapField(key, metadata);
} else if (state.equals("ERROR")) {
Map<String, String> metadata = new HashMap<String, String>();
metadata.put("OFFLINE", "OFFLINE");
record.setMapField(key, metadata);
}
}
List<String> stateTransitionPriorityList = new ArrayList<String>();
record.setListField(StateModelDefinitionProperty.STATE_TRANSITION_PRIORITYLIST.toString(),
stateTransitionPriorityList);
StateModelDefinition stateModel = new StateModelDefinition(record);
Assert.assertFalse(stateModel.isValid());
}
/**
* This is the example used on the website, so this must work
*/
@Test
public void testBasic() {
StateModelDefinition stateModel = new StateModelDefinition.Builder("MasterSlave")
// OFFLINE is the state that the system starts in (initial state is REQUIRED)
.initialState("OFFLINE")
// Lowest number here indicates highest priority, no value indicates lowest priority
.addState("MASTER", 1).addState("SLAVE", 2).addState("OFFLINE")
// Note the special inclusion of the DROPPED state (REQUIRED)
.addState(HelixDefinedState.DROPPED.toString())
// No more than one master allowed
.upperBound("MASTER", 1)
// R indicates an upper bound of number of replicas for each partition
.dynamicUpperBound("SLAVE", "R")
// Add some high-priority transitions
.addTransition("SLAVE", "MASTER", 1).addTransition("OFFLINE", "SLAVE", 2)
// Using the same priority value indicates that these transitions can fire in any order
.addTransition("MASTER", "SLAVE", 3).addTransition("SLAVE", "OFFLINE", 3)
// Not specifying a value defaults to lowest priority
// Notice the inclusion of the OFFLINE to DROPPED transition
// Since every state has a path to OFFLINE, they each now have a path to DROPPED (REQUIRED)
.addTransition("OFFLINE", HelixDefinedState.DROPPED.toString())
// Create the StateModelDefinition instance
.build();
Assert.assertTrue(stateModel.isValid());
}
}
| 9,855 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestLiveInstance.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.LiveInstanceChangeListener;
import org.apache.helix.NotificationContext;
import org.apache.helix.PropertyKey;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.task.TaskConstants;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
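/**
 * Tests live-instance change notifications against ZooKeeper, plus the
 * {@link LiveInstance} task-thread-pool-size and status accessors.
 */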
public class TestLiveInstance extends ZkUnitTestBase {
private final String clusterName = CLUSTER_PREFIX + "_" + getShortClassName();
@BeforeClass()
public void beforeClass() throws Exception {
_gSetupTool.addCluster(clusterName, true);
_gSetupTool
.addInstancesToCluster(clusterName, new String[] { "localhost:54321", "localhost:54322" });
}
@AfterClass()
public void afterClass() throws Exception {
deleteCluster(clusterName);
}
@Test
public void testDataChange() throws Exception {
// Create an admin and add LiveInstanceChange listener to it
HelixManager adminManager =
HelixManagerFactory.getZKHelixManager(clusterName, null, InstanceType.ADMINISTRATOR,
ZK_ADDR);
adminManager.connect();
final BlockingQueue<List<LiveInstance>> changeList =
new LinkedBlockingQueue<List<LiveInstance>>();
adminManager.addLiveInstanceChangeListener(new LiveInstanceChangeListener() {
@Override
public void onLiveInstanceChange(List<LiveInstance> liveInstances,
NotificationContext changeContext) {
        // The queue is unbounded, so calling "add" should never throw.
changeList.add(deepCopy(liveInstances));
}
});
// Check the initial condition
List<LiveInstance> instances = changeList.poll(1, TimeUnit.SECONDS);
Assert.assertNotNull(instances, "Expecting a list of live instance");
Assert.assertTrue(instances.isEmpty(), "Expecting an empty list of live instance");
// Join as participant, should trigger a live instance change event
HelixManager manager =
HelixManagerFactory.getZKHelixManager(clusterName, "localhost_54321",
InstanceType.PARTICIPANT, ZK_ADDR);
manager.connect();
instances = changeList.poll(1, TimeUnit.SECONDS);
Assert.assertNotNull(instances, "Expecting a list of live instance");
Assert.assertEquals(instances.size(), 1, "Expecting one live instance");
Assert.assertEquals(instances.get(0).getInstanceName(), manager.getInstanceName());
// Update data in the live instance node, should trigger another live instance change
// event
HelixDataAccessor helixDataAccessor = manager.getHelixDataAccessor();
PropertyKey propertyKey =
helixDataAccessor.keyBuilder().liveInstance(manager.getInstanceName());
LiveInstance instance = helixDataAccessor.getProperty(propertyKey);
Map<String, String> map = new TreeMap<String, String>();
map.put("k1", "v1");
instance.getRecord().setMapField("test", map);
Assert.assertTrue(helixDataAccessor.updateProperty(propertyKey, instance),
"Failed to update live instance node");
instances = changeList.poll(1, TimeUnit.SECONDS);
Assert.assertNotNull(instances, "Expecting a list of live instance");
Assert.assertEquals(instances.get(0).getRecord().getMapField("test"), map, "Wrong map data.");
manager.disconnect();
    Thread.sleep(1000); // wait for the disconnect callback to finish
instances = changeList.poll(1, TimeUnit.SECONDS);
Assert.assertNotNull(instances, "Expecting a list of live instance");
Assert.assertTrue(instances.isEmpty(), "Expecting an empty list of live instance");
adminManager.disconnect();
}
private List<LiveInstance> deepCopy(List<LiveInstance> instances) {
List<LiveInstance> result = new ArrayList<LiveInstance>();
for (LiveInstance instance : instances) {
result.add(new LiveInstance(instance.getRecord()));
}
return result;
}
@Test(dependsOnMethods = "testDataChange")
public void testGetCurrentTaskThreadPoolSize() {
LiveInstance testLiveInstance = new LiveInstance("testId");
testLiveInstance.getRecord()
.setIntField(LiveInstance.LiveInstanceProperty.CURRENT_TASK_THREAD_POOL_SIZE.name(), 100);
Assert.assertEquals(testLiveInstance.getCurrentTaskThreadPoolSize(), 100);
}
@Test(dependsOnMethods = "testGetCurrentTaskThreadPoolSize")
public void testGetCurrentTaskThreadPoolSizeDefault() {
LiveInstance testLiveInstance = new LiveInstance("testId");
Assert.assertEquals(testLiveInstance.getCurrentTaskThreadPoolSize(), TaskConstants.DEFAULT_TASK_THREAD_POOL_SIZE);
}
@Test(dependsOnMethods = "testGetCurrentTaskThreadPoolSizeDefault")
public void testSetCurrentTaskThreadPoolSize() {
LiveInstance testLiveInstance = new LiveInstance("testId");
testLiveInstance.setCurrentTaskThreadPoolSize(100);
Assert.assertEquals(testLiveInstance.getCurrentTaskThreadPoolSize(), 100);
}
@Test
public void testLiveInstanceStatus() {
LiveInstance testLiveInstance = new LiveInstance("testLiveInstanceStatus");
Assert.assertNull(testLiveInstance.getRecord()
.getSimpleField(LiveInstance.LiveInstanceProperty.STATUS.name()));
Assert.assertEquals(testLiveInstance.getStatus(), LiveInstance.LiveInstanceStatus.NORMAL);
testLiveInstance.setStatus(LiveInstance.LiveInstanceStatus.FROZEN);
Assert.assertEquals(testLiveInstance.getStatus(), LiveInstance.LiveInstanceStatus.FROZEN);
}
}
| 9,856 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestResourceConfig.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
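/**
 * Unit tests for {@link ResourceConfig}: serializing, reading, and validating the
 * per-partition capacity map.
 */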
public class TestResourceConfig {
private static final ObjectMapper _objectMapper = new ObjectMapper();
@Test
public void testGetPartitionCapacityMap() throws IOException {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1,
"item2", 2,
"item3", 3);
ZNRecord rec = new ZNRecord("testId");
rec.setMapField(ResourceConfig.ResourceConfigProperty.PARTITION_CAPACITY_MAP.name(), Collections
.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY,
_objectMapper.writeValueAsString(capacityDataMap)));
ResourceConfig testConfig = new ResourceConfig(rec);
Assert.assertTrue(testConfig.getPartitionCapacityMap().get(ResourceConfig.DEFAULT_PARTITION_KEY)
.equals(capacityDataMap));
}
@Test
public void testGetPartitionCapacityMapEmpty() throws IOException {
ResourceConfig testConfig = new ResourceConfig("testId");
Assert.assertTrue(testConfig.getPartitionCapacityMap().equals(Collections.emptyMap()));
}
@Test(expectedExceptions = IOException.class)
public void testGetPartitionCapacityMapInvalidJson() throws IOException {
ZNRecord rec = new ZNRecord("testId");
rec.setMapField(ResourceConfig.ResourceConfigProperty.PARTITION_CAPACITY_MAP.name(),
Collections.singletonMap("test", "gibberish"));
ResourceConfig testConfig = new ResourceConfig(rec);
testConfig.getPartitionCapacityMap();
}
@Test(dependsOnMethods = "testGetPartitionCapacityMap", expectedExceptions = IOException.class)
public void testGetPartitionCapacityMapInvalidJsonType() throws IOException {
Map<String, String> capacityDataMap = ImmutableMap.of("item1", "1",
"item2", "2",
"item3", "three");
ZNRecord rec = new ZNRecord("testId");
rec.setMapField(ResourceConfig.ResourceConfigProperty.PARTITION_CAPACITY_MAP.name(), Collections
.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY,
_objectMapper.writeValueAsString(capacityDataMap)));
ResourceConfig testConfig = new ResourceConfig(rec);
testConfig.getPartitionCapacityMap();
}
@Test
public void testSetPartitionCapacityMap() throws IOException {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1,
"item2", 2,
"item3", 3);
ResourceConfig testConfig = new ResourceConfig("testConfig");
testConfig.setPartitionCapacityMap(
Collections.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY, capacityDataMap));
Assert.assertEquals(testConfig.getRecord().getMapField(ResourceConfig.ResourceConfigProperty.
PARTITION_CAPACITY_MAP.name()).get(ResourceConfig.DEFAULT_PARTITION_KEY),
_objectMapper.writeValueAsString(capacityDataMap));
}
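  // Hedged sketch: round-trips one capacity map through the setter and the
  // JSON-backed getter, complementing the raw-field check above.
  @Test(dependsOnMethods = "testSetPartitionCapacityMap")
  public void testPartitionCapacityMapRoundTrip() throws IOException {
    Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1, "item2", 2);
    ResourceConfig testConfig = new ResourceConfig("testConfig");
    testConfig.setPartitionCapacityMap(
        Collections.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY, capacityDataMap));
    Assert.assertEquals(
        testConfig.getPartitionCapacityMap().get(ResourceConfig.DEFAULT_PARTITION_KEY),
        capacityDataMap);
  }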
@Test
public void testSetMultiplePartitionCapacityMap() throws IOException {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1,
"item2", 2,
"item3", 3);
Map<String, Map<String, Integer>> totalCapacityMap =
ImmutableMap.of(ResourceConfig.DEFAULT_PARTITION_KEY, capacityDataMap,
"partition2", capacityDataMap,
"partition3", capacityDataMap);
ResourceConfig testConfig = new ResourceConfig("testConfig");
testConfig.setPartitionCapacityMap(totalCapacityMap);
Assert.assertNull(testConfig.getRecord().getMapField(ResourceConfig.ResourceConfigProperty.
PARTITION_CAPACITY_MAP.name()).get("partition1"));
Assert.assertEquals(testConfig.getRecord().getMapField(ResourceConfig.ResourceConfigProperty.
PARTITION_CAPACITY_MAP.name()).get(ResourceConfig.DEFAULT_PARTITION_KEY),
_objectMapper.writeValueAsString(capacityDataMap));
Assert.assertEquals(testConfig.getRecord().getMapField(ResourceConfig.ResourceConfigProperty.
PARTITION_CAPACITY_MAP.name()).get("partition2"),
_objectMapper.writeValueAsString(capacityDataMap));
Assert.assertEquals(testConfig.getRecord().getMapField(ResourceConfig.ResourceConfigProperty.
PARTITION_CAPACITY_MAP.name()).get("partition3"),
_objectMapper.writeValueAsString(capacityDataMap));
}
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Capacity Data is empty")
public void testSetPartitionCapacityMapEmpty() throws IOException {
Map<String, Integer> capacityDataMap = new HashMap<>();
ResourceConfig testConfig = new ResourceConfig("testConfig");
testConfig.setPartitionCapacityMap(
Collections.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY, capacityDataMap));
}
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "The default partition capacity with the default key DEFAULT is required.")
public void testSetPartitionCapacityMapWithoutDefault() throws IOException {
Map<String, Integer> capacityDataMap = new HashMap<>();
ResourceConfig testConfig = new ResourceConfig("testConfig");
testConfig.setPartitionCapacityMap(
Collections.singletonMap("Random", capacityDataMap));
}
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Capacity Data contains a negative value:.+")
public void testSetPartitionCapacityMapInvalid() throws IOException {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1,
"item2", 2,
"item3", -3);
ResourceConfig testConfig = new ResourceConfig("testConfig");
testConfig.setPartitionCapacityMap(
Collections.singletonMap(ResourceConfig.DEFAULT_PARTITION_KEY, capacityDataMap));
}
@Test
public void testWithResourceBuilder() throws IOException {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1,
"item2", 2,
"item3", 3);
ResourceConfig.Builder builder = new ResourceConfig.Builder("testConfig");
builder.setPartitionCapacity(capacityDataMap);
builder.setPartitionCapacity("partition1", capacityDataMap);
Assert.assertEquals(
builder.build().getPartitionCapacityMap().get(ResourceConfig.DEFAULT_PARTITION_KEY),
capacityDataMap);
Assert.assertEquals(
builder.build().getPartitionCapacityMap().get("partition1"),
capacityDataMap);
Assert.assertNull(
builder.build().getPartitionCapacityMap().get("Random"));
}
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "The default partition capacity with the default key DEFAULT is required.")
public void testWithResourceBuilderInvalidInput() {
Map<String, Integer> capacityDataMap = ImmutableMap.of("item1", 1,
"item2", 2,
"item3", 3);
ResourceConfig.Builder builder = new ResourceConfig.Builder("testConfig");
builder.setPartitionCapacity("Random", capacityDataMap);
builder.build();
}
@Test
public void testMergeWithIdealState() {
// Test failure case
ResourceConfig testConfig = new ResourceConfig("testResource");
IdealState testIdealState = new IdealState("DifferentState");
try {
ResourceConfig.mergeIdealStateWithResourceConfig(testConfig, testIdealState);
Assert.fail("Should not be able merge with a IdealState of different resource.");
} catch (IllegalArgumentException ex) {
// expected
}
testIdealState = new IdealState("testResource");
testIdealState.setInstanceGroupTag("testISGroup");
testIdealState.setMaxPartitionsPerInstance(1);
testIdealState.setNumPartitions(1);
testIdealState.setStateModelDefRef("testISDef");
testIdealState.setStateModelFactoryName("testISFactory");
testIdealState.setReplicas("3");
testIdealState.setMinActiveReplicas(1);
testIdealState.enable(true);
testIdealState.setResourceGroupName("testISGroup");
testIdealState.setResourceType("ISType");
testIdealState.setDisableExternalView(false);
testIdealState.setDelayRebalanceEnabled(true);
// Test IdealState info overriding the empty config fields.
ResourceConfig mergedResourceConfig =
ResourceConfig.mergeIdealStateWithResourceConfig(null, testIdealState);
Assert.assertEquals(mergedResourceConfig.getInstanceGroupTag(),
testIdealState.getInstanceGroupTag());
Assert.assertEquals(mergedResourceConfig.getMaxPartitionsPerInstance(),
testIdealState.getMaxPartitionsPerInstance());
Assert.assertEquals(mergedResourceConfig.getNumPartitions(), testIdealState.getNumPartitions());
Assert.assertEquals(mergedResourceConfig.getStateModelDefRef(),
testIdealState.getStateModelDefRef());
Assert.assertEquals(mergedResourceConfig.getStateModelFactoryName(),
testIdealState.getStateModelFactoryName());
Assert.assertEquals(mergedResourceConfig.getNumReplica(), testIdealState.getReplicas());
Assert.assertEquals(mergedResourceConfig.getMinActiveReplica(),
testIdealState.getMinActiveReplicas());
Assert
.assertEquals(mergedResourceConfig.isEnabled().booleanValue(), testIdealState.isEnabled());
Assert.assertEquals(mergedResourceConfig.getResourceGroupName(),
testIdealState.getResourceGroupName());
Assert.assertEquals(mergedResourceConfig.getResourceType(), testIdealState.getResourceType());
Assert.assertEquals(mergedResourceConfig.isExternalViewDisabled().booleanValue(),
testIdealState.isExternalViewDisabled());
Assert.assertEquals(Boolean.valueOf(mergedResourceConfig
.getSimpleConfig(ResourceConfig.ResourceConfigProperty.DELAY_REBALANCE_ENABLED.name()))
.booleanValue(), testIdealState.isDelayRebalanceEnabled());
// Test priority, Resource Config field has higher priority.
ResourceConfig.Builder configBuilder = new ResourceConfig.Builder("testResource");
configBuilder.setInstanceGroupTag("testRCGroup");
configBuilder.setMaxPartitionsPerInstance(2);
configBuilder.setNumPartitions(2);
configBuilder.setStateModelDefRef("testRCDef");
configBuilder.setStateModelFactoryName("testRCFactory");
configBuilder.setNumReplica("4");
configBuilder.setMinActiveReplica(2);
configBuilder.setHelixEnabled(false);
configBuilder.setResourceGroupName("testRCGroup");
configBuilder.setResourceType("RCType");
configBuilder.setExternalViewDisabled(true);
testConfig = configBuilder.build();
mergedResourceConfig =
ResourceConfig.mergeIdealStateWithResourceConfig(testConfig, testIdealState);
Assert
.assertEquals(mergedResourceConfig.getInstanceGroupTag(), testConfig.getInstanceGroupTag());
Assert.assertEquals(mergedResourceConfig.getMaxPartitionsPerInstance(),
testConfig.getMaxPartitionsPerInstance());
Assert.assertEquals(mergedResourceConfig.getNumPartitions(), testConfig.getNumPartitions());
Assert
.assertEquals(mergedResourceConfig.getStateModelDefRef(), testConfig.getStateModelDefRef());
Assert.assertEquals(mergedResourceConfig.getStateModelFactoryName(),
testConfig.getStateModelFactoryName());
Assert.assertEquals(mergedResourceConfig.getNumReplica(), testConfig.getNumReplica());
Assert
.assertEquals(mergedResourceConfig.getMinActiveReplica(), testConfig.getMinActiveReplica());
Assert.assertEquals(mergedResourceConfig.isEnabled(), testConfig.isEnabled());
Assert.assertEquals(mergedResourceConfig.getResourceGroupName(),
testConfig.getResourceGroupName());
Assert.assertEquals(mergedResourceConfig.getResourceType(), testConfig.getResourceType());
Assert.assertEquals(mergedResourceConfig.isExternalViewDisabled(),
testConfig.isExternalViewDisabled());
}
}
| 9,857 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestParticipantHistory.java | package org.apache.helix.model;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestParticipantHistory {
@Test
public void testGetLastTimeInOfflineHistory() {
ParticipantHistory participantHistory = new ParticipantHistory("testId");
long currentTimeMillis = System.currentTimeMillis();
List<String> offlineHistory = new ArrayList<>();
DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss:SSS");
df.setTimeZone(TimeZone.getTimeZone("UTC"));
String dateTime = df.format(new Date(currentTimeMillis));
offlineHistory.add(dateTime);
participantHistory.getRecord()
.setListField(ParticipantHistory.ConfigProperty.OFFLINE.name(), offlineHistory);
Assert.assertEquals(participantHistory.getLastTimeInOfflineHistory(), currentTimeMillis);
}
@Test
public void testGetLastTimeInOfflineHistoryNoRecord() {
ParticipantHistory participantHistory = new ParticipantHistory("testId");
Assert.assertEquals(participantHistory.getLastTimeInOfflineHistory(), -1);
}
@Test
public void testGetLastTimeInOfflineHistoryWrongFormat() {
ParticipantHistory participantHistory = new ParticipantHistory("testId");
List<String> offlineHistory = new ArrayList<>();
offlineHistory.add("Wrong Format");
participantHistory.getRecord()
.setListField(ParticipantHistory.ConfigProperty.OFFLINE.name(), offlineHistory);
Assert.assertEquals(participantHistory.getLastTimeInOfflineHistory(), -1);
}
@Test
public void testParseSessionHistoryStringToMap() {
// Test for normal use case
ParticipantHistory participantHistory = new ParticipantHistory("testId");
participantHistory.reportOnline("testSessionId", "testVersion");
String sessionString = participantHistory.getRecord()
.getListField(ParticipantHistory.ConfigProperty.HISTORY.name()).get(0);
Map<String, String> sessionMap =
ParticipantHistory.sessionHistoryStringToMap(sessionString);
Assert.assertEquals(sessionMap.get(ParticipantHistory.ConfigProperty.SESSION.name()),
"testSessionId");
Assert.assertEquals(sessionMap.get(ParticipantHistory.ConfigProperty.VERSION.name()),
"testVersion");
// Test for error resistance
sessionMap = ParticipantHistory
.sessionHistoryStringToMap("{TEST_FIELD_ONE=X, 12345, TEST_FIELD_TWO=Y=Z}");
Assert.assertEquals(sessionMap.get("TEST_FIELD_ONE"), "X");
Assert.assertEquals(sessionMap.get("TEST_FIELD_TWO"), "Y");
}
@Test
public void testGetHistoryTimestampsAsMilliseconds() {
ParticipantHistory participantHistory = new ParticipantHistory("testId");
List<String> historyList = new ArrayList<>();
historyList.add(
"{DATE=2020-08-27T09:25:39:767, VERSION=1.0.0.61, SESSION=AAABBBCCC, TIME=1598520339767}");
historyList
.add("{DATE=2020-08-27T09:25:39:767, VERSION=1.0.0.61, SESSION=AAABBBCCC, TIME=ABCDE}");
historyList.add("{DATE=2020-08-27T09:25:39:767, VERSION=1.0.0.61, SESSION=AAABBBCCC}");
participantHistory.getRecord()
.setListField(ParticipantHistory.ConfigProperty.HISTORY.name(), historyList);
Assert.assertEquals(participantHistory.getOnlineTimestampsAsMilliseconds(),
Collections.singletonList(1598520339767L));
}
@Test
public void testGetOfflineTimestampsAsMilliseconds() {
ParticipantHistory participantHistory = new ParticipantHistory("testId");
List<String> offlineList = new ArrayList<>();
long currentTimeMillis = System.currentTimeMillis();
DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss:SSS");
df.setTimeZone(TimeZone.getTimeZone("UTC"));
String dateTime = df.format(new Date(currentTimeMillis));
offlineList.add(dateTime);
offlineList.add("WRONG FORMAT");
participantHistory.getRecord()
.setListField(ParticipantHistory.ConfigProperty.OFFLINE.name(), offlineList);
Assert.assertEquals(participantHistory.getOfflineTimestampsAsMilliseconds(),
Collections.singletonList(currentTimeMillis));
}
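  // Hedged sketch: documents the UTC timestamp pattern this suite relies on by
  // parsing a formatted date back into the original epoch millis.
  @Test
  public void testHistoryDateFormatRoundTrip() throws java.text.ParseException {
    long currentTimeMillis = System.currentTimeMillis();
    DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss:SSS");
    df.setTimeZone(TimeZone.getTimeZone("UTC"));
    String dateTime = df.format(new Date(currentTimeMillis));
    Assert.assertEquals(df.parse(dateTime).getTime(), currentTimeMillis);
  }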
}
| 9,858 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestCustomizedStateConfig.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import java.util.List;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestCustomizedStateConfig extends ZkUnitTestBase {
@Test(expectedExceptions = HelixException.class)
public void TestCustomizedStateConfigNonExistentCluster() {
String className = getShortClassName();
String clusterName = "CLUSTER_" + className;
    // Read CustomizedStateConfig from Zookeeper and get an exception since the cluster is not set up yet
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CustomizedStateConfig customizedStateConfig =
_configAccessor.getCustomizedStateConfig(clusterName);
}
@Test(dependsOnMethods = "TestCustomizedStateConfigNonExistentCluster")
public void testCustomizedStateConfigNull() {
String className = getShortClassName();
String clusterName = "CLUSTER_" + className;
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
// Read CustomizedStateConfig from Zookeeper
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CustomizedStateConfig customizedStateConfigFromZk =
_configAccessor.getCustomizedStateConfig(clusterName);
Assert.assertNull(customizedStateConfigFromZk);
}
@Test(dependsOnMethods = "testCustomizedStateConfigNull")
public void testCustomizedStateConfig() {
String className = getShortClassName();
String clusterName = "CLUSTER_" + className;
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
// Create dummy CustomizedStateConfig object
CustomizedStateConfig.Builder customizedStateConfigBuilder =
new CustomizedStateConfig.Builder();
List<String> aggregationEnabledTypes = new ArrayList<String>();
aggregationEnabledTypes.add("mockType1");
aggregationEnabledTypes.add("mockType2");
customizedStateConfigBuilder.setAggregationEnabledTypes(aggregationEnabledTypes);
CustomizedStateConfig customizedStateConfig =
customizedStateConfigBuilder.build();
// Write the CustomizedStateConfig to Zookeeper
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(ZK_ADDR));
Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.customizedStateConfig(),
customizedStateConfig);
// Read CustomizedStateConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CustomizedStateConfig customizedStateConfigFromZk =
_configAccessor.getCustomizedStateConfig(clusterName);
    List<String> aggregationEnabledTypesFromZk =
        customizedStateConfigFromZk.getAggregationEnabledTypes();
    Assert.assertEquals(aggregationEnabledTypesFromZk.size(), 2);
    Assert.assertEquals(aggregationEnabledTypesFromZk.get(0), "mockType1");
    Assert.assertEquals(aggregationEnabledTypesFromZk.get(1), "mockType2");
}
@Test(dependsOnMethods = "testCustomizedStateConfig")
public void testCustomizedStateConfigBuilder() {
String className = getShortClassName();
String clusterName = "CLUSTER_" + className;
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
CustomizedStateConfig.Builder builder =
new CustomizedStateConfig.Builder();
builder.addAggregationEnabledType("mockType1");
builder.addAggregationEnabledType("mockType2");
// Check builder getter methods
List<String> aggregationEnabledTypes = builder.getAggregationEnabledTypes();
Assert.assertEquals(aggregationEnabledTypes.size(), 2);
Assert.assertEquals(aggregationEnabledTypes.get(0), "mockType1");
Assert.assertEquals(aggregationEnabledTypes.get(1), "mockType2");
CustomizedStateConfig customizedStateConfig = builder.build();
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(ZK_ADDR));
Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.customizedStateConfig(),
customizedStateConfig);
// Read CustomizedStateConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(ZK_ADDR);
CustomizedStateConfig customizedStateConfigFromZk =
_configAccessor.getCustomizedStateConfig(clusterName);
List<String> aggregationEnabledTypesFromZk =
customizedStateConfigFromZk.getAggregationEnabledTypes();
Assert.assertEquals(aggregationEnabledTypesFromZk.get(0), "mockType1");
Assert.assertEquals(aggregationEnabledTypesFromZk.get(1), "mockType2");
}
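  // Hedged sketch: the builder's bulk list setter should be equivalent to
  // repeated addAggregationEnabledType calls.
  @Test(dependsOnMethods = "testCustomizedStateConfigBuilder")
  public void testBuilderListSetterMatchesAdds() {
    CustomizedStateConfig.Builder listBuilder = new CustomizedStateConfig.Builder();
    List<String> types = new ArrayList<String>();
    types.add("mockType1");
    types.add("mockType2");
    listBuilder.setAggregationEnabledTypes(types);
    CustomizedStateConfig.Builder addBuilder = new CustomizedStateConfig.Builder();
    addBuilder.addAggregationEnabledType("mockType1");
    addBuilder.addAggregationEnabledType("mockType2");
    Assert.assertEquals(listBuilder.getAggregationEnabledTypes(),
        addBuilder.getAggregationEnabledTypes());
  }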
}
| 9,859 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestStateTransitionProperty.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.api.config.StateTransitionTimeoutConfig;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestStateTransitionProperty {
@Test
public void testTimeoutSetAndGet() {
StateTransitionTimeoutConfig stateTransitionTimeoutConfig = new StateTransitionTimeoutConfig(new ZNRecord("TEST"));
stateTransitionTimeoutConfig.setStateTransitionTimeout("MASTER", "SLAVE", 300);
Assert.assertEquals(stateTransitionTimeoutConfig.getStateTransitionTimeout("MASTER", "SLAVE"), 300);
stateTransitionTimeoutConfig.setStateTransitionTimeout("*", "MASTER", 500);
Assert.assertEquals(stateTransitionTimeoutConfig.getStateTransitionTimeout("OFFLINE", "MASTER"), 500);
Assert.assertEquals(stateTransitionTimeoutConfig.getStateTransitionTimeout("SLAVE", "OFFLINE"), -1);
}
}
| 9,860 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestControllerHistoryModel.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.base.Splitter;
import org.apache.helix.TestHelper;
import org.apache.helix.api.status.ClusterManagementMode;
import org.apache.helix.zookeeper.zkclient.NetworkUtil;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestControllerHistoryModel {
@Test
public void testManagementModeHistory() {
ControllerHistory controllerHistory = new ControllerHistory("HISTORY");
String controller = "controller-0";
ClusterManagementMode mode = new ClusterManagementMode(ClusterManagementMode.Type.CLUSTER_FREEZE,
ClusterManagementMode.Status.COMPLETED);
long time = System.currentTimeMillis();
String fromHost = NetworkUtil.getLocalhostName();
String reason = TestHelper.getTestMethodName();
controllerHistory.updateManagementModeHistory(controller, mode, fromHost, time, reason);
List<String> historyList = controllerHistory.getManagementModeHistory();
String lastHistory = historyList.get(historyList.size() - 1);
Map<String, String> historyMap = stringToMap(lastHistory);
Map<String, String> expectedMap = new HashMap<>();
expectedMap.put("CONTROLLER", controller);
expectedMap.put("TIME", Instant.ofEpochMilli(time).toString());
expectedMap.put("MODE", mode.getMode().name());
expectedMap.put("STATUS", mode.getStatus().name());
expectedMap.put(PauseSignal.PauseSignalProperty.FROM_HOST.name(), fromHost);
expectedMap.put(PauseSignal.PauseSignalProperty.REASON.name(), reason);
Assert.assertEquals(historyMap, expectedMap);
// Add more than 10 entries, it should only keep the latest 10.
List<String> reasonList = new ArrayList<>();
for (int i = 0; i < 15; i++) {
String reasonI = reason + "-" + i;
controllerHistory.updateManagementModeHistory(controller, mode, fromHost, time, reasonI);
reasonList.add(reasonI);
}
historyList = controllerHistory.getManagementModeHistory();
Assert.assertEquals(historyList.size(), 10);
// Assert the history is the latest 10 entries.
int i = 5;
for (String entry : historyList) {
Map<String, String> actual = stringToMap(entry);
Assert.assertEquals(actual.get(PauseSignal.PauseSignalProperty.REASON.name()),
reasonList.get(i++));
}
}
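  // Hedged sketch: exercises the private stringToMap helper below directly on a
  // well-formed map-style string.
  @Test
  public void testStringToMapHelper() {
    Map<String, String> parsed = stringToMap("{k1=v1, k2=v2}");
    Assert.assertEquals(parsed.get("k1"), "v1");
    Assert.assertEquals(parsed.get("k2"), "v2");
  }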
/**
* Performs conversion from a map string into a map. The string was converted by map's toString().
*
* @param mapAsString A string that is converted by map's toString() method.
* Example: "{k1=v1, k2=v2}"
* @return Map<String, String>
*/
private static Map<String, String> stringToMap(String mapAsString) {
return Splitter.on(", ").withKeyValueSeparator('=')
.split(mapAsString.substring(1, mapAsString.length() - 1));
}
}
| 9,861 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestConstraint.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.ClusterConstraints.ConstraintAttribute;
import org.apache.helix.model.ClusterConstraints.ConstraintType;
import org.apache.helix.model.Message.MessageType;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestConstraint extends ZkUnitTestBase {
@Test
public void testMsgConstraint() {
String className = getShortClassName();
System.out.println("START testMsgConstraint() at " + new Date(System.currentTimeMillis()));
String clusterName = "CLUSTER_" + className + "_msg";
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
ZNRecord record = new ZNRecord("testMsgConstraint");
// constraint0:
// "MESSAGE_TYPE=STATE_TRANSITION,CONSTRAINT_VALUE=ANY"
record.setMapField("constraint0", new TreeMap<String, String>());
record.getMapField("constraint0").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint0").put("CONSTRAINT_VALUE", "ANY");
ConstraintItem constraint0 = new ConstraintItem(record.getMapField("constraint0"));
// constraint1:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,CONSTRAINT_VALUE=ANY"
record.setMapField("constraint1", new TreeMap<String, String>());
record.getMapField("constraint1").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint1").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint1").put("CONSTRAINT_VALUE", "50");
ConstraintItem constraint1 = new ConstraintItem(record.getMapField("constraint1"));
// constraint2:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=.*,RESOURCE=TestDB,CONSTRAINT_VALUE=2";
record.setMapField("constraint2", new TreeMap<String, String>());
record.getMapField("constraint2").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint2").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint2").put("INSTANCE", ".*");
record.getMapField("constraint2").put("RESOURCE", "TestDB");
record.getMapField("constraint2").put("CONSTRAINT_VALUE", "2");
ConstraintItem constraint2 = new ConstraintItem(record.getMapField("constraint2"));
// constraint3:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=localhost_12918,RESOURCE=.*,CONSTRAINT_VALUE=1";
record.setMapField("constraint3", new TreeMap<String, String>());
record.getMapField("constraint3").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint3").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint3").put("INSTANCE", "localhost_12919");
record.getMapField("constraint3").put("RESOURCE", ".*");
record.getMapField("constraint3").put("CONSTRAINT_VALUE", "1");
ConstraintItem constraint3 = new ConstraintItem(record.getMapField("constraint3"));
// constraint4:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=.*,RESOURCE=.*,CONSTRAINT_VALUE=10"
record.setMapField("constraint4", new TreeMap<String, String>());
record.getMapField("constraint4").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint4").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint4").put("INSTANCE", ".*");
record.getMapField("constraint4").put("RESOURCE", ".*");
record.getMapField("constraint4").put("CONSTRAINT_VALUE", "10");
ConstraintItem constraint4 = new ConstraintItem(record.getMapField("constraint4"));
// constraint5:
// "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=localhost_12918,RESOURCE=TestDB,CONSTRAINT_VALUE=5"
record.setMapField("constraint5", new TreeMap<String, String>());
record.getMapField("constraint5").put("MESSAGE_TYPE", "STATE_TRANSITION");
record.getMapField("constraint5").put("TRANSITION", "OFFLINE-SLAVE");
record.getMapField("constraint5").put("INSTANCE", "localhost_12918");
record.getMapField("constraint5").put("RESOURCE", "TestDB");
record.getMapField("constraint5").put("CONSTRAINT_VALUE", "5");
ConstraintItem constraint5 = new ConstraintItem(record.getMapField("constraint5"));
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient));
Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.constraint(ConstraintType.MESSAGE_CONSTRAINT.toString()),
new ClusterConstraints(record));
record =
accessor.getProperty(keyBuilder.constraint(ConstraintType.MESSAGE_CONSTRAINT.toString()))
.getRecord();
ClusterConstraints constraint = new ClusterConstraints(record);
// System.out.println("constraint: " + constraint);
// message1
Message msg1 =
createMessage(MessageType.STATE_TRANSITION, "msgId-001", "OFFLINE", "SLAVE", "TestDB",
"localhost_12918");
Map<ConstraintAttribute, String> msgAttr = ClusterConstraints.toConstraintAttributes(msg1);
Set<ConstraintItem> matches = constraint.match(msgAttr);
System.out.println(msg1 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 5);
Assert.assertTrue(contains(matches, constraint0));
Assert.assertTrue(contains(matches, constraint1));
Assert.assertTrue(contains(matches, constraint2));
Assert.assertTrue(contains(matches, constraint4));
Assert.assertTrue(contains(matches, constraint5));
// message2
Message msg2 =
createMessage(MessageType.STATE_TRANSITION, "msgId-002", "OFFLINE", "SLAVE", "TestDB",
"localhost_12919");
msgAttr = ClusterConstraints.toConstraintAttributes(msg2);
matches = constraint.match(msgAttr);
System.out.println(msg2 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 5);
Assert.assertTrue(contains(matches, constraint0));
Assert.assertTrue(contains(matches, constraint1));
Assert.assertTrue(contains(matches, constraint2));
Assert.assertTrue(contains(matches, constraint3));
Assert.assertTrue(contains(matches, constraint4));
System.out.println("END testMsgConstraint() at " + new Date(System.currentTimeMillis()));
}
@Test
public void testStateConstraint() {
String className = getShortClassName();
System.out.println("START testStateConstraint() at " + new Date(System.currentTimeMillis()));
String clusterName = "CLUSTER_" + className + "_state";
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
ZNRecord record = new ZNRecord("testStateConstraint");
// constraint0:
// "STATE=MASTER,CONSTRAINT_VALUE=1"
record.setMapField("constraint0", new TreeMap<String, String>());
record.getMapField("constraint0").put("STATE", "MASTER");
record.getMapField("constraint0").put("CONSTRAINT_VALUE", "1");
ConstraintItem constraint0 = new ConstraintItem(record.getMapField("constraint0"));
// constraint1:
// "STATE=MASTER,RESOURCE=TestDB,CONSTRAINT_VALUE=5"
record.setMapField("constraint1", new TreeMap<String, String>());
record.getMapField("constraint1").put("STATE", "MASTER");
record.getMapField("constraint1").put("RESOURCE", "TestDB");
record.getMapField("constraint1").put("CONSTRAINT_VALUE", "1");
ConstraintItem constraint1 = new ConstraintItem(record.getMapField("constraint1"));
// constraint2:
// "STATE=MASTER,RESOURCE=.*,CONSTRAINT_VALUE=2"
record.setMapField("constraint2", new TreeMap<String, String>());
record.getMapField("constraint2").put("STATE", "MASTER");
record.getMapField("constraint2").put("RESOURCE", ".*");
record.getMapField("constraint2").put("CONSTRAINT_VALUE", "2");
ConstraintItem constraint2 = new ConstraintItem(record.getMapField("constraint2"));
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient));
Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.constraint(ConstraintType.STATE_CONSTRAINT.toString()),
new ClusterConstraints(record));
record =
accessor.getProperty(keyBuilder.constraint(ConstraintType.STATE_CONSTRAINT.toString()))
.getRecord();
ClusterConstraints constraint = new ClusterConstraints(record);
// System.out.println("constraint: " + constraint);
    // state1: matches constraint0, constraint1, and constraint2
Map<ConstraintAttribute, String> stateAttr1 = new HashMap<ConstraintAttribute, String>();
stateAttr1.put(ConstraintAttribute.STATE, "MASTER");
stateAttr1.put(ConstraintAttribute.RESOURCE, "TestDB");
Set<ConstraintItem> matches = constraint.match(stateAttr1);
System.out.println(stateAttr1 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 3);
Assert.assertTrue(contains(matches, constraint0));
Assert.assertTrue(contains(matches, constraint1));
Assert.assertTrue(contains(matches, constraint2));
// matches = selectConstraints(matches, stateAttr1);
// System.out.println(stateAttr1 + " matches(" + matches.size() + "): " + matches);
// Assert.assertEquals(matches.size(), 2);
// Assert.assertTrue(contains(matches, constraint0));
// Assert.assertTrue(contains(matches, constraint1));
    // state2: matches only the wildcard constraints (constraint0 and constraint2)
Map<ConstraintAttribute, String> stateAttr2 = new HashMap<ConstraintAttribute, String>();
stateAttr2.put(ConstraintAttribute.STATE, "MASTER");
stateAttr2.put(ConstraintAttribute.RESOURCE, "MyDB");
matches = constraint.match(stateAttr2);
System.out.println(stateAttr2 + " matches(" + matches.size() + "): " + matches);
Assert.assertEquals(matches.size(), 2);
Assert.assertTrue(contains(matches, constraint0));
Assert.assertTrue(contains(matches, constraint2));
// matches = selectConstraints(matches, stateAttr2);
// System.out.println(stateAttr2 + " matches(" + matches.size() + "): " + matches);
// Assert.assertEquals(matches.size(), 2);
// Assert.assertTrue(contains(matches, constraint0));
// Assert.assertTrue(contains(matches, constraint2));
deleteCluster(clusterName);
System.out.println("END testStateConstraint() at " + new Date(System.currentTimeMillis()));
}
private boolean contains(Set<ConstraintItem> constraints, ConstraintItem constraint) {
for (ConstraintItem item : constraints) {
if (item.toString().equals(constraint.toString())) {
return true;
}
}
return false;
}
}
| 9,862 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/TestClusterTrie.java | package org.apache.helix.model;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.HelixException;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class TestClusterTrie {
private ClusterTrie _trie;
final List<String> _instanceNames = new ArrayList<>();
final Map<String, InstanceConfig> _instanceConfigMap = new HashMap<>();
private ClusterConfig _clusterConfig;
final int _numOfNodes = 40;
@BeforeClass
public void beforeClass() {
for (int i = 0; i < _numOfNodes; i++) {
_instanceNames.add(String.valueOf(i));
}
createClusterConfig();
createInstanceConfigMap();
}
@Test
public void testConstructionMissingInstanceConfigMap() {
Map<String, InstanceConfig> emptyMap = new HashMap<>();
try {
new ClusterTrie(_instanceNames, emptyMap, _clusterConfig);
Assert.fail("Expecting instance config not found exception");
} catch (HelixException e) {
Assert.assertTrue(e.getMessage().contains("is not found!"));
}
}
@Test
public void testConstructionMissingTopology() {
_clusterConfig.setTopology(null);
try {
new ClusterTrie(_instanceNames, _instanceConfigMap, _clusterConfig);
Assert.fail("Expecting topology not set exception");
} catch (HelixException e) {
Assert.assertTrue(e.getMessage().contains("is invalid!"));
}
_clusterConfig.setTopology("/group/zone/rack/host");
}
@Test
public void testConstructionInvalidTopology() {
_clusterConfig.setTopology("invalidTopology");
try {
new ClusterTrie(_instanceNames, _instanceConfigMap, _clusterConfig);
Assert.fail("Expecting topology invalid exception");
} catch (HelixException e) {
Assert.assertTrue(e.getMessage().contains("is invalid!"));
}
_clusterConfig.setTopology("/group/zone/rack/host");
}
@Test
public void testConstructionNormal() {
try {
_trie = new ClusterTrie(_instanceNames, _instanceConfigMap, _clusterConfig);
} catch (HelixException e) {
Assert.fail("Not expecting HelixException");
}
}
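  // Hedged sketch: after a successful build, the parsed topology keys should
  // mirror the path segments of the configured topology string.
  @Test(dependsOnMethods = "testConstructionNormal")
  public void testTopologyKeysParsed() {
    ClusterTrie trie = new ClusterTrie(_instanceNames, _instanceConfigMap, _clusterConfig);
    String[] topologyDef = trie.getTopologyKeys();
    Assert.assertEquals(topologyDef, new String[]{"group", "zone", "rack", "host"});
  }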
@Test
public void testConstructionNormalWithSpace() {
_clusterConfig.setTopology("/ group/ zone/rack/host");
try {
_trie = new ClusterTrie(_instanceNames, _instanceConfigMap, _clusterConfig);
} catch (HelixException e) {
Assert.fail("Not expecting HelixException");
}
String[] topologyDef = _trie.getTopologyKeys();
Assert.assertEquals(topologyDef[0], "group");
Assert.assertEquals(topologyDef[1], "zone");
_clusterConfig.setTopology("/group/zone/rack/host");
}
@Test
public void testConstructionNormalWithInvalidConfig() {
String instance = "invalidInstance";
InstanceConfig config = new InstanceConfig(instance);
config.setDomain(String.format("invaliddomain=%s, zone=%s, rack=%s, host=%s", 1, 2, 3, 4));
_instanceConfigMap.put(instance, config);
try {
_trie = new ClusterTrie(_instanceNames, _instanceConfigMap, _clusterConfig);
} catch (HelixException e) {
Assert.fail("Not expecting HelixException");
}
Assert.assertEquals(_trie.getInvalidInstances().size(), 1);
    Assert.assertEquals(_trie.getInvalidInstances().get(0), instance);
_instanceConfigMap.remove(instance);
}
private void createInstanceConfigMap() {
for (int i = 0; i < _instanceNames.size(); i++) {
String instance = _instanceNames.get(i);
InstanceConfig config = new InstanceConfig(instance);
// create 2 groups, 4 zones, and 4 racks.
config.setDomain(String.format("group=%s, zone=%s, rack=%s, host=%s", i % (_numOfNodes / 10),
i % (_numOfNodes / 5), i % (_numOfNodes / 5), instance));
_instanceConfigMap.put(instance, config);
}
}
private void createClusterConfig() {
_clusterConfig = new ClusterConfig("test");
_clusterConfig.setTopologyAwareEnabled(true);
_clusterConfig.setTopology("/group/zone/rack/host");
_clusterConfig.setFaultZoneType("rack");
}
} | 9,863 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/cloud/TestCloudConfig.java | package org.apache.helix.model.cloud;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.List;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.cloud.constants.CloudProvider;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.CloudConfig;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestCloudConfig extends ZkUnitTestBase {
@Test(expectedExceptions = HelixException.class)
public void testCloudConfigNonExistentCluster() {
String className = getShortClassName();
String clusterName = "CLUSTER_" + className;
    // Read CloudConfig from Zookeeper and get an exception since the cluster is not set up yet
ConfigAccessor _configAccessor = new ConfigAccessor(_gZkClient);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
}
@Test(dependsOnMethods = "testCloudConfigNonExistentCluster")
public void testCloudConfigNull() {
String className = getShortClassName();
String clusterName = "CLUSTER_" + className;
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
// Read CloudConfig from Zookeeper
ConfigAccessor _configAccessor = new ConfigAccessor(_gZkClient);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
// since CloudConfig is not written to ZooKeeper, the output should be null
Assert.assertNull(cloudConfigFromZk);
}
@Test(dependsOnMethods = "testCloudConfigNull")
public void testCloudConfig() throws Exception {
String className = getShortClassName();
String clusterName = "CLUSTER_" + className;
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
// Create dummy CloudConfig object
CloudConfig.Builder cloudConfigBuilder = new CloudConfig.Builder();
cloudConfigBuilder.setCloudEnabled(true);
cloudConfigBuilder.setCloudProvider(CloudProvider.AZURE);
cloudConfigBuilder.setCloudID("TestID");
List<String> infoURL = new ArrayList<String>();
infoURL.add("TestURL");
cloudConfigBuilder.setCloudInfoSources(infoURL);
cloudConfigBuilder.setCloudInfoProcessorName("TestProcessor");
CloudConfig cloudConfig = cloudConfigBuilder.build();
// Write the CloudConfig to Zookeeper
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient));
Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.cloudConfig(), cloudConfig);
// Read CloudConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(_gZkClient);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertTrue(cloudConfigFromZk.isCloudEnabled());
Assert.assertEquals(cloudConfigFromZk.getCloudProvider(), CloudProvider.AZURE.name());
Assert.assertEquals(cloudConfigFromZk.getCloudID(), "TestID");
Assert.assertEquals(cloudConfigFromZk.getCloudInfoSources().size(), 1);
Assert.assertEquals(cloudConfigFromZk.getCloudInfoProcessorName(), "TestProcessor");
}
@Test(expectedExceptions = HelixException.class)
public void testUnverifiedCloudConfigBuilder() {
String className = getShortClassName();
String clusterName = "CLUSTER_" + className;
CloudConfig.Builder builder = new CloudConfig.Builder();
builder.setCloudEnabled(true);
    // Validation will fail because cloudID has not been defined.
CloudConfig cloudConfig = builder.build();
}
@Test(expectedExceptions = HelixException.class)
public void testUnverifiedCloudConfigBuilderEmptySources() {
String className = getShortClassName();
String clusterName = "CLUSTER_" + className;
CloudConfig.Builder builder = new CloudConfig.Builder();
builder.setCloudEnabled(true);
builder.setCloudProvider(CloudProvider.CUSTOMIZED);
builder.setCloudID("TestID");
List<String> emptyList = new ArrayList<String>();
builder.setCloudInfoSources(emptyList);
builder.setCloudInfoProcessorName("TestProcessor");
CloudConfig cloudConfig = builder.build();
}
@Test(expectedExceptions = HelixException.class)
public void testUnverifiedCloudConfigBuilderWithoutProcessor() {
CloudConfig.Builder builder = new CloudConfig.Builder();
builder.setCloudEnabled(true);
builder.setCloudProvider(CloudProvider.CUSTOMIZED);
builder.setCloudID("TestID");
List<String> testList = new ArrayList<String>();
builder.setCloudInfoSources(testList);
builder.addCloudInfoSource("TestURL");
CloudConfig cloudConfig = builder.build();
}
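  // Hedged sketch: the same CUSTOMIZED-provider setup validates once the
  // processor name is also supplied, complementing the failure case above.
  @Test
  public void testVerifiedCustomizedCloudConfigBuilder() {
    CloudConfig.Builder builder = new CloudConfig.Builder();
    builder.setCloudEnabled(true);
    builder.setCloudProvider(CloudProvider.CUSTOMIZED);
    builder.setCloudID("TestID");
    builder.addCloudInfoSource("TestURL");
    builder.setCloudInfoProcessorName("TestProcessor");
    CloudConfig cloudConfig = builder.build();
    Assert.assertTrue(cloudConfig.isCloudEnabled());
  }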
@Test(dependsOnMethods = "testCloudConfig")
public void testCloudConfigBuilder() {
String className = getShortClassName();
String clusterName = "CLUSTER_" + className;
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
CloudConfig.Builder builder = new CloudConfig.Builder();
builder.setCloudEnabled(true);
builder.setCloudProvider(CloudProvider.CUSTOMIZED);
builder.setCloudID("TestID");
builder.addCloudInfoSource("TestURL0");
builder.addCloudInfoSource("TestURL1");
builder.setCloudInfoProcessorName("TestProcessor");
builder.setCloudInfoProcessorPackageName("org.apache.foo.bar");
// Check builder getter methods
Assert.assertTrue(builder.getCloudEnabled());
Assert.assertEquals(builder.getCloudProvider(), CloudProvider.CUSTOMIZED.name());
Assert.assertEquals(builder.getCloudID(), "TestID");
List<String> listUrlFromBuilder = builder.getCloudInfoSources();
Assert.assertEquals(listUrlFromBuilder.size(), 2);
Assert.assertEquals(listUrlFromBuilder.get(0), "TestURL0");
Assert.assertEquals(listUrlFromBuilder.get(1), "TestURL1");
Assert.assertEquals(builder.getCloudInfoProcessorName(), "TestProcessor");
CloudConfig cloudConfig = builder.build();
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient));
Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.cloudConfig(), cloudConfig);
// Read CloudConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(_gZkClient);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertTrue(cloudConfigFromZk.isCloudEnabled());
Assert.assertEquals(cloudConfigFromZk.getCloudProvider(), CloudProvider.CUSTOMIZED.name());
Assert.assertEquals(cloudConfigFromZk.getCloudID(), "TestID");
List<String> listUrlFromZk = cloudConfigFromZk.getCloudInfoSources();
Assert.assertEquals(listUrlFromZk.get(0), "TestURL0");
Assert.assertEquals(listUrlFromZk.get(1), "TestURL1");
Assert.assertEquals(cloudConfigFromZk.getCloudInfoProcessorName(), "TestProcessor");
Assert.assertEquals(cloudConfigFromZk.getCloudInfoProcessorPackage(), "org.apache.foo.bar");
}
@Test(dependsOnMethods = "testCloudConfigBuilder")
public void testCloudConfigBuilderAzureProvider() {
String className = getShortClassName();
String clusterName = "CLUSTER_" + className;
TestHelper.setupEmptyCluster(_gZkClient, clusterName);
CloudConfig.Builder builder = new CloudConfig.Builder();
builder.setCloudEnabled(true);
builder.setCloudProvider(CloudProvider.AZURE);
builder.setCloudID("TestID");
// Check builder getter methods
Assert.assertTrue(builder.getCloudEnabled());
Assert.assertEquals(builder.getCloudProvider(), CloudProvider.AZURE.name());
CloudConfig cloudConfig = builder.build();
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient));
Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.cloudConfig(), cloudConfig);
// Read CloudConfig from Zookeeper and check the content
ConfigAccessor _configAccessor = new ConfigAccessor(_gZkClient);
CloudConfig cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertTrue(cloudConfigFromZk.isCloudEnabled());
Assert.assertEquals(cloudConfigFromZk.getCloudProvider(), CloudProvider.AZURE.name());
// Since user does not set the CloudInfoProcessorName, this field will be null.
Assert.assertNull(cloudConfigFromZk.getCloudInfoProcessorName());
// Checking the set method in CloudConfig
cloudConfig.setCloudEnabled(false);
accessor.setProperty(keyBuilder.cloudConfig(), cloudConfig);
cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertFalse(cloudConfigFromZk.isCloudEnabled());
cloudConfig.setCloudEnabled(true);
cloudConfig.setCloudID("TestID2");
List<String> sourceList = new ArrayList<String>();
sourceList.add("TestURL0");
sourceList.add("TestURL1");
cloudConfig.setCloudInfoSource(sourceList);
accessor.setProperty(keyBuilder.cloudConfig(), cloudConfig);
cloudConfigFromZk = _configAccessor.getCloudConfig(clusterName);
Assert.assertTrue(cloudConfigFromZk.isCloudEnabled());
Assert.assertEquals(cloudConfigFromZk.getCloudProvider(), CloudProvider.AZURE.name());
Assert.assertEquals(cloudConfigFromZk.getCloudID(), "TestID2");
List<String> listUrlFromZk = cloudConfigFromZk.getCloudInfoSources();
Assert.assertEquals(listUrlFromZk.get(0), "TestURL0");
Assert.assertEquals(listUrlFromZk.get(1), "TestURL1");
}
}
| 9,864 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model | Create_ds/helix/helix-core/src/test/java/org/apache/helix/model/builder/TestIdealStateBuilder.java | package org.apache.helix.model.builder;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.model.IdealState;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestIdealStateBuilder {
@Test
public void testSemiAutoISBuilder() {
SemiAutoModeISBuilder builder = new SemiAutoModeISBuilder("test-db");
builder.setStateModel("MasterSlave").setNumPartitions(2).setNumReplica(2);
builder.assignPreferenceList("test-db_0", "node_0", "node_1").assignPreferenceList("test-db_1",
"node_1", "node_0");
IdealState idealState = null;
try {
idealState = builder.build();
} catch (Exception e) {
Assert.fail("fail to build an auto mode ideal-state.", e);
}
// System.out.println("ideal-state: " + idealState);
Assert.assertEquals(idealState.getRebalanceMode(), IdealState.RebalanceMode.SEMI_AUTO,
"rebalancer mode should be semi-auto");
}
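  // Hedged sketch (assumes IdealState#getPreferenceList(String), which Helix
  // exposes for semi-auto resources): preference lists assigned through the
  // builder should be readable back from the built ideal state.
  @Test
  public void testSemiAutoPreferenceListRoundTrip() throws Exception {
    SemiAutoModeISBuilder builder = new SemiAutoModeISBuilder("test-db");
    builder.setStateModel("MasterSlave").setNumPartitions(1).setNumReplica(2);
    builder.assignPreferenceList("test-db_0", "node_0", "node_1");
    IdealState idealState = builder.build();
    Assert.assertEquals(idealState.getPreferenceList("test-db_0"),
        java.util.Arrays.asList("node_0", "node_1"));
  }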
@Test
public void testFullAutoModeISModeBuilder() {
String nodeGroup = "groupA";
FullAutoModeISBuilder builder = new FullAutoModeISBuilder("test-db");
builder.setStateModel("MasterSlave").setNumPartitions(2).setNumReplica(2).setNodeGroup(nodeGroup);
builder.add("test-db_0").add("test-db_1");
IdealState idealState = null;
try {
idealState = builder.build();
} catch (Exception e) {
Assert.fail("fail to build an auto-rebalance mode ideal-state.", e);
}
// System.out.println("ideal-state: " + idealState);
Assert.assertEquals(idealState.getRebalanceMode(), IdealState.RebalanceMode.FULL_AUTO,
"rebalancer mode should be auto");
Assert.assertEquals(idealState.getInstanceGroupTag(), nodeGroup);
}
@Test
public void testCustomModeISBuilder() {
CustomModeISBuilder builder = new CustomModeISBuilder("test-db");
builder.setStateModel("MasterSlave").setNumPartitions(2).setNumReplica(2);
builder.assignInstanceAndState("test-db_0", "node_0", "MASTER")
.assignInstanceAndState("test-db_0", "node_1", "SLAVE")
.assignInstanceAndState("test-db_1", "node_0", "SLAVE")
.assignInstanceAndState("test-db_1", "node_1", "MASTER");
IdealState idealState = null;
try {
idealState = builder.build();
} catch (Exception e) {
Assert.fail("fail to build a custom mode ideal-state.", e);
}
// System.out.println("ideal-state: " + idealState);
Assert.assertEquals(idealState.getRebalanceMode(), IdealState.RebalanceMode.CUSTOMIZED,
"rebalancer mode should be customized");
}
}
| 9,865 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/spectator/TestRoutingDataCache.java | package org.apache.helix.spectator;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import org.apache.helix.HelixConstants;
import org.apache.helix.PropertyType;
import org.apache.helix.TestHelper;
import org.apache.helix.integration.common.ZkStandAloneCMTestBase;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.mock.MockZkHelixDataAccessor;
import org.apache.helix.model.CurrentState;
import org.apache.helix.tools.ClusterVerifiers.BestPossibleExternalViewVerifier;
import org.apache.helix.tools.ClusterVerifiers.ZkHelixClusterVerifier;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestRoutingDataCache extends ZkStandAloneCMTestBase {
@Test
public void testUpdateOnNotification() {
Assert.assertTrue(_clusterVerifier.verifyByPolling());
MockZkHelixDataAccessor accessor =
new MockZkHelixDataAccessor(CLUSTER_NAME, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
RoutingDataCache cache =
new RoutingDataCache("CLUSTER_" + TestHelper.getTestClassName(), PropertyType.EXTERNALVIEW);
cache.refresh(accessor);
Assert.assertEquals(accessor.getReadCount(PropertyType.EXTERNALVIEW), 1);
accessor.clearReadCounters();
// refresh again should read nothing
cache.refresh(accessor);
Assert.assertEquals(accessor.getReadCount(PropertyType.EXTERNALVIEW), 0);
accessor.clearReadCounters();
    // notify an external view change; refresh should still read nothing since the external view is unchanged
cache.notifyDataChange(HelixConstants.ChangeType.EXTERNAL_VIEW);
cache.refresh(accessor);
Assert.assertEquals(accessor.getReadCount(PropertyType.EXTERNALVIEW), 0);
}
@Test(dependsOnMethods = { "testUpdateOnNotification" })
public void testSelectiveUpdates()
throws Exception {
    // Added verifier to make sure the test starts at a stable state. Note that if
    // testCurrentStatesSelectiveUpdate() runs first, this test may fail without
    // this line. The reason is that when testCurrentStatesSelectiveUpdate()
    // stops one participant, it triggers a liveInstance update in the controller,
    // which leads to a new external view for TestDB being written. That update is
    // async with respect to the construction of the RoutingDataCache in this test
    // and the subsequent refresh().
Assert.assertTrue(_clusterVerifier.verifyByPolling());
MockZkHelixDataAccessor accessor =
new MockZkHelixDataAccessor(CLUSTER_NAME, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
RoutingDataCache cache =
new RoutingDataCache("CLUSTER_" + TestHelper.getTestClassName(), PropertyType.EXTERNALVIEW);
cache.refresh(accessor);
Assert.assertEquals(accessor.getReadCount(PropertyType.EXTERNALVIEW), 1);
accessor.clearReadCounters();
// refresh again should read nothing
cache.refresh(accessor);
Assert.assertEquals(accessor.getReadCount(PropertyType.EXTERNALVIEW), 0);
    // refresh again should read nothing, even after a change notification, since
    // no external view actually changed
cache.notifyDataChange(HelixConstants.ChangeType.EXTERNAL_VIEW);
cache.refresh(accessor);
Assert.assertEquals(accessor.getReadCount(PropertyType.EXTERNALVIEW), 0);
// add new resources
_gSetupTool.addResourceToCluster(CLUSTER_NAME, "TestDB_1", 1, STATE_MODEL);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, "TestDB_1", _replica);
Thread.sleep(100);
Assert.assertTrue(_clusterVerifier.verifyByPolling());
accessor.clearReadCounters();
    // refresh should read only the newly added resource's external view
cache.notifyDataChange(HelixConstants.ChangeType.EXTERNAL_VIEW);
cache.refresh(accessor);
Assert.assertEquals(accessor.getReadCount(PropertyType.EXTERNALVIEW), 1);
// Add more resources
accessor.clearReadCounters();
_gSetupTool.addResourceToCluster(CLUSTER_NAME, "TestDB_2", 1, STATE_MODEL);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, "TestDB_2", _replica);
_gSetupTool.addResourceToCluster(CLUSTER_NAME, "TestDB_3", 1, STATE_MODEL);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, "TestDB_3", _replica);
Assert.assertTrue(_clusterVerifier.verifyByPolling());
    // Four resources in total; two of them are newly added.
cache.notifyDataChange(HelixConstants.ChangeType.EXTERNAL_VIEW);
cache.refresh(accessor);
Assert.assertEquals(accessor.getReadCount(PropertyType.EXTERNALVIEW), 2);
// update one resource
accessor.clearReadCounters();
_gSetupTool.getClusterManagementTool().enableResource(CLUSTER_NAME, "TestDB_2", false);
Assert.assertTrue(_clusterVerifier.verifyByPolling());
cache.notifyDataChange(HelixConstants.ChangeType.EXTERNAL_VIEW);
cache.refresh(accessor);
Assert.assertEquals(accessor.getReadCount(PropertyType.EXTERNALVIEW), 1);
}
@Test
public void testCurrentStatesSelectiveUpdate() {
// Add a live instance to the cluster so the original cluster is not affected
// by stopping a participant.
String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + NODE_NR);
_gSetupTool.addInstanceToCluster(CLUSTER_NAME, instanceName);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, TEST_DB, _replica);
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
participant.syncStart();
Assert.assertTrue(_clusterVerifier.verifyByPolling());
try {
MockZkHelixDataAccessor accessor =
new MockZkHelixDataAccessor(CLUSTER_NAME, new ZkBaseDataAccessor<>(_gZkClient));
RoutingDataCache cache = new RoutingDataCache(CLUSTER_NAME, PropertyType.CURRENTSTATES);
// Empty current states map before refreshing.
Assert.assertTrue(cache.getCurrentStatesMap().isEmpty());
// 1. Initial cache refresh.
cache.refresh(accessor);
Map<String, Map<String, Map<String, CurrentState>>> currentStatesV1 =
cache.getCurrentStatesMap();
// Current states map is not empty and size equals to number of live instances.
Assert.assertFalse(currentStatesV1.isEmpty());
Assert.assertEquals(currentStatesV1.size(), _participants.length + 1);
// 2. Without any change, refresh routing data cache.
cache.refresh(accessor);
      // Since there is no current state change, the current states cache is not refreshed.
Assert.assertEquals(cache.getCurrentStatesMap(), currentStatesV1);
// 3. Stop one participant to make live instance change and refresh routing data cache.
participant.syncStop();
cache.notifyDataChange(HelixConstants.ChangeType.LIVE_INSTANCE);
cache.refresh(accessor);
Map<String, Map<String, Map<String, CurrentState>>> currentStatesV2 =
cache.getCurrentStatesMap();
// Current states cache should refresh and change.
Assert.assertFalse(currentStatesV2.isEmpty());
Assert.assertEquals(currentStatesV2.size(), _participants.length);
Assert.assertFalse(currentStatesV1.equals(currentStatesV2));
cache.refresh(accessor);
// No change.
Assert.assertEquals(cache.getCurrentStatesMap(), currentStatesV2);
} finally {
_gSetupTool.getClusterManagementTool().enableInstance(CLUSTER_NAME, instanceName, false);
_gSetupTool.dropInstanceFromCluster(CLUSTER_NAME, instanceName);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, TEST_DB, _replica);
Assert.assertTrue(_clusterVerifier.verifyByPolling());
}
}
}
| 9,866 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/TestClusterEventStatusMonitor.java | package org.apache.helix.monitoring;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.JMException;
import javax.management.MBeanException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
import org.apache.helix.controller.stages.IntermediateStateCalcStage;
import org.apache.helix.controller.stages.ReadClusterDataStage;
import org.apache.helix.controller.stages.resource.ResourceMessageDispatchStage;
import org.apache.helix.monitoring.mbeans.ClusterEventMonitor;
import org.apache.helix.monitoring.mbeans.ClusterStatusMonitor;
import org.testng.Assert;
import org.testng.annotations.Test;
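/**
 * Verifies that ClusterEventMonitor MBeans report correct duration counters and
 * histogram gauges, and that the histograms reset once the (shortened, 2s)
 * sliding time window expires.
 */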
public class TestClusterEventStatusMonitor {
private static final int TEST_SLIDING_WINDOW_MS = 2000; // 2s window for testing
private class ClusterStatusMonitorForTest extends ClusterStatusMonitor {
public ClusterStatusMonitorForTest(String clusterName) {
super(clusterName);
active();
}
public ConcurrentHashMap<String, ClusterEventMonitor> getClusterEventMonitors() {
return _clusterEventMonitorMap;
}
}
@Test()
public void test()
throws InstanceNotFoundException, MalformedObjectNameException, NullPointerException,
IOException, InterruptedException, MBeanException, AttributeNotFoundException,
      ReflectionException {
System.out.println("START TestClusterEventStatusMonitor");
String clusterName = "TestCluster";
ClusterStatusMonitorForTest monitor = new ClusterStatusMonitorForTest(clusterName);
MBeanServer _server = ManagementFactory.getPlatformMBeanServer();
Set<ObjectInstance> mbeans =
_server.queryMBeans(new ObjectName("ClusterStatus:Cluster=TestCluster,eventName=ClusterEvent,*"), null);
Assert.assertEquals(mbeans.size(), 0);
// Customize event monitors for testing
try {
this.addTestEventMonitor(monitor, ClusterEventMonitor.PhaseName.Callback.name());
this.addTestEventMonitor(monitor, ClusterEventMonitor.PhaseName.InQueue.name());
this.addTestEventMonitor(monitor, BestPossibleStateCalcStage.class.getSimpleName());
this.addTestEventMonitor(monitor, ReadClusterDataStage.class.getSimpleName());
this.addTestEventMonitor(monitor, IntermediateStateCalcStage.class.getSimpleName());
this.addTestEventMonitor(monitor, ResourceMessageDispatchStage.class.getSimpleName());
} catch (JMException jme) {
      Assert.fail("Failed to customize event monitors");
}
int count = 5;
Long totalDuration = 0L;
for (int i = 1; i <= count; i++) {
monitor.updateClusterEventDuration(ClusterEventMonitor.PhaseName.Callback.name(), 100 * i);
monitor.updateClusterEventDuration(ClusterEventMonitor.PhaseName.InQueue.name(), 100 * i);
monitor.updateClusterEventDuration(BestPossibleStateCalcStage.class.getSimpleName(), 100 * i);
monitor.updateClusterEventDuration(ReadClusterDataStage.class.getSimpleName(), 100 * i);
monitor.updateClusterEventDuration(IntermediateStateCalcStage.class.getSimpleName(), 100 * i);
monitor.updateClusterEventDuration(ResourceMessageDispatchStage.class.getSimpleName(), 100 * i);
totalDuration += 100 * i;
}
mbeans =
_server.queryMBeans(
new ObjectName("ClusterStatus:cluster=TestCluster,eventName=ClusterEvent,*"), null);
Assert.assertEquals(mbeans.size(), 6);
for (ObjectInstance mbean : mbeans) {
Long duration = (Long) _server.getAttribute(mbean.getObjectName(), "TotalDurationCounter");
Long maxDuration = (Long) _server.getAttribute(mbean.getObjectName(), "MaxSingleDurationGauge");
Long eventCount = (Long) _server.getAttribute(mbean.getObjectName(), "EventCounter");
Double pct75th = (Double) _server.getAttribute(mbean.getObjectName(), "DurationGauge.Pct75th");
Double pct95th = (Double) _server.getAttribute(mbean.getObjectName(), "DurationGauge.Pct95th");
Double pct99th = (Double) _server.getAttribute(mbean.getObjectName(), "DurationGauge.Pct99th");
Long max = (Long) _server.getAttribute(mbean.getObjectName(), "DurationGauge.Max");
Double stddev = (Double) _server.getAttribute(mbean.getObjectName(), "DurationGauge.StdDev");
Assert.assertEquals(duration, totalDuration);
Assert.assertEquals(maxDuration, Long.valueOf(100 * count));
Assert.assertEquals(eventCount, Long.valueOf(count));
Assert.assertTrue(Math.abs(pct75th - 450.0) < 1);
Assert.assertTrue(Math.abs(pct95th - 500.0) < 1);
Assert.assertTrue(Math.abs(pct99th - 500.0) < 1);
Assert.assertTrue(max == 500);
Assert.assertTrue(Math.abs(stddev - 158.0) < 0.2);
}
System.out.println("\nWaiting for time window to expire\n");
Thread.sleep(TEST_SLIDING_WINDOW_MS);
// Since sliding window has expired, just make sure histograms have its values reset
for (ObjectInstance mbean : mbeans) {
Double pct75th = (Double) _server.getAttribute(mbean.getObjectName(), "DurationGauge.Pct75th");
Double pct95th = (Double) _server.getAttribute(mbean.getObjectName(), "DurationGauge.Pct95th");
Double pct99th = (Double) _server.getAttribute(mbean.getObjectName(), "DurationGauge.Pct99th");
Long max = (Long) _server.getAttribute(mbean.getObjectName(), "DurationGauge.Max");
Double stddev = (Double) _server.getAttribute(mbean.getObjectName(), "DurationGauge.StdDev");
Assert.assertTrue(pct75th == 0.0);
Assert.assertTrue(pct95th == 0.0);
Assert.assertTrue(pct99th == 0.0);
Assert.assertTrue(max == 0);
Assert.assertTrue(stddev == 0.0);
}
monitor.reset();
mbeans =
_server.queryMBeans(
new ObjectName("ClusterStatus:cluster=TestCluster,eventName=ClusterEvent,*"), null);
Assert.assertEquals(mbeans.size(), 0);
System.out.println("END TestParticipantMonitor");
}
private void addTestEventMonitor(ClusterStatusMonitorForTest monitor, String phaseName) throws
JMException {
ConcurrentHashMap<String, ClusterEventMonitor> monitors = monitor.getClusterEventMonitors();
ClusterEventMonitor eventMonitor = new ClusterEventMonitor(monitor, phaseName,
TEST_SLIDING_WINDOW_MS);
eventMonitor.register();
monitors.put(phaseName, eventMonitor);
}
}
| 9,867 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/TestZKPathDataDumpTask.java | package org.apache.helix.monitoring;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.Error;
import org.apache.helix.model.StatusUpdate;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
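/**
 * Tests ZKPathDataDumpTask, which purges persisted statusUpdate and error
 * records from ZK once they are older than the configured age thresholds or
 * once the number of records under a path exceeds the configured capacity.
 */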
public class TestZKPathDataDumpTask extends ZkUnitTestBase {
@Test
public void test() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
int n = 1;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
2, // partitions per resource
n, // number of nodes
1, // replicas
"MasterSlave", true); // do rebalance
HelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
BaseDataAccessor<ZNRecord> baseAccessor = accessor.getBaseDataAccessor();
HelixManager manager = mock(HelixManager.class);
when(manager.getHelixDataAccessor()).thenReturn(accessor);
when(manager.getClusterName()).thenReturn(clusterName);
// run dump task without statusUpdates and errors, should not remove any existing
// statusUpdate/error paths
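    // Zero-ms age thresholds make every record eligible for purging, while
    // Integer.MAX_VALUE effectively disables the capacity-based cleanup.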
ZKPathDataDumpTask task = new ZKPathDataDumpTask(manager, 0L, 0L, Integer.MAX_VALUE);
task.run();
PropertyKey controllerStatusUpdateKey = keyBuilder.controllerTaskStatuses();
Assert.assertTrue(baseAccessor.exists(controllerStatusUpdateKey.getPath(), 0));
PropertyKey controllerErrorKey = keyBuilder.controllerTaskErrors();
Assert.assertTrue(baseAccessor.exists(controllerErrorKey.getPath(), 0));
PropertyKey statusUpdateKey = keyBuilder.stateTransitionStatus("localhost_12918");
Assert.assertTrue(baseAccessor.exists(statusUpdateKey.getPath(), 0));
    PropertyKey errorKey = keyBuilder.stateTransitionErrors("localhost_12918");
    Assert.assertTrue(baseAccessor.exists(errorKey.getPath(), 0));
// add participant status updates and errors
statusUpdateKey =
keyBuilder.stateTransitionStatus("localhost_12918", "session_0", "TestDB0", "TestDB0_0");
accessor.setProperty(statusUpdateKey, new StatusUpdate(new ZNRecord("statusUpdate")));
errorKey =
keyBuilder.stateTransitionError("localhost_12918", "session_0", "TestDB0", "TestDB0_0");
accessor.setProperty(errorKey, new Error(new ZNRecord("error")));
// add controller status updates and errors
controllerStatusUpdateKey = keyBuilder.controllerTaskStatus("session_0", "TestDB");
accessor.setProperty(controllerStatusUpdateKey,
new StatusUpdate(new ZNRecord("controllerStatusUpdate")));
controllerErrorKey = keyBuilder.controllerTaskError("TestDB_error");
accessor.setProperty(controllerErrorKey, new Error(new ZNRecord("controllerError")));
// run dump task, should remove existing statusUpdate/error paths
task.run();
Assert.assertFalse(baseAccessor.exists(controllerStatusUpdateKey.getPath(), 0));
Assert.assertFalse(baseAccessor.exists(controllerErrorKey.getPath(), 0));
Assert.assertFalse(baseAccessor.exists(statusUpdateKey.getPath(), 0));
Assert.assertFalse(baseAccessor.exists(errorKey.getPath(), 0));
controllerStatusUpdateKey = keyBuilder.controllerTaskStatuses();
Assert.assertTrue(baseAccessor.exists(controllerStatusUpdateKey.getPath(), 0));
controllerErrorKey = keyBuilder.controllerTaskErrors();
Assert.assertTrue(baseAccessor.exists(controllerErrorKey.getPath(), 0));
statusUpdateKey = keyBuilder.stateTransitionStatus("localhost_12918");
Assert.assertTrue(baseAccessor.exists(statusUpdateKey.getPath(), 0));
    errorKey = keyBuilder.stateTransitionErrors("localhost_12918");
    Assert.assertTrue(baseAccessor.exists(errorKey.getPath(), 0));
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testCapacityReached() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
int n = 1;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
2, // partitions per resource
n, // number of nodes
1, // replicas
"MasterSlave", true); // do rebalance
HelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
BaseDataAccessor<ZNRecord> baseAccessor = accessor.getBaseDataAccessor();
HelixManager manager = mock(HelixManager.class);
when(manager.getHelixDataAccessor()).thenReturn(accessor);
when(manager.getClusterName()).thenReturn(clusterName);
// run dump task without statusUpdates and errors, should not remove any existing
// statusUpdate/error paths
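    // Long.MAX_VALUE age thresholds effectively disable age-based cleanup, so
    // with a capacity of 1 only the record count can trigger a purge.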
ZKPathDataDumpTask task = new ZKPathDataDumpTask(manager, Long.MAX_VALUE, Long.MAX_VALUE, 1);
task.run();
PropertyKey controllerStatusUpdateKey = keyBuilder.controllerTaskStatuses();
Assert.assertTrue(baseAccessor.exists(controllerStatusUpdateKey.getPath(), 0));
PropertyKey controllerErrorKey = keyBuilder.controllerTaskErrors();
Assert.assertTrue(baseAccessor.exists(controllerErrorKey.getPath(), 0));
PropertyKey statusUpdateKey = keyBuilder.stateTransitionStatus("localhost_12918");
Assert.assertTrue(baseAccessor.exists(statusUpdateKey.getPath(), 0));
PropertyKey errorKey = keyBuilder.stateTransitionErrors("localhost_12918");
Assert.assertTrue(baseAccessor.exists(errorKey.getPath(), 0));
// add participant status updates and errors
statusUpdateKey =
keyBuilder.stateTransitionStatus("localhost_12918", "session_0", "TestDB0", "TestDB0_0");
accessor.setProperty(statusUpdateKey, new StatusUpdate(new ZNRecord("statusUpdate")));
errorKey =
keyBuilder.stateTransitionError("localhost_12918", "session_0", "TestDB0", "TestDB0_0");
accessor.setProperty(errorKey, new Error(new ZNRecord("error")));
// add controller status updates and errors (one of each, should not trigger anything)
controllerStatusUpdateKey = keyBuilder.controllerTaskStatus("session_0", "TestDB");
accessor.setProperty(controllerStatusUpdateKey,
new StatusUpdate(new ZNRecord("controllerStatusUpdate")));
controllerErrorKey = keyBuilder.controllerTaskError("TestDB_error");
accessor.setProperty(controllerErrorKey, new Error(new ZNRecord("controllerError")));
// run dump task, should not remove anything because the threshold is not exceeded
task.run();
Assert.assertTrue(baseAccessor.exists(controllerStatusUpdateKey.getPath(), 0));
Assert.assertTrue(baseAccessor.exists(controllerErrorKey.getPath(), 0));
Assert.assertTrue(baseAccessor.exists(statusUpdateKey.getPath(), 0));
Assert.assertTrue(baseAccessor.exists(errorKey.getPath(), 0));
// add a second set of all status updates and errors
statusUpdateKey =
keyBuilder.stateTransitionStatus("localhost_12918", "session_0", "TestDB0", "TestDB0_1");
accessor.setProperty(statusUpdateKey, new StatusUpdate(new ZNRecord("statusUpdate")));
errorKey =
keyBuilder.stateTransitionError("localhost_12918", "session_0", "TestDB0", "TestDB0_1");
accessor.setProperty(errorKey, new Error(new ZNRecord("error")));
controllerStatusUpdateKey = keyBuilder.controllerTaskStatus("session_0", "TestDB1");
accessor.setProperty(controllerStatusUpdateKey,
new StatusUpdate(new ZNRecord("controllerStatusUpdate")));
controllerErrorKey = keyBuilder.controllerTaskError("TestDB1_error");
accessor.setProperty(controllerErrorKey, new Error(new ZNRecord("controllerError")));
// run dump task, should remove everything since capacities are exceeded
task.run();
Assert.assertFalse(baseAccessor.exists(controllerStatusUpdateKey.getPath(), 0));
Assert.assertFalse(baseAccessor.exists(controllerErrorKey.getPath(), 0));
Assert.assertFalse(baseAccessor.exists(statusUpdateKey.getPath(), 0));
Assert.assertFalse(baseAccessor.exists(errorKey.getPath(), 0));
deleteCluster(clusterName);
}
}
| 9,868 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/TestWorkflowMonitor.java | package org.apache.helix.monitoring;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import org.apache.helix.monitoring.mbeans.MonitorDomainNames;
import org.apache.helix.monitoring.mbeans.WorkflowMonitor;
import org.apache.helix.task.TaskState;
import org.testng.Assert;
import org.testng.annotations.Test;
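/**
 * Unit test for WorkflowMonitor, verifying that workflow counters keep
 * accumulating across resetGauges() calls while the gauges themselves reset.
 */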
public class TestWorkflowMonitor {
private static final String TEST_CLUSTER_NAME = "TestCluster";
private static final String TEST_WORKFLOW_TYPE = "WorkflowTestType";
private static final String TEST_WORKFLOW_MBEAN_NAME = String
.format("%s=%s, %s=%s", "cluster", TEST_CLUSTER_NAME, "workflowType", TEST_WORKFLOW_TYPE);
private static final MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer();
@Test
public void testRun() throws Exception {
WorkflowMonitor wm = new WorkflowMonitor(TEST_CLUSTER_NAME, TEST_WORKFLOW_TYPE);
wm.register();
Set<ObjectInstance> existingInstances = beanServer.queryMBeans(
new ObjectName(MonitorDomainNames.ClusterStatus.name() + ":" + TEST_WORKFLOW_MBEAN_NAME),
null);
HashSet<String> expectedAttr = new HashSet<>(Arrays
.asList("SuccessfulWorkflowCount", "FailedWorkflowCount", "FailedWorkflowGauge",
"ExistingWorkflowGauge", "QueuedWorkflowGauge", "RunningWorkflowGauge"));
for (ObjectInstance i : existingInstances) {
for (MBeanAttributeInfo info : beanServer.getMBeanInfo(i.getObjectName()).getAttributes()) {
expectedAttr.remove(info.getName());
}
}
Assert.assertTrue(expectedAttr.isEmpty());
int successfulWfCnt = 10;
int failedWfCnt = 10;
int queuedWfCnt = 10;
int runningWfCnt = 10;
for (int i = 0; i < successfulWfCnt; i++) {
wm.updateWorkflowCounters(TaskState.COMPLETED);
wm.updateWorkflowGauges(TaskState.COMPLETED);
}
for (int i = 0; i < failedWfCnt; i++) {
wm.updateWorkflowCounters(TaskState.FAILED);
wm.updateWorkflowGauges(TaskState.FAILED);
}
for (int i = 0; i < queuedWfCnt; i++) {
wm.updateWorkflowGauges(TaskState.NOT_STARTED);
}
for (int i = 0; i < runningWfCnt; i++) {
wm.updateWorkflowGauges(TaskState.IN_PROGRESS);
}
// Test gauges
Assert.assertEquals(wm.getExistingWorkflowGauge(),
successfulWfCnt + failedWfCnt + queuedWfCnt + runningWfCnt);
Assert.assertEquals(wm.getFailedWorkflowGauge(), failedWfCnt);
Assert.assertEquals(wm.getQueuedWorkflowGauge(), queuedWfCnt);
Assert.assertEquals(wm.getRunningWorkflowGauge(), runningWfCnt);
// Test counts
Assert.assertEquals(wm.getFailedWorkflowCount(), failedWfCnt);
Assert.assertEquals(wm.getSuccessfulWorkflowCount(), successfulWfCnt);
wm.resetGauges();
for (int i = 0; i < successfulWfCnt; i++) {
wm.updateWorkflowCounters(TaskState.COMPLETED);
wm.updateWorkflowGauges(TaskState.COMPLETED);
}
for (int i = 0; i < failedWfCnt; i++) {
wm.updateWorkflowCounters(TaskState.FAILED);
wm.updateWorkflowGauges(TaskState.FAILED);
}
for (int i = 0; i < queuedWfCnt; i++) {
wm.updateWorkflowGauges(TaskState.NOT_STARTED);
}
for (int i = 0; i < runningWfCnt; i++) {
wm.updateWorkflowGauges(TaskState.IN_PROGRESS);
}
// After reset, counters should be accumulative, but gauges should be reset
Assert.assertEquals(wm.getExistingWorkflowGauge(),
successfulWfCnt + failedWfCnt + queuedWfCnt + runningWfCnt);
Assert.assertEquals(wm.getFailedWorkflowGauge(), failedWfCnt);
Assert.assertEquals(wm.getQueuedWorkflowGauge(), queuedWfCnt);
Assert.assertEquals(wm.getRunningWorkflowGauge(), runningWfCnt);
Assert.assertEquals(wm.getFailedWorkflowCount(), failedWfCnt * 2);
Assert.assertEquals(wm.getSuccessfulWorkflowCount(), successfulWfCnt * 2);
}
private ObjectName getObjectName() throws MalformedObjectNameException {
return new ObjectName(
String.format("%s:%s", MonitorDomainNames.ClusterStatus.name(), TEST_WORKFLOW_MBEAN_NAME));
}
private void registerMbean(Object bean, ObjectName name) {
try {
if (beanServer.isRegistered(name)) {
beanServer.unregisterMBean(name);
}
} catch (Exception e) {
// OK
}
try {
System.out.println("Register MBean: " + name);
beanServer.registerMBean(bean, name);
} catch (Exception e) {
System.out.println("Could not register MBean: " + name + e.toString());
}
}
}
| 9,869 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/TestStatCollector.java | package org.apache.helix.monitoring;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
public class TestStatCollector {
@Test()
public void testCollectData() {
StatCollector collector = new StatCollector();
int nPoints = 100;
for (int i = 0; i < nPoints; i++) {
collector.addData(i * 1000);
}
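    // The data points are 0, 1000, ..., 99000: sum = 1000 * (99 * 100 / 2) =
    // 4,950,000, mean = 49,500, max = 99,000. The 40th-percentile value of
    // 39,400 corresponds to the (n+1)-based interpolation, i.e. position
    // 0.4 * 101 = 40.4 in the sorted data: 39,000 + 0.4 * 1,000.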
AssertJUnit.assertEquals(collector.getNumDataPoints(), nPoints);
AssertJUnit.assertEquals((long) collector.getMax(), 99000);
AssertJUnit.assertEquals((long) collector.getTotalSum(), 4950000);
AssertJUnit.assertEquals((long) collector.getPercentile(40), 39400);
AssertJUnit.assertEquals((long) collector.getMean(), 49500);
AssertJUnit.assertEquals((long) collector.getMin(), 0);
collector.reset();
AssertJUnit.assertEquals(collector.getNumDataPoints(), 0);
AssertJUnit.assertEquals((long) collector.getMax(), 0);
AssertJUnit.assertEquals((long) collector.getTotalSum(), 0);
AssertJUnit.assertEquals((long) collector.getPercentile(40), 0);
AssertJUnit.assertEquals((long) collector.getMean(), 0);
AssertJUnit.assertEquals((long) collector.getMin(), 0);
}
}
| 9,870 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/TestParticipantMonitor.java | package org.apache.helix.monitoring;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.MBeanServerConnection;
import javax.management.MBeanServerNotification;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import org.apache.helix.TestHelper;
import org.apache.helix.model.Message;
import org.apache.helix.monitoring.mbeans.ClusterMBeanObserver;
import org.apache.helix.monitoring.mbeans.MonitorDomainNames;
import org.apache.helix.monitoring.mbeans.ParticipantMessageMonitor;
import org.apache.helix.monitoring.mbeans.ParticipantStatusMonitor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestParticipantMonitor {
private static Logger _logger = LoggerFactory.getLogger(TestParticipantMonitor.class);
private static String CLUSTER_NAME = TestHelper.getTestClassName();
private static final String PARTICIPANT_NAME = "participant_0";
private static final String DOMAIN_NAME = "CLMParticipantReport";
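  /**
   * Observer that, whenever a matching MBean registers, snapshots all of its
   * attribute values into _beanValueMap; the assertions read from this snapshot.
   */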
class ParticipantMonitorListener extends ClusterMBeanObserver {
Map<String, Map<String, Object>> _beanValueMap = new HashMap<>();
public ParticipantMonitorListener(String domain, String key, String value)
throws IOException, InstanceNotFoundException {
super(domain);
init(key, value);
}
void init(String key, String value) {
try {
Set<ObjectInstance> existingInstances =
_server.queryMBeans(new ObjectName(_domain + ":" + key + "=" + value + ",*"), null);
for (ObjectInstance instance : existingInstances) {
String mbeanName = instance.getObjectName().toString();
// System.out.println("mbeanName: " + mbeanName);
addMBean(instance.getObjectName());
}
} catch (Exception e) {
_logger.warn("fail to get all existing mbeans in " + _domain, e);
}
}
@Override
public void onMBeanRegistered(MBeanServerConnection server,
MBeanServerNotification mbsNotification) {
addMBean(mbsNotification.getMBeanName());
}
void addMBean(ObjectName beanName) {
try {
MBeanInfo info = _server.getMBeanInfo(beanName);
MBeanAttributeInfo[] infos = info.getAttributes();
_beanValueMap.put(beanName.toString(), new HashMap<String, Object>());
for (MBeanAttributeInfo infoItem : infos) {
Object val = _server.getAttribute(beanName, infoItem.getName());
// System.out.println(" " + infoItem.getName() + " : " +
// _server.getAttribute(beanName, infoItem.getName()) + " type : " + infoItem.getType());
_beanValueMap.get(beanName.toString()).put(infoItem.getName(), val);
}
} catch (Exception e) {
_logger.error("Error getting bean info, domain=" + _domain, e);
}
}
@Override
public void onMBeanUnRegistered(MBeanServerConnection server,
MBeanServerNotification mbsNotification) {
}
}
private ObjectName getObjectName(String name) throws MalformedObjectNameException {
return new ObjectName(
String.format("%s:%s", MonitorDomainNames.CLMParticipantReport.name(), name));
}
@Test()
public void testReportStateTransitionData()
throws InstanceNotFoundException, MalformedObjectNameException, NullPointerException,
IOException, InterruptedException, MBeanException, AttributeNotFoundException,
ReflectionException {
System.out.println("START TestParticipantStateTransitionMonitor");
ParticipantStatusMonitor monitor = new ParticipantStatusMonitor(false, null);
int monitorNum = 0;
StateTransitionContext cxt =
new StateTransitionContext(CLUSTER_NAME, "instance", "db_1", "a-b");
StateTransitionDataPoint data = new StateTransitionDataPoint(2000, 1000, 600, true);
monitor.reportTransitionStat(cxt, data);
data = new StateTransitionDataPoint(2000, 1200, 600, true);
monitor.reportTransitionStat(cxt, data);
ParticipantMonitorListener monitorListener =
new ParticipantMonitorListener(DOMAIN_NAME, "Cluster", CLUSTER_NAME);
Thread.sleep(1000);
Assert.assertEquals(monitorListener._beanValueMap.size(), monitorNum + 1);
    // Note the values in the listener's map are a snapshot taken when the MBean is detected.
Assert.assertEquals(monitorListener._beanValueMap.get(getObjectName(cxt.toString()).toString())
.get("TransitionLatencyGauge.Mean"), 2000.0);
Assert.assertEquals(monitorListener._beanValueMap.get(getObjectName(cxt.toString()).toString())
.get("TransitionExecutionLatencyGauge.Mean"), 1100.0);
Assert.assertEquals(monitorListener._beanValueMap.get(getObjectName(cxt.toString()).toString())
.get("TransitionMessageLatencyGauge.Mean"), 600.0);
Assert.assertEquals(monitorListener._beanValueMap.get(getObjectName(cxt.toString()).toString())
.get("TotalStateTransitionCounter"), 2L);
data = new StateTransitionDataPoint(2000, 500, 600, true);
monitor.reportTransitionStat(cxt, data);
Thread.sleep(1000);
Assert.assertEquals(monitorListener._beanValueMap.size(), monitorNum + 1);
data = new StateTransitionDataPoint(1000, 500, 300, true);
StateTransitionContext cxt2 =
new StateTransitionContext(CLUSTER_NAME, "instance", "db_2", "a-b");
monitor.reportTransitionStat(cxt2, data);
monitor.reportTransitionStat(cxt2, data);
Thread.sleep(1000);
    // Only one MBean is generated for db_1 and db_2, since they share the same
    // cluster and state transition
Assert.assertEquals(monitorListener._beanValueMap.size(), monitorNum + 1);
Assert.assertTrue(cxt.equals(cxt2));
Assert.assertFalse(cxt.equals(new Object()));
Assert.assertTrue(
cxt.equals(new StateTransitionContext(CLUSTER_NAME, "instance", "db_1", "a-b")));
cxt2.getInstanceName();
ParticipantMonitorListener monitorListener2 =
new ParticipantMonitorListener(DOMAIN_NAME, "Cluster", CLUSTER_NAME);
Thread.sleep(1000);
// Same here. Helix only measures per cluster + per state transitions.
Assert.assertEquals(monitorListener2._beanValueMap.size(), monitorNum + 1);
monitorListener2.disconnect();
monitorListener.disconnect();
System.out.println("END TestParticipantStateTransitionMonitor");
}
@Test()
public void testReportMessageData()
throws InstanceNotFoundException, MalformedObjectNameException, NullPointerException,
IOException, InterruptedException, MBeanException, AttributeNotFoundException,
ReflectionException {
System.out.println("START TestParticipantMessageMonitor");
ParticipantStatusMonitor monitor = new ParticipantStatusMonitor(true, PARTICIPANT_NAME);
Message message = new Message(Message.MessageType.NO_OP, "0");
monitor.reportReceivedMessage(message);
Thread.sleep(1000);
ParticipantMonitorListener monitorListener =
new ParticipantMonitorListener(DOMAIN_NAME, "ParticipantName", PARTICIPANT_NAME);
Thread.sleep(1000);
Assert.assertEquals(monitorListener._beanValueMap.size(), 2);
Assert.assertEquals(monitorListener._beanValueMap.get(
getObjectName("ParticipantName=participant_0,MonitorType=ParticipantMessageMonitor")
.toString()).get("ReceivedMessages"), 1L);
Assert.assertEquals(monitorListener._beanValueMap.get(
getObjectName("ParticipantName=participant_0,MonitorType=ParticipantMessageMonitor")
.toString()).get("PendingMessages"), 1L);
monitor
.reportProcessedMessage(message, ParticipantMessageMonitor.ProcessedMessageState.COMPLETED);
Thread.sleep(1000);
monitorListener =
new ParticipantMonitorListener(DOMAIN_NAME, "ParticipantName", PARTICIPANT_NAME);
Thread.sleep(1000);
Assert.assertEquals(monitorListener._beanValueMap.get(
getObjectName("ParticipantName=participant_0,MonitorType=ParticipantMessageMonitor")
.toString()).get("ReceivedMessages"), 1L);
Assert.assertEquals(monitorListener._beanValueMap.get(
getObjectName("ParticipantName=participant_0,MonitorType=ParticipantMessageMonitor")
.toString()).get("PendingMessages"), 0L);
Assert.assertEquals(monitorListener._beanValueMap.get(
getObjectName("ParticipantName=participant_0,MonitorType=ParticipantMessageMonitor")
.toString()).get("CompletedMessages"), 1L);
monitor.reportReceivedMessage(message);
Thread.sleep(1000);
monitorListener =
new ParticipantMonitorListener(DOMAIN_NAME, "ParticipantName", PARTICIPANT_NAME);
Thread.sleep(1000);
Assert.assertEquals(monitorListener._beanValueMap.get(
getObjectName("ParticipantName=participant_0,MonitorType=ParticipantMessageMonitor")
.toString()).get("ReceivedMessages"), 2L);
Assert.assertEquals(monitorListener._beanValueMap.get(
getObjectName("ParticipantName=participant_0,MonitorType=ParticipantMessageMonitor")
.toString()).get("PendingMessages"), 1L);
monitor
.reportProcessedMessage(message, ParticipantMessageMonitor.ProcessedMessageState.DISCARDED);
Thread.sleep(1000);
monitorListener =
new ParticipantMonitorListener(DOMAIN_NAME, "ParticipantName", PARTICIPANT_NAME);
Thread.sleep(1000);
Assert.assertEquals(monitorListener._beanValueMap.get(
getObjectName("ParticipantName=participant_0,MonitorType=ParticipantMessageMonitor")
.toString()).get("DiscardedMessages"), 1L);
Assert.assertEquals(monitorListener._beanValueMap.get(
getObjectName("ParticipantName=participant_0,MonitorType=ParticipantMessageMonitor")
.toString()).get("PendingMessages"), 0L);
monitorListener.disconnect();
System.out.println("END TestParticipantMessageMonitor");
}
}
| 9,871 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/TestClusterStatusMonitorLifecycle.java | package org.apache.helix.monitoring;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import java.util.Date;
import java.util.HashSet;
import java.util.Set;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.Query;
import javax.management.QueryExp;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.TestHelper;
import org.apache.helix.common.ZkTestBase;
import org.apache.helix.integration.manager.ClusterDistributedController;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.model.IdealState;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.ClusterVerifiers.BestPossibleExternalViewVerifier;
import org.apache.helix.tools.ClusterVerifiers.ZkHelixClusterVerifier;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
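/**
 * Verifies the lifecycle of ClusterStatus MBeans: they should be registered and
 * unregistered as participants, controllers, and resources join or leave the
 * clusters managed by a distributed controller cluster.
 */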
public class TestClusterStatusMonitorLifecycle extends ZkTestBase {
MockParticipantManager[] _participants;
ClusterDistributedController[] _controllers;
String _controllerClusterName;
String _clusterNamePrefix;
String _firstClusterName;
Set<String> _clusters = new HashSet<>();
final int n = 5;
final int clusterNb = 10;
@BeforeClass
public void beforeClass() throws Exception {
String className = TestHelper.getTestClassName();
_clusterNamePrefix = className;
System.out
.println("START " + _clusterNamePrefix + " at " + new Date(System.currentTimeMillis()));
// setup 10 clusters
for (int i = 0; i < clusterNb; i++) {
String clusterName = _clusterNamePrefix + "0_" + i;
String participantName = "localhost" + i;
String resourceName = "TestDB" + i;
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
participantName, // participant name prefix
resourceName, // resource name prefix
1, // resources
8, // partitions per resource
n, // number of nodes
3, // replicas
"MasterSlave", true); // do rebalance
_clusters.add(clusterName);
}
// setup controller cluster
_controllerClusterName = "CONTROLLER_" + _clusterNamePrefix;
TestHelper.setupCluster(_controllerClusterName, ZK_ADDR, // controller
0, // port
"controller", // participant name prefix
_clusterNamePrefix, // resource name prefix
1, // resources
clusterNb, // partitions per resource
n, // number of nodes
3, // replicas
"LeaderStandby", true); // do rebalance
// start distributed cluster controllers
_controllers = new ClusterDistributedController[n];
for (int i = 0; i < n; i++) {
_controllers[i] =
new ClusterDistributedController(ZK_ADDR, _controllerClusterName, "controller_" + i);
_controllers[i].syncStart();
}
ZkHelixClusterVerifier controllerClusterVerifier =
new BestPossibleExternalViewVerifier.Builder(_controllerClusterName).setZkClient(_gZkClient)
.setWaitTillVerify(TestHelper.DEFAULT_REBALANCE_PROCESSING_WAIT_TIME)
.build();
Assert.assertTrue(controllerClusterVerifier.verifyByPolling(),
"Controller cluster NOT in ideal state");
// start first cluster
_participants = new MockParticipantManager[n];
_firstClusterName = _clusterNamePrefix + "0_0";
for (int i = 0; i < n; i++) {
String instanceName = "localhost0_" + (12918 + i);
_participants[i] = new MockParticipantManager(ZK_ADDR, _firstClusterName, instanceName);
_participants[i].syncStart();
}
ZkHelixClusterVerifier firstClusterVerifier =
new BestPossibleExternalViewVerifier.Builder(_firstClusterName).setZkClient(_gZkClient)
.setWaitTillVerify(TestHelper.DEFAULT_REBALANCE_PROCESSING_WAIT_TIME)
.build();
// verify first cluster
Assert.assertTrue(firstClusterVerifier.verifyByPolling(), "first cluster NOT in ideal state");
// verify all the rest clusters
for (int i = 1; i < clusterNb; i++) {
ZkHelixClusterVerifier clusterVerifier =
new BestPossibleExternalViewVerifier.Builder(_clusterNamePrefix + "0_" + i)
.setZkClient(_gZkClient)
.setWaitTillVerify(TestHelper.DEFAULT_REBALANCE_PROCESSING_WAIT_TIME)
.build();
Assert.assertTrue(clusterVerifier.verifyByPolling(), "Cluster NOT in ideal state.");
}
}
@AfterClass
public void afterClass() throws Exception {
System.out.println("Cleaning up...");
cleanupControllers();
for (MockParticipantManager participant : _participants) {
if (participant != null) {
participant.syncStop();
}
}
deleteCluster(_controllerClusterName);
for (String cluster : _clusters) {
TestHelper.dropCluster(cluster, _gZkClient);
}
System.out.println("END " + _clusterNamePrefix + " at " + new Date(System.currentTimeMillis()));
}
/**
* Disconnects all the controllers one by one.
* NOTE: Invoking this method multiple times won't disconnect the controllers multiple times.
*/
private void cleanupControllers() {
for (int i = 0; i < n; i++) {
ClusterDistributedController controller = _controllers[i];
if (controller != null) {
ZkHelixClusterVerifier controllerClusterVerifier =
new BestPossibleExternalViewVerifier.Builder(controller.getClusterName()).setZkClient(
_gZkClient).setWaitTillVerify(TestHelper.DEFAULT_REBALANCE_PROCESSING_WAIT_TIME)
.build();
Assert.assertTrue(controllerClusterVerifier.verifyByPolling(),
"Controller cluster NOT in ideal state");
System.out.println(String.format("Disconnecting controller %s from cluster %s at %s",
controller.getInstanceName(), controller.getClusterName(),
new Date(System.currentTimeMillis())));
controller.syncStop();
_controllers[i] = null;
}
}
}
@Test
public void testClusterStatusMonitorLifecycle() throws Exception {
// Filter other unrelated clusters' metrics
final QueryExp exp1 =
Query.match(Query.attr("SensorName"), Query.value("*" + _clusterNamePrefix + "*"));
final Set<ObjectInstance> mbeans = new HashSet<>(ManagementFactory.getPlatformMBeanServer()
.queryMBeans(new ObjectName("ClusterStatus:*"), exp1));
_participants[0].disconnect();
// 1 participant goes away
// No change in instance/resource mbean
    // Unregister 1 per-instance resource mbean and 1 message queue mbean
final int previousMBeanCount = mbeans.size();
Assert.assertTrue(TestHelper.verify(() -> {
Set<ObjectInstance> newMbeans = new HashSet<>(ManagementFactory.getPlatformMBeanServer()
.queryMBeans(new ObjectName("ClusterStatus:*"), exp1));
mbeans.clear();
mbeans.addAll(newMbeans);
return newMbeans.size() == (previousMBeanCount - 2);
}, TestHelper.WAIT_DURATION));
HelixDataAccessor accessor = _participants[n - 1].getHelixDataAccessor();
String firstControllerName =
accessor.getProperty(accessor.keyBuilder().controllerLeader()).getId();
ClusterDistributedController firstController = null;
for (ClusterDistributedController controller : _controllers) {
if (controller.getInstanceName().equals(firstControllerName)) {
firstController = controller;
}
}
    Assert.assertNotNull(firstController);
firstController.disconnect();
ZkHelixClusterVerifier controllerClusterVerifier =
new BestPossibleExternalViewVerifier.Builder(_controllerClusterName).setZkClient(_gZkClient)
.setWaitTillVerify(TestHelper.DEFAULT_REBALANCE_PROCESSING_WAIT_TIME)
.build();
Assert.assertTrue(controllerClusterVerifier.verifyByPolling(),
"Controller cluster was not converged");
// 1 controller goes away
    // 1 message queue mbean, 1 PerInstanceResource mbean, and 1 event mbean are unregistered
final int previousMBeanCount2 = mbeans.size();
Assert.assertTrue(TestHelper.verify(() -> {
Set<ObjectInstance> newMbeans = new HashSet<>(ManagementFactory.getPlatformMBeanServer()
.queryMBeans(new ObjectName("ClusterStatus:*"), exp1));
mbeans.clear();
mbeans.addAll(newMbeans);
return newMbeans.size() == (previousMBeanCount2 - 3);
}, TestHelper.WAIT_DURATION));
String instanceName = "localhost0_" + (12918);
_participants[0] = new MockParticipantManager(ZK_ADDR, _firstClusterName, instanceName);
_participants[0].syncStart();
// 1 participant comes back
// No change in instance/resource mbean
// Register 1 per-instance resource mbean and 1 message queue mbean
final int previousMBeanCount3 = mbeans.size();
Assert.assertTrue(TestHelper.verify(() -> {
Set<ObjectInstance> newMbeans = new HashSet<>(ManagementFactory.getPlatformMBeanServer()
.queryMBeans(new ObjectName("ClusterStatus:*"), exp1));
mbeans.clear();
mbeans.addAll(newMbeans);
return newMbeans.size() == (previousMBeanCount3 + 2);
}, TestHelper.WAIT_DURATION));
// Add a resource
// Register 1 resource mbean
// Register 5 per-instance resource mbean
ClusterSetup setupTool = new ClusterSetup(ZK_ADDR);
IdealState idealState = accessor.getProperty(accessor.keyBuilder().idealStates("TestDB00"));
setupTool.addResourceToCluster(_firstClusterName, "TestDB1", idealState.getNumPartitions(),
"MasterSlave");
setupTool.rebalanceResource(_firstClusterName, "TestDB1",
Integer.parseInt(idealState.getReplicas()));
    // Adding one resource registers one PerInstanceResource mbean per participant plus 1 resource monitor
final int previousMBeanCount4 = mbeans.size();
Assert.assertTrue(TestHelper.verify(() -> {
Set<ObjectInstance> newMbeans = new HashSet<>(ManagementFactory.getPlatformMBeanServer()
.queryMBeans(new ObjectName("ClusterStatus:*"), exp1));
mbeans.clear();
mbeans.addAll(newMbeans);
return newMbeans.size() == (previousMBeanCount4 + _participants.length + 1);
}, TestHelper.WAIT_DURATION));
// Remove a resource
// No change in instance/resource mbean
// Unregister 5 per-instance resource mbean
setupTool.dropResourceFromCluster(_firstClusterName, "TestDB1");
final int previousMBeanCount5 = mbeans.size();
Assert.assertTrue(TestHelper.verify(() -> {
Set<ObjectInstance> newMbeans = new HashSet<>(ManagementFactory.getPlatformMBeanServer()
.queryMBeans(new ObjectName("ClusterStatus:*"), exp1));
mbeans.clear();
mbeans.addAll(newMbeans);
return newMbeans.size() == (previousMBeanCount5 - (_participants.length + 1));
}, TestHelper.WAIT_DURATION));
// Cleanup controllers then MBeans should all be removed.
cleanupControllers();
// Check if any MBeans leftover.
// Note that MessageQueueStatus is not bound with controller only. So it will still exist.
final QueryExp exp2 = Query
.and(Query.not(Query.match(Query.attr("SensorName"), Query.value("MessageQueueStatus.*"))),
exp1);
boolean result = TestHelper.verify(() -> ManagementFactory.getPlatformMBeanServer()
.queryMBeans(new ObjectName("ClusterStatus:*"), exp2).isEmpty(), TestHelper.WAIT_DURATION);
Assert.assertTrue(result, "Remaining MBeans: " + ManagementFactory.getPlatformMBeanServer()
.queryMBeans(new ObjectName("ClusterStatus:*"), exp2).toString());
}
}
| 9,872 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestHelixCallbackMonitor.java | package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import java.util.HashSet;
import java.util.Set;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.helix.HelixConstants;
import org.apache.helix.InstanceType;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestHelixCallbackMonitor {
private MBeanServer _beanServer = ManagementFactory.getPlatformMBeanServer();
private final InstanceType TEST_TYPE = InstanceType.PARTICIPANT;
private final String TEST_CLUSTER = "test_cluster";
private ObjectName buildObjectName(InstanceType type, String cluster,
HelixConstants.ChangeType changeType) throws MalformedObjectNameException {
return MBeanRegistrar.buildObjectName(MonitorDomainNames.HelixCallback.name(),
HelixCallbackMonitor.MONITOR_TYPE, type.name(), HelixCallbackMonitor.MONITOR_KEY, cluster,
HelixCallbackMonitor.MONITOR_CHANGE_TYPE, changeType.name());
}
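  // Registering the same monitor more than once appends an extra
  // MBeanRegistrar.DUPLICATE=<num> key to the ObjectName; this overload builds
  // the expected name for such duplicated registrations.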
private ObjectName buildObjectName(InstanceType type, String cluster,
HelixConstants.ChangeType changeType, int num) throws MalformedObjectNameException {
ObjectName objectName = buildObjectName(type, cluster, changeType);
if (num > 0) {
return new ObjectName(String
.format("%s,%s=%s", objectName.toString(), MBeanRegistrar.DUPLICATE,
String.valueOf(num)));
} else {
return objectName;
}
}
@Test
  public void testMBeanRegistration() throws JMException {
Set<HelixCallbackMonitor> monitors = new HashSet<>();
for (HelixConstants.ChangeType changeType : HelixConstants.ChangeType.values()) {
monitors.add(new HelixCallbackMonitor(TEST_TYPE, TEST_CLUSTER, null, changeType).register());
Assert.assertTrue(
_beanServer.isRegistered(buildObjectName(TEST_TYPE, TEST_CLUSTER, changeType)));
}
for (HelixConstants.ChangeType changeType : HelixConstants.ChangeType.values()) {
monitors.add(new HelixCallbackMonitor(TEST_TYPE, TEST_CLUSTER, null, changeType).register());
Assert.assertTrue(
_beanServer.isRegistered(buildObjectName(TEST_TYPE, TEST_CLUSTER, changeType, 1)));
}
for (HelixConstants.ChangeType changeType : HelixConstants.ChangeType.values()) {
monitors.add(new HelixCallbackMonitor(TEST_TYPE, TEST_CLUSTER, null, changeType).register());
Assert.assertTrue(
_beanServer.isRegistered(buildObjectName(TEST_TYPE, TEST_CLUSTER, changeType, 2)));
}
// Un-register all monitors
for (HelixCallbackMonitor monitor : monitors) {
monitor.unregister();
}
for (HelixConstants.ChangeType changeType : HelixConstants.ChangeType.values()) {
Assert.assertFalse(
_beanServer.isRegistered(buildObjectName(TEST_TYPE, TEST_CLUSTER, changeType)));
Assert.assertFalse(
_beanServer.isRegistered(buildObjectName(TEST_TYPE, TEST_CLUSTER, changeType, 1)));
Assert.assertFalse(
_beanServer.isRegistered(buildObjectName(TEST_TYPE, TEST_CLUSTER, changeType, 2)));
}
}
@Test
public void testCounter() throws JMException {
HelixCallbackMonitor monitor = new HelixCallbackMonitor(TEST_TYPE, TEST_CLUSTER, null,
HelixConstants.ChangeType.CURRENT_STATE);
monitor.register();
ObjectName name =
buildObjectName(TEST_TYPE, TEST_CLUSTER, HelixConstants.ChangeType.CURRENT_STATE);
monitor.increaseCallbackCounters(1000L);
Assert.assertEquals((long) _beanServer.getAttribute(name, "Counter"), 1);
Assert.assertEquals((long) _beanServer.getAttribute(name, "LatencyCounter"), 1000L);
Assert.assertEquals((long) _beanServer.getAttribute(name, "LatencyGauge.Max"), 1000L);
monitor.unregister();
}
}
| 9,873 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestZkClientMonitor.java | package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import javax.management.AttributeNotFoundException;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.apache.helix.SystemPropertyKeys.HELIX_MONITOR_TIME_WINDOW_LENGTH_MS;
public class TestZkClientMonitor {
private MBeanServer _beanServer = ManagementFactory.getPlatformMBeanServer();
private ObjectName buildObjectName(String tag, String key, String instance) throws MalformedObjectNameException {
return ZkClientMonitor.getObjectName(tag, key, instance);
}
private ObjectName buildObjectName(String tag, String key, String instance, int num)
throws MalformedObjectNameException {
ObjectName objectName = buildObjectName(tag, key, instance);
if (num > 0) {
return new ObjectName(String
.format("%s,%s=%s", objectName.toString(), MBeanRegistrar.DUPLICATE,
String.valueOf(num)));
} else {
return objectName;
}
}
private ObjectName buildPathMonitorObjectName(String tag, String key, String instance, String path)
throws MalformedObjectNameException {
return new ObjectName(String
.format("%s,%s=%s", buildObjectName(tag, key, instance).toString(), ZkClientPathMonitor.MONITOR_PATH, path));
}
@Test
  public void testMBeanRegistration() throws JMException {
final String TEST_TAG_1 = "test_tag_1";
final String TEST_KEY_1 = "test_key_1";
ZkClientMonitor monitor = new ZkClientMonitor(TEST_TAG_1, TEST_KEY_1, null, true, null);
Assert.assertFalse(_beanServer.isRegistered(buildObjectName(TEST_TAG_1, TEST_KEY_1, null)));
monitor.register();
Assert.assertTrue(_beanServer.isRegistered(buildObjectName(TEST_TAG_1, TEST_KEY_1, null)));
// no per-path monitor items created since "monitorRootPathOnly" = true
Assert.assertFalse(_beanServer.isRegistered(
buildPathMonitorObjectName(TEST_TAG_1, TEST_KEY_1, null,
ZkClientPathMonitor.PredefinedPath.IdealStates.name())));
ZkClientMonitor monitorDuplicate = new ZkClientMonitor(TEST_TAG_1, TEST_KEY_1, null, true, null);
monitorDuplicate.register();
Assert.assertTrue(_beanServer.isRegistered(buildObjectName(TEST_TAG_1, TEST_KEY_1, null, 1)));
monitor.unregister();
monitorDuplicate.unregister();
Assert.assertFalse(_beanServer.isRegistered(buildObjectName(TEST_TAG_1, TEST_KEY_1, null)));
Assert.assertFalse(_beanServer.isRegistered(buildObjectName(TEST_TAG_1, TEST_KEY_1, null, 1)));
}
@Test
public void testCounter() throws JMException {
final String TEST_TAG = "test_tag_3";
final String TEST_KEY = "test_key_3";
final String TEST_INSTANCE = "test_instance_3";
ZkClientMonitor monitor = new ZkClientMonitor(TEST_TAG, TEST_KEY, TEST_INSTANCE, false, null);
monitor.register();
ObjectName name = buildObjectName(TEST_TAG, TEST_KEY, TEST_INSTANCE);
ObjectName rootName = buildPathMonitorObjectName(TEST_TAG, TEST_KEY,
TEST_INSTANCE, ZkClientPathMonitor.PredefinedPath.Root.name());
ObjectName idealStateName = buildPathMonitorObjectName(TEST_TAG, TEST_KEY, TEST_INSTANCE,
ZkClientPathMonitor.PredefinedPath.IdealStates.name());
ObjectName instancesName = buildPathMonitorObjectName(TEST_TAG, TEST_KEY, TEST_INSTANCE,
ZkClientPathMonitor.PredefinedPath.Instances.name());
ObjectName currentStateName = buildPathMonitorObjectName(TEST_TAG, TEST_KEY, TEST_INSTANCE,
ZkClientPathMonitor.PredefinedPath.CurrentStates.name());
monitor.increaseDataChangeEventCounter();
long eventCount = (long) _beanServer.getAttribute(name, "DataChangeEventCounter");
Assert.assertEquals(eventCount, 1);
monitor.increaseStateChangeEventCounter();
long stateChangeCount = (long) _beanServer.getAttribute(name, "StateChangeEventCounter");
Assert.assertEquals(stateChangeCount, 1);
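    // Note: "increasExpiredSessionCounter" below (missing an 'e') mirrors the method
    // name as it appears in the ZkClientMonitor API, so it is left unchanged here.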
monitor.increasExpiredSessionCounter();
long expiredSessionCount = (long) _beanServer.getAttribute(name, "ExpiredSessionCounter");
Assert.assertEquals(expiredSessionCount, 1);
monitor.increaseOutstandingRequestGauge();
long requestGauge = (long) _beanServer.getAttribute(name, "OutstandingRequestGauge");
Assert.assertEquals(requestGauge, 1);
monitor.decreaseOutstandingRequestGauge();
requestGauge = (long) _beanServer.getAttribute(name, "OutstandingRequestGauge");
Assert.assertEquals(requestGauge, 0);
try {
_beanServer.getAttribute(name, "PendingCallbackGauge");
Assert.fail();
} catch (AttributeNotFoundException ex) {
      // Expected: AttributeNotFoundException, because this metric is not registered
      // with the MBean server.
}
monitor.record("TEST/IDEALSTATES/myResource", 0, System.currentTimeMillis() - 10,
ZkClientMonitor.AccessType.READ);
Assert.assertEquals((long) _beanServer.getAttribute(rootName, "ReadCounter"), 1);
Assert.assertEquals((long) _beanServer.getAttribute(idealStateName, "ReadCounter"), 1);
Assert.assertTrue((long) _beanServer.getAttribute(rootName, "ReadLatencyGauge.Max") >= 10);
monitor.record("TEST/INSTANCES/testDB0", 0, System.currentTimeMillis() - 15,
ZkClientMonitor.AccessType.READ);
Assert.assertEquals((long) _beanServer.getAttribute(rootName, "ReadCounter"), 2);
Assert.assertEquals((long) _beanServer.getAttribute(instancesName, "ReadCounter"), 1);
Assert.assertEquals((long) _beanServer.getAttribute(idealStateName, "ReadCounter"), 1);
Assert.assertTrue((long) _beanServer.getAttribute(rootName, "ReadTotalLatencyCounter") >= 25);
monitor.record("TEST/INSTANCES/node_1/CURRENTSTATES/session_1/Resource", 5,
System.currentTimeMillis() - 10, ZkClientMonitor.AccessType.WRITE);
Assert.assertEquals((long) _beanServer.getAttribute(rootName, "WriteCounter"), 1);
Assert.assertEquals((long) _beanServer.getAttribute(currentStateName, "WriteCounter"), 1);
Assert.assertEquals((long) _beanServer.getAttribute(currentStateName, "WriteBytesCounter"), 5);
Assert.assertEquals((long) _beanServer.getAttribute(instancesName, "WriteCounter"), 1);
Assert.assertEquals((long) _beanServer.getAttribute(instancesName, "WriteBytesCounter"), 5);
Assert.assertTrue((long) _beanServer.getAttribute(rootName, "WriteTotalLatencyCounter") >= 10);
Assert
.assertTrue((long) _beanServer.getAttribute(instancesName, "WriteLatencyGauge.Max") >= 10);
Assert.assertTrue(
(long) _beanServer.getAttribute(instancesName, "WriteTotalLatencyCounter") >= 10);
monitor.recordDataPropagationLatency("TEST/INSTANCES/node_1/CURRENTSTATES/session_1/Resource",
5);
String dataPropagationLatencyGaugeAttr =
ZkClientPathMonitor.PredefinedMetricDomains.DataPropagationLatencyGauge.name() + ".Max";
Assert.assertEquals((long) _beanServer.getAttribute(rootName, dataPropagationLatencyGaugeAttr),
5);
Assert.assertEquals(
(long) _beanServer.getAttribute(currentStateName, dataPropagationLatencyGaugeAttr), 5);
Assert.assertEquals(
(long) _beanServer.getAttribute(idealStateName, dataPropagationLatencyGaugeAttr), 0);
}
@Test
public void testCustomizedResetInterval() throws JMException, InterruptedException {
// Use a customized reservoir sliding length of 1 ms.
String timeWindowBackup = System.getProperty(HELIX_MONITOR_TIME_WINDOW_LENGTH_MS);
System.setProperty(HELIX_MONITOR_TIME_WINDOW_LENGTH_MS, "1");
final String TEST_TAG = "test_tag_x";
final String TEST_KEY = "test_key_x";
final String TEST_INSTANCE = "test_instance_x";
String dataPropagationLatencyGaugeAttr =
ZkClientPathMonitor.PredefinedMetricDomains.DataPropagationLatencyGauge.name() + ".Max";
ZkClientMonitor monitor = new ZkClientMonitor(TEST_TAG, TEST_KEY, TEST_INSTANCE, false, null);
monitor.register();
ObjectName rootName = buildPathMonitorObjectName(TEST_TAG, TEST_KEY, TEST_INSTANCE,
ZkClientPathMonitor.PredefinedPath.Root.name());
monitor
.recordDataPropagationLatency("TEST/INSTANCES/node_1/CURRENTSTATES/session_1/Resource", 5);
Assert
.assertEquals((long) _beanServer.getAttribute(rootName, dataPropagationLatencyGaugeAttr),
5);
    // The reservoir window is only 1 ms, so after sleeping 10 ms the previous max of 5
    // is no longer valid.
Thread.sleep(10);
monitor
.recordDataPropagationLatency("TEST/INSTANCES/node_1/CURRENTSTATES/session_1/Resource", 4);
Assert
.assertEquals((long) _beanServer.getAttribute(rootName, dataPropagationLatencyGaugeAttr),
4);
// Reset the customized reservoir sliding length.
// Otherwise, reservoir sliding length would be kept to 1 ms for the histogram metrics
// in later unit tests and cause later tests' failure.
if (timeWindowBackup == null) {
System.clearProperty(HELIX_MONITOR_TIME_WINDOW_LENGTH_MS);
} else {
System.setProperty(HELIX_MONITOR_TIME_WINDOW_LENGTH_MS, timeWindowBackup);
}
}
}
| 9,874 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestResetClusterMetrics.java | package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.apache.helix.tools.ClusterStateVerifier;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestResetClusterMetrics extends ZkUnitTestBase {
/**
* Ensure cluster status lifecycle is tied to controller leader status
*/
@Test
public void testControllerDisconnect() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
// Set up a cluster with one of everything
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, "localhost", "Resource", 1, 1, 1, 1,
"OnlineOffline", RebalanceMode.FULL_AUTO, true);
// Add a participant
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, "localhost_12918");
participant.syncStart();
// Add a controller
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
controller.syncStart();
// Make sure everything gets assigned
Thread.sleep(1000);
boolean result =
ClusterStateVerifier
.verifyByZkCallback(new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
clusterName));
Assert.assertTrue(result);
// Check the metrics
Assert.assertTrue(metricsExist(clusterName, participant.getInstanceName()));
// Stop the controller
controller.syncStop();
participant.syncStop();
TestHelper.dropCluster(clusterName, _gZkClient);
// Check the metrics
Thread.sleep(1000);
Assert.assertFalse(metricsExist(clusterName, participant.getInstanceName()));
}
private boolean metricsExist(String clusterName, String instanceName) throws Exception {
MBeanServer server = ManagementFactory.getPlatformMBeanServer();
String instanceBeanName =
ClusterStatusMonitor.CLUSTER_DN_KEY + "=" + clusterName + ","
+ ClusterStatusMonitor.INSTANCE_DN_KEY + "=" + instanceName;
boolean instanceBeanFound;
try {
MBeanInfo info = server.getMBeanInfo(objectName(instanceBeanName));
instanceBeanFound = info != null;
} catch (InstanceNotFoundException e) {
instanceBeanFound = false;
}
String clusterBeanName = ClusterStatusMonitor.CLUSTER_DN_KEY + "=" + clusterName;
boolean clusterBeanFound;
try {
MBeanInfo info = server.getMBeanInfo(objectName(clusterBeanName));
clusterBeanFound = info != null;
} catch (InstanceNotFoundException e) {
clusterBeanFound = false;
}
return instanceBeanFound && clusterBeanFound;
}
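  // In short, metricsExist probes two beans under the "ClusterStatus" domain: the
  // cluster bean (CLUSTER_DN_KEY=<cluster>) and the instance bean (the same plus
  // INSTANCE_DN_KEY=<instance>); getMBeanInfo throwing InstanceNotFoundException is
  // interpreted as "bean absent".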
private ObjectName objectName(String beanName) throws Exception {
return new ObjectName(MonitorDomainNames.ClusterStatus.name() + ":" + beanName);
}
}
| 9,875 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestTopStateHandoffMetrics.java | package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.Range;
import org.apache.helix.HelixConstants;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.BaseStageTest;
import org.apache.helix.controller.stages.CurrentStateComputationStage;
import org.apache.helix.controller.stages.ReadClusterDataStage;
import org.apache.helix.controller.stages.TopStateHandoffReportStage;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.Resource;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
public class TestTopStateHandoffMetrics extends BaseStageTest {
public final static String TEST_INPUT_FILE = "TestTopStateHandoffMetrics.json";
public final static String TEST_RESOURCE = "TestResource";
public final static String PARTITION = "PARTITION";
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private static final String NON_GRACEFUL_HANDOFF_DURATION = "PartitionTopStateNonGracefulHandoffGauge.Max";
private static final String GRACEFUL_HANDOFF_DURATION = "PartitionTopStateHandoffDurationGauge.Max";
private static final String HANDOFF_HELIX_LATENCY = "PartitionTopStateHandoffHelixLatencyGauge.Max";
private static final Range<Long> DURATION_ZERO = Range.closed(0L, 0L);
private TestConfig config;
@BeforeClass
public void beforeClass() {
super.beforeClass();
try {
config = OBJECT_MAPPER
.readValue(getClass().getClassLoader().getResourceAsStream(TEST_INPUT_FILE), TestConfig.class);
} catch (IOException e) {
e.printStackTrace();
}
}
private static class CurrentStateInfo {
String currentState;
String previousState;
long startTime;
long endTime;
@JsonCreator
public CurrentStateInfo(
@JsonProperty("CurrentState") String cs,
@JsonProperty("PreviousState") String ps,
@JsonProperty("StartTime") long start,
@JsonProperty("EndTime") long end
) {
currentState = cs;
previousState = ps;
startTime = start;
endTime = end;
}
}
private static class TestCaseConfig {
final Map<String, CurrentStateInfo> initialCurrentStates;
final Map<String, CurrentStateInfo> currentStateWithMissingTopState;
final Map<String, CurrentStateInfo> finalCurrentState;
final long duration;
final boolean isGraceful;
final long helixLatency;
@JsonCreator
public TestCaseConfig(
@JsonProperty("InitialCurrentStates") Map<String, CurrentStateInfo> initial,
@JsonProperty("MissingTopStates") Map<String, CurrentStateInfo> missing,
@JsonProperty("HandoffCurrentStates") Map<String, CurrentStateInfo> handoff,
@JsonProperty("Duration") long d,
@JsonProperty("HelixLatency") long helix,
@JsonProperty("IsGraceful") boolean graceful
) {
initialCurrentStates = initial;
currentStateWithMissingTopState = missing;
finalCurrentState = handoff;
duration = d;
helixLatency = helix;
isGraceful = graceful;
}
}
private static class TestConfig {
final List<TestCaseConfig> succeeded;
final List<TestCaseConfig> failed;
final List<TestCaseConfig> fast;
final List<TestCaseConfig> succeededNonGraceful;
final List<TestCaseConfig> failedWithoutRecovery;
@JsonCreator
public TestConfig(
@JsonProperty("succeeded") List<TestCaseConfig> succeededCfg,
@JsonProperty("failed") List<TestCaseConfig> failedCfg,
@JsonProperty("fast") List<TestCaseConfig> fastCfg,
@JsonProperty("succeededNonGraceful") List<TestCaseConfig> nonGraceful,
@JsonProperty("failedWithoutRecovery") List<TestCaseConfig> unrecoveredFailedCfg
) {
succeeded = succeededCfg;
failed = failedCfg;
fast = fastCfg;
succeededNonGraceful = nonGraceful;
failedWithoutRecovery = unrecoveredFailedCfg;
}
}
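  // A hedged sketch of the JSON shape these classes deserialize from TEST_INPUT_FILE
  // (all values below are hypothetical, not copied from the real resource file):
  // {
  //   "succeeded": [{
  //     "InitialCurrentStates": {"localhost_0": {"CurrentState": "MASTER",
  //       "PreviousState": "SLAVE", "StartTime": 1000, "EndTime": 2000}, ...},
  //     "MissingTopStates": {...},
  //     "HandoffCurrentStates": {...},
  //     "Duration": 2000, "HelixLatency": 500, "IsGraceful": true
  //   }],
  //   "failed": [...], "fast": [...],
  //   "succeededNonGraceful": [...], "failedWithoutRecovery": [...]
  // }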
private void preSetup() {
setupLiveInstances(3);
setupStateModel();
Resource resource = new Resource(TEST_RESOURCE);
resource.setStateModelDefRef("MasterSlave");
resource.addPartition(PARTITION);
event.addAttribute(AttributeName.RESOURCES.name(),
Collections.singletonMap(TEST_RESOURCE, resource));
event.addAttribute(AttributeName.LastRebalanceFinishTimeStamp.name(),
TopStateHandoffReportStage.TIMESTAMP_NOT_RECORDED);
ClusterStatusMonitor monitor = new ClusterStatusMonitor("TestCluster");
monitor.active();
event.addAttribute(AttributeName.clusterStatusMonitor.name(), monitor);
}
@Test(dataProvider = "successCurrentStateInput")
public void testTopStateSuccessHandoff(TestCaseConfig cfg) {
runTestWithNoInjection(cfg, false);
}
@Test(dataProvider = "fastCurrentStateInput")
public void testFastTopStateHandoffWithNoMissingTopState(TestCaseConfig cfg) {
runTestWithNoInjection(cfg, false);
}
@Test(dataProvider = "fastCurrentStateInput")
public void testFastTopStateHandoffWithNoMissingTopStateAndOldInstanceCrash(TestCaseConfig cfg) {
preSetup();
event.addAttribute(AttributeName.LastRebalanceFinishTimeStamp.name(), 7500L);
    // By simulating the last master instance's crash, we now have:
    // - M->S from 6000 to 7000
    // - lastPipelineFinishTimestamp at 7500
    // - S->M from 8000 to 9000
    // Therefore the recorded latency should be 9000 - 7500 = 1500. Even though the
    // original master crashed, this is a single top-state handoff observed within one
    // pipeline run, so we treat it as graceful and only record the user latency of the
    // transition to master.
Range<Long> expectedDuration = Range.closed(1500L, 1500L);
Range<Long> expectedHelixLatency = Range.closed(500L, 500L);
runStageAndVerify(
cfg.initialCurrentStates, cfg.currentStateWithMissingTopState, cfg.finalCurrentState,
new MissingStatesDataCacheInject() {
@Override
public void doInject(ResourceControllerDataProvider cache) {
Map<String, LiveInstance> liMap = new HashMap<>(cache.getLiveInstances());
liMap.remove("localhost_1");
cache.setLiveInstances(new ArrayList<>(liMap.values()));
}
        }, 1, 0, 0, // threshold value not set, hence no change in the gauge
expectedDuration,
DURATION_ZERO,
expectedDuration, expectedHelixLatency
);
event.addAttribute(AttributeName.LastRebalanceFinishTimeStamp.name(),
TopStateHandoffReportStage.TIMESTAMP_NOT_RECORDED);
}
@Test(dataProvider = "succeededNonGraceful")
public void testTopStateSuccessfulYetNonGracefulHandoff(TestCaseConfig cfg) {
// localhost_0 crashed at 15000
// localhost_1 slave -> master started 20000, ended 22000, top state handoff = 7000
preSetup();
final String downInstance = "localhost_0";
final Long lastOfflineTime = 15000L;
Range<Long> expectedDuration = Range.closed(7000L, 7000L);
runStageAndVerify(
cfg.initialCurrentStates, cfg.currentStateWithMissingTopState, cfg.finalCurrentState,
new MissingStatesDataCacheInject() {
@Override
public void doInject(ResourceControllerDataProvider cache) {
accessor.removeProperty(accessor.keyBuilder().liveInstance(downInstance));
Map<String, LiveInstance> liMap = new HashMap<>(cache.getLiveInstances());
liMap.remove("localhost_0");
cache.setLiveInstances(new ArrayList<>(liMap.values()));
cache.getInstanceOfflineTimeMap().put("localhost_0", lastOfflineTime);
cache.notifyDataChange(HelixConstants.ChangeType.LIVE_INSTANCE);
}
        }, 1, 0, 0, // threshold value not set, hence no change in the gauge.
DURATION_ZERO, // graceful handoff duration should be 0
        expectedDuration, // we should have a record for the non-graceful handoff
expectedDuration, // max handoff should be same as non-graceful handoff
DURATION_ZERO // we don't record user latency for non-graceful transition
);
}
@Test(dataProvider = "failedCurrentStateInput")
public void testTopStateFailedHandoff(TestCaseConfig cfg) {
    // There are two scenarios here:
    // 1. localhost_0 loses the top state at 15000 and the top state is recovered on
    //    localhost_1 at 22000. The handoff took 7000, which is greater than the
    //    threshold (5000).
    // 2. localhost_0 loses the top state at 15000 and recovers at 18000.
    // In both scenarios recovery has been achieved, so both the failed counter and
    // missingTopStatePartitionsThresholdGauge will be set to 0.
ClusterConfig clusterConfig = new ClusterConfig(_clusterName);
clusterConfig.setMissTopStateDurationThreshold(5000L);
setClusterConfig(clusterConfig);
runTestWithNoInjection(cfg, true);
}
@Test(dataProvider = "failedWithoutRecovery")
public void testTopStateFailedUnrecoveredHandoff(TestCaseConfig cfg) {
    // Scenario: localhost_0 loses the top state at 15000 and it is NEVER recovered.
    // In this scenario the missingTopStatePartitionsThresholdGauge value should be set to 1.
ClusterConfig clusterConfig = new ClusterConfig(_clusterName);
clusterConfig.setMissTopStateDurationThreshold(5000L);
setClusterConfig(clusterConfig);
preSetup();
Range<Long> duration = Range.closed(cfg.duration, cfg.duration);
Range<Long> expectedDuration = cfg.isGraceful ? duration : DURATION_ZERO;
Range<Long> expectedNonGracefulDuration = cfg.isGraceful ? DURATION_ZERO : duration;
Range<Long> expectedHelixLatency = cfg.isGraceful ? Range.closed(cfg.helixLatency, cfg.helixLatency) : DURATION_ZERO;
runStageAndVerify(cfg.initialCurrentStates, cfg.currentStateWithMissingTopState,
cfg.finalCurrentState, null, 0,
        1, 1, // Top state never recovered, so the failed counter and the beyond-threshold gauge both report 1
expectedDuration, expectedNonGracefulDuration,
expectedDuration, expectedHelixLatency);
}
  // Test success with no available clue about the previous master.
  // For example, the controller has just moved to a new node.
@Test(
dataProvider = "successCurrentStateInput",
dependsOnMethods = "testHandoffDurationWithPendingMessage"
)
public void testHandoffDurationWithDefaultStartTime(final TestCaseConfig cfg) {
preSetup();
    // No initialCurrentStates means no input can be used as a clue about the previous
    // master. In such a case, reportTopStateMissing will use the current system time as
    // the missing-top-state start time; we assume it is a graceful handoff, and only the
    // "to top state" user latency will be recorded.
long helixLatency = 1000;
long userLatency = 1000;
for (CurrentStateInfo states : cfg.finalCurrentState.values()) {
if (states.currentState.equals("MASTER")) {
states.endTime = System.currentTimeMillis() + helixLatency + userLatency;
states.startTime = System.currentTimeMillis() + helixLatency;
break;
}
}
// actual timestamp when running the stage will be later than current time, so the expected
// helix latency will be less than the mocked helix latency
runStageAndVerify(Collections.EMPTY_MAP, cfg.currentStateWithMissingTopState,
cfg.finalCurrentState, null, 1, 0,
        0, // No previous missingTopStateRecord, so no change in the gauge
Range.closed(0L, helixLatency + userLatency),
DURATION_ZERO,
Range.closed(0L, helixLatency + userLatency),
Range.closed(0L, helixLatency)
);
}
  /**
   * Test success with only a pending message as the clue.
   * For instance, if the master was dropped, there is no way to track the drop time.
   * So we either use the current system time,
   * @see org.apache.helix.monitoring.mbeans.TestTopStateHandoffMetrics#testHandoffDurationWithDefaultStartTime
   * or check whether any pending message can be used as the start time.
   */
@Test(dataProvider = "successCurrentStateInput", dependsOnMethods = "testTopStateSuccessHandoff")
public void testHandoffDurationWithPendingMessage(final TestCaseConfig cfg) {
final long messageTimeBeforeMasterless = 145;
preSetup();
long durationToVerify = cfg.duration + messageTimeBeforeMasterless;
long userLatency = 0;
for (CurrentStateInfo info : cfg.finalCurrentState.values()) {
if (info.currentState.equals("MASTER")) {
userLatency = info.endTime - info.startTime;
}
}
long helixLatency = durationToVerify - userLatency;
    // No initialCurrentStates means no input can be used as a clue about the previous
    // master. In this case we treat the handoff as graceful, and only the to-master
    // user latency will be recorded.
runStageAndVerify(
Collections.EMPTY_MAP, cfg.currentStateWithMissingTopState, cfg.finalCurrentState,
new MissingStatesDataCacheInject() {
@Override public void doInject(ResourceControllerDataProvider cache) {
String topStateNode = null;
for (String instance : cfg.initialCurrentStates.keySet()) {
if (cfg.initialCurrentStates.get(instance).currentState.equals("MASTER")) {
topStateNode = instance;
break;
}
}
// Simulate the previous top state instance goes offline
if (topStateNode != null) {
long originalStartTime = cfg.currentStateWithMissingTopState.get(topStateNode).startTime;
// Inject a message that fit expectedDuration
Message message =
new Message(Message.MessageType.STATE_TRANSITION, "thisisafakemessage");
message.setTgtSessionId(SESSION_PREFIX + topStateNode.split("_")[1]);
message.setToState("MASTER");
message.setCreateTimeStamp(originalStartTime - messageTimeBeforeMasterless);
message.setTgtName(topStateNode);
message.setResourceName(TEST_RESOURCE);
message.setPartitionName(PARTITION);
cache.cacheMessages(Collections.singletonList(message));
}
}
}, 1, 0, 0,
Range.closed(durationToVerify, durationToVerify),
DURATION_ZERO,
Range.closed(durationToVerify, durationToVerify),
Range.closed(helixLatency, helixLatency));
}
@DataProvider(name = "successCurrentStateInput")
public Object[][] successCurrentState() {
return testCaseConfigListToObjectArray(config.succeeded);
}
@DataProvider(name = "failedCurrentStateInput")
public Object[][] failedCurrentState() {
return testCaseConfigListToObjectArray(config.failed);
}
@DataProvider(name = "failedWithoutRecovery")
public Object[][] failedWithoutRecovery() {
return testCaseConfigListToObjectArray(config.failedWithoutRecovery);
}
@DataProvider(name = "fastCurrentStateInput")
public Object[][] fastCurrentState() {
return testCaseConfigListToObjectArray(config.fast);
}
@DataProvider(name = "succeededNonGraceful")
public Object[][] nonGracefulCurrentState() {
return testCaseConfigListToObjectArray(config.succeededNonGraceful);
}
private Object[][] testCaseConfigListToObjectArray(List<TestCaseConfig> configs) {
Object[][] result = new Object[configs.size()][];
for (int i = 0; i < configs.size(); i++) {
result[i] = new Object[] {configs.get(i)};
}
return result;
}
private void runTestWithNoInjection(TestCaseConfig cfg, boolean expectFail) {
preSetup();
Range<Long> duration = Range.closed(cfg.duration, cfg.duration);
Range<Long> expectedDuration = cfg.isGraceful ? duration : DURATION_ZERO;
Range<Long> expectedNonGracefulDuration = cfg.isGraceful ? DURATION_ZERO : duration;
Range<Long> expectedHelixLatency =
cfg.isGraceful ? Range.closed(cfg.helixLatency, cfg.helixLatency) : DURATION_ZERO;
runStageAndVerify(cfg.initialCurrentStates, cfg.currentStateWithMissingTopState,
cfg.finalCurrentState, null, expectFail ? 0 : 1, expectFail ? 1 : 0, 0, expectedDuration, expectedNonGracefulDuration,
expectedDuration, expectedHelixLatency);
}
private Map<String, CurrentState> generateCurrentStateMap(
Map<String, CurrentStateInfo> currentStateRawData) {
Map<String, CurrentState> currentStateMap = new HashMap<String, CurrentState>();
for (String instanceName : currentStateRawData.keySet()) {
CurrentStateInfo info = currentStateRawData.get(instanceName);
CurrentState currentState = new CurrentState(TEST_RESOURCE);
currentState.setSessionId(SESSION_PREFIX + instanceName.split("_")[1]);
currentState.setState(PARTITION, info.currentState);
currentState.setPreviousState(PARTITION, info.previousState);
currentState.setStartTime(PARTITION, info.startTime);
currentState.setEndTime(PARTITION, info.endTime);
currentStateMap.put(instanceName, currentState);
}
return currentStateMap;
}
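  // runPipeLine below replays the controller pipeline in up to three passes: (1) the
  // initial current states, (2) the states with the top state missing (optionally
  // mutating the cluster data cache via the injection hook), and (3) the post-handoff
  // states. Each pass runs the ReadClusterData, CurrentStateComputation and
  // TopStateHandoffReport stages through doRunStages.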
private void runPipeLine(Map<String, CurrentStateInfo> initialCurrentStates,
Map<String, CurrentStateInfo> missingTopStates,
Map<String, CurrentStateInfo> handOffCurrentStates,
MissingStatesDataCacheInject testInjection) {
if (initialCurrentStates != null && !initialCurrentStates.isEmpty()) {
doRunStages(initialCurrentStates, null);
}
if (missingTopStates != null && !missingTopStates.isEmpty()) {
doRunStages(missingTopStates, testInjection);
}
if (handOffCurrentStates != null && !handOffCurrentStates.isEmpty()) {
doRunStages(handOffCurrentStates, null);
}
}
private void doRunStages(Map<String, CurrentStateInfo> currentStates,
MissingStatesDataCacheInject clusterDataInjection) {
setupCurrentStates(generateCurrentStateMap(currentStates));
runStage(event, new ReadClusterDataStage());
if (clusterDataInjection != null) {
ResourceControllerDataProvider cache = event.getAttribute(AttributeName.ControllerDataProvider.name());
clusterDataInjection.doInject(cache);
}
runStage(event, new CurrentStateComputationStage());
runStage(event, new TopStateHandoffReportStage());
}
private void runStageAndVerify(
Map<String, CurrentStateInfo> initialCurrentStates,
Map<String, CurrentStateInfo> missingTopStates,
Map<String, CurrentStateInfo> handOffCurrentStates,
MissingStatesDataCacheInject inject,
int successCnt,
int failCnt,
int missingTopStatesBeyondThresholdCnt,
Range<Long> expectedDuration,
Range<Long> expectedNonGracefulDuration,
Range<Long> expectedMaxDuration,
Range<Long> expectedHelixLatency
) {
event.addAttribute(AttributeName.ControllerDataProvider.name(),
new ResourceControllerDataProvider());
runPipeLine(initialCurrentStates, missingTopStates, handOffCurrentStates, inject);
ClusterStatusMonitor clusterStatusMonitor =
event.getAttribute(AttributeName.clusterStatusMonitor.name());
ResourceMonitor monitor = clusterStatusMonitor.getResourceMonitor(TEST_RESOURCE);
Assert.assertEquals(monitor.getSucceededTopStateHandoffCounter(), successCnt);
Assert.assertEquals(monitor.getFailedTopStateHandoffCounter(), failCnt);
Assert.assertEquals(monitor.getMissingTopStatePartitionsBeyondThresholdGuage(), missingTopStatesBeyondThresholdCnt);
long graceful = monitor.getPartitionTopStateHandoffDurationGauge()
.getAttributeValue(GRACEFUL_HANDOFF_DURATION).longValue();
long nonGraceful = monitor.getPartitionTopStateNonGracefulHandoffDurationGauge()
.getAttributeValue(NON_GRACEFUL_HANDOFF_DURATION).longValue();
long helix = monitor.getPartitionTopStateHandoffHelixLatencyGauge()
.getAttributeValue(HANDOFF_HELIX_LATENCY).longValue();
long max = monitor.getMaxSinglePartitionTopStateHandoffDurationGauge();
Assert.assertTrue(expectedDuration.contains(graceful));
Assert.assertTrue(expectedNonGracefulDuration.contains(nonGraceful));
Assert.assertTrue(expectedHelixLatency.contains(helix));
Assert.assertTrue(expectedMaxDuration.contains(max));
}
interface MissingStatesDataCacheInject {
void doInject(ResourceControllerDataProvider cache);
}
}
| 9,876 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestTaskPerformanceMetrics.java | package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
import javax.management.MBeanServerConnection;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.Query;
import javax.management.QueryExp;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.TestHelper;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.task.MockTask;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.TaskConfig;
import org.apache.helix.task.TaskSynchronizedTestBase;
import org.apache.helix.task.Workflow;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
* Tests that performance profiling metrics via JobMonitorMBean are computed correctly.
*/
public class TestTaskPerformanceMetrics extends TaskSynchronizedTestBase {
private static final long TASK_LATENCY = 100L;
// Configurable values for test setup
private static final MBeanServerConnection _server = ManagementFactory.getPlatformMBeanServer();
private Map<String, Object> _beanValueMap = new HashMap<>();
@BeforeClass
public void beforeClass() throws Exception {
setSingleTestEnvironment();
super.beforeClass();
}
/**
* Test the following metrics are dynamically emitted:
* SubmissionToStartDelay
* ControllerInducedDelay
 * The test schedules a workflow with 30 jobs, each with one task that takes TASK_LATENCY ms.
* AllowOverlapJobAssignment is false, so these jobs will be run in series, one at a time.
* With this setup, we can assume that the mean value of the metrics above will increase every
* time we poll at some interval greater than TASK_LATENCY.
* @throws Exception
*/
@Test
public void testTaskPerformanceMetrics() throws Exception {
// Create a workflow
JobConfig.Builder jobConfigBuilder = new JobConfig.Builder();
TaskConfig.Builder taskConfigBuilder = new TaskConfig.Builder();
List<TaskConfig> taskConfigs = new ArrayList<>();
TaskConfig taskConfig = taskConfigBuilder.setTaskId("1").setCommand("Reindex").build();
taskConfig.getConfigMap().put("Latency", Long.toString(TASK_LATENCY));
taskConfigs.add(taskConfig);
jobConfigBuilder.addTaskConfigs(taskConfigs)
.setJobCommandConfigMap(ImmutableMap.of(MockTask.JOB_DELAY, Long.toString(TASK_LATENCY)));
Workflow.Builder workflowBuilder = new Workflow.Builder("wf");
for (int i = 0; i < 30; i++) {
workflowBuilder.addJob("job_" + i, jobConfigBuilder);
}
Workflow workflow = workflowBuilder.build();
// Start the controller and start the workflow
_controller = new ClusterControllerManager(ZK_ADDR, CLUSTER_NAME);
_controller.syncStart();
_driver.start(workflow);
    // Confirm that metrics are computed dynamically here and that they keep increasing
    // because jobs are processed one by one
    double oldSubmissionToStartDelay = 0.0d;
    double oldControllerInducedDelay = -1.0d;
for (int i = 0; i < 5; i++) {
// Wait until new dynamic metrics are updated.
final double oldDelay = oldSubmissionToStartDelay;
TestHelper.verify(() -> {
extractMetrics();
return ((double) _beanValueMap.getOrDefault("SubmissionToScheduleDelayGauge.Mean", 0.0d))
> oldDelay
&& ((double) _beanValueMap.getOrDefault("SubmissionToProcessDelayGauge.Mean", 0.0d))
> 0.0d;
}, TestHelper.WAIT_DURATION);
// For SubmissionToProcessDelay, the value will stay constant because the Controller will
// create JobContext right away most of the time
Assert.assertTrue(_beanValueMap.containsKey("SubmissionToProcessDelayGauge.Mean"));
Assert.assertTrue(_beanValueMap.containsKey("SubmissionToScheduleDelayGauge.Mean"));
Assert.assertTrue(_beanValueMap.containsKey("ControllerInducedDelayGauge.Mean"));
// Get the new values
double submissionToProcessDelay =
(double) _beanValueMap.get("SubmissionToProcessDelayGauge.Mean");
double newSubmissionToScheduleDelay =
(double) _beanValueMap.get("SubmissionToScheduleDelayGauge.Mean");
double newControllerInducedDelay =
(double) _beanValueMap.get("ControllerInducedDelayGauge.Mean");
Assert.assertTrue(submissionToProcessDelay > 0);
Assert.assertTrue(oldSubmissionToStartDelay < newSubmissionToScheduleDelay);
Assert.assertTrue(oldControllerInducedDelay < newControllerInducedDelay);
oldSubmissionToStartDelay = newSubmissionToScheduleDelay;
oldControllerInducedDelay = newControllerInducedDelay;
}
}
/**
* Queries for all MBeans from the MBean Server and only looks at the relevant MBean and gets its
* metric numbers.
*/
private void extractMetrics() {
try {
QueryExp exp = Query.match(Query.attr("SensorName"), Query.value(CLUSTER_NAME + ".Job.*"));
Set<ObjectInstance> mbeans = new HashSet<>(
ManagementFactory.getPlatformMBeanServer().queryMBeans(new ObjectName("ClusterStatus:*"), exp));
for (ObjectInstance instance : mbeans) {
ObjectName beanName = instance.getObjectName();
if (instance.getClassName().endsWith("JobMonitor")) {
MBeanInfo info = _server.getMBeanInfo(beanName);
MBeanAttributeInfo[] infos = info.getAttributes();
for (MBeanAttributeInfo infoItem : infos) {
Object val = _server.getAttribute(beanName, infoItem.getName());
_beanValueMap.put(infoItem.getName(), val);
}
}
}
} catch (Exception e) {
      // Metric extraction failed; leave _beanValueMap as-is and let the caller retry.
}
}
}
| 9,877 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestDropResourceMetricsReset.java | package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanServerConnection;
import javax.management.MBeanServerNotification;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.apache.helix.tools.ClusterSetup;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
public class TestDropResourceMetricsReset extends ZkUnitTestBase {
private CountDownLatch _registerLatch;
private CountDownLatch _unregisterLatch;
private String _className = TestHelper.getTestClassName();
@BeforeMethod
public void beforeMethod() {
_registerLatch = new CountDownLatch(1);
_unregisterLatch = new CountDownLatch(1);
}
@Test
public void testBasic() throws Exception {
final int NUM_PARTICIPANTS = 4;
final int NUM_PARTITIONS = 64;
final int NUM_REPLICAS = 1;
final String RESOURCE_NAME = "BasicDB0";
String methodName = TestHelper.getTestMethodName();
String clusterName = _className + "_" + methodName;
ParticipantMonitorListener listener =
new ParticipantMonitorListener("ClusterStatus", clusterName, RESOURCE_NAME);
// Set up cluster
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"BasicDB", // resource name prefix
1, // resources
NUM_PARTITIONS, // partitions per resource
NUM_PARTICIPANTS, // number of nodes
NUM_REPLICAS, // replicas
"MasterSlave", RebalanceMode.FULL_AUTO, // use FULL_AUTO mode to test node tagging
true); // do rebalance
// Start participants and controller
ClusterSetup setupTool = new ClusterSetup(_gZkClient);
MockParticipantManager[] participants = new MockParticipantManager[NUM_PARTICIPANTS];
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
participants[i] =
new MockParticipantManager(ZK_ADDR, clusterName, "localhost_" + (12918 + i));
participants[i].syncStart();
}
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
controller.syncStart();
// Verify that the bean was created
boolean noTimeout = _registerLatch.await(30000, TimeUnit.MILLISECONDS);
Assert.assertTrue(noTimeout);
// Drop the resource
setupTool.dropResourceFromCluster(clusterName, RESOURCE_NAME);
// Verify that the bean was removed
noTimeout = _unregisterLatch.await(30000, TimeUnit.MILLISECONDS);
Assert.assertTrue(noTimeout);
// Clean up
listener.disconnect();
controller.syncStop();
for (MockParticipantManager participant : participants) {
participant.syncStop();
}
TestHelper.dropCluster(clusterName, _gZkClient);
}
@Test (dependsOnMethods = "testBasic")
public void testDropWithNoCurrentState() throws Exception {
final int NUM_PARTICIPANTS = 1;
final int NUM_PARTITIONS = 1;
final int NUM_REPLICAS = 1;
final String RESOURCE_NAME = "TestDB0";
String methodName = TestHelper.getTestMethodName();
String clusterName = _className + "_" + methodName;
ParticipantMonitorListener listener =
new ParticipantMonitorListener("ClusterStatus", clusterName, RESOURCE_NAME);
// Set up cluster
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
NUM_PARTITIONS, // partitions per resource
NUM_PARTICIPANTS, // number of nodes
NUM_REPLICAS, // replicas
"MasterSlave", RebalanceMode.FULL_AUTO, // use FULL_AUTO mode to test node tagging
true); // do rebalance
// Start participants and controller
ClusterSetup setupTool = new ClusterSetup(_gZkClient);
MockParticipantManager participant =
new MockParticipantManager(ZK_ADDR, clusterName, "localhost_12918");
participant.syncStart();
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
controller.syncStart();
// Verify that the bean was created
boolean noTimeout = _registerLatch.await(30000, TimeUnit.MILLISECONDS);
Assert.assertTrue(noTimeout);
// stop the participant, so the resource does not exist in any current states.
participant.syncStop();
// Drop the resource
setupTool.dropResourceFromCluster(clusterName, RESOURCE_NAME);
    // TEMP WORKAROUND
    // Adding a live instance creates a current-state change event, which in turn triggers
    // the external-view stage that includes the metric cleanup logic.
    // TODO: Fix and clean up by refactoring the mbean unregistration logic
    // https://github.com/apache/helix/issues/1980
participant.syncStart();
// Verify that the bean was removed
noTimeout = _unregisterLatch.await(30000, TimeUnit.MILLISECONDS);
Assert.assertTrue(noTimeout);
// Clean up
participant.syncStop();
listener.disconnect();
controller.syncStop();
TestHelper.dropCluster(clusterName, _gZkClient);
}
private ObjectName getObjectName(String resourceName, String clusterName)
throws MalformedObjectNameException {
String clusterBeanName =
String.format("%s=%s", ClusterStatusMonitor.CLUSTER_DN_KEY, clusterName);
String resourceBeanName =
String.format("%s,%s=%s", clusterBeanName, ClusterStatusMonitor.RESOURCE_DN_KEY,
resourceName);
return new ObjectName(String.format("%s:%s", MonitorDomainNames.ClusterStatus.name(),
resourceBeanName));
}
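  // A hedged sketch: with cluster "c" and resource "r", getObjectName yields
  // "ClusterStatus:<CLUSTER_DN_KEY>=c,<RESOURCE_DN_KEY>=r", where the DN keys are
  // ClusterStatusMonitor constants (their exact values are not repeated here).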
private class ParticipantMonitorListener extends ClusterMBeanObserver {
private final ObjectName _objectName;
public ParticipantMonitorListener(String domain, String cluster, String resource)
throws InstanceNotFoundException, IOException, MalformedObjectNameException,
NullPointerException {
super(domain);
_objectName = getObjectName(resource, cluster);
}
@Override
public void onMBeanRegistered(MBeanServerConnection server,
MBeanServerNotification mbsNotification) {
if (mbsNotification.getMBeanName().equals(_objectName)) {
_registerLatch.countDown();
}
}
@Override
public void onMBeanUnRegistered(MBeanServerConnection server,
MBeanServerNotification mbsNotification) {
if (mbsNotification.getMBeanName().equals(_objectName)) {
_unregisterLatch.countDown();
}
}
}
}
| 9,878 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestInstanceMonitor.java | package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.management.JMException;
import javax.management.ObjectName;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestInstanceMonitor {
@Test
public void testInstanceMonitor()
throws JMException {
String testCluster = "testCluster";
String testInstance = "testInstance";
String testDomain = "testDomain:key=value";
Set<String> tags = ImmutableSet.of("test", "DEFAULT");
Map<String, List<String>> disabledPartitions =
ImmutableMap.of("instance1", ImmutableList.of("partition1", "partition2"));
InstanceMonitor monitor =
new InstanceMonitor(testCluster, testInstance, new ObjectName(testDomain));
// Verify init status.
Assert.assertEquals(monitor.getSensorName(),
"ParticipantStatus.testCluster.DEFAULT.testInstance");
Assert.assertEquals(monitor.getInstanceName(), testInstance);
Assert.assertEquals(monitor.getOnline(), 0L);
Assert.assertEquals(monitor.getEnabled(), 0L);
Assert.assertEquals(monitor.getTotalMessageReceived(), 0L);
Assert.assertEquals(monitor.getDisabledPartitions(), 0L);
Assert.assertEquals(monitor.getMaxCapacityUsageGauge(), 0.0d);
// Update metrics.
monitor.updateMaxCapacityUsage(0.5d);
monitor.increaseMessageCount(10L);
monitor.updateInstance(tags, disabledPartitions, Collections.emptyList(), true, true);
monitor.updateMessageQueueSize(100L);
monitor.updatePastDueMessageGauge(50L);
// Verify metrics.
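    // The asserts below check that the sensor name now includes the instance tags
    // (rendered as "DEFAULT|test") and that the disabled-partition gauge counts
    // partitions across all mapped instances (2 here).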
Assert.assertEquals(monitor.getTotalMessageReceived(), 10L);
Assert.assertEquals(monitor.getSensorName(),
"ParticipantStatus.testCluster.DEFAULT|test.testInstance");
Assert.assertEquals(monitor.getInstanceName(), testInstance);
Assert.assertEquals(monitor.getOnline(), 1L);
Assert.assertEquals(monitor.getEnabled(), 1L);
Assert.assertEquals(monitor.getDisabledPartitions(), 2L);
Assert.assertEquals(monitor.getMaxCapacityUsageGauge(), 0.5d);
Assert.assertEquals(monitor.getMessageQueueSizeGauge(), 100L);
Assert.assertEquals(monitor.getPastDueMessageGauge(), 50L);
monitor.unregister();
}
}
| 9,879 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring | Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestClusterStatusMonitor.java | package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.JMException;
import javax.management.MBeanException;
import javax.management.MBeanServerConnection;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import org.apache.helix.TestHelper;
import org.apache.helix.common.caches.TaskDataCache;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.task.AssignableInstanceManager;
import org.apache.helix.task.assigner.TaskAssignResult;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.controller.stages.BestPossibleStateOutput;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.tools.DefaultIdealStateCalculator;
import org.apache.helix.tools.StateModelConfigGenerator;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.testng.collections.Sets;
import static org.mockito.Mockito.when;
public class TestClusterStatusMonitor {
private static final MBeanServerConnection _server = ManagementFactory.getPlatformMBeanServer();
private String testDB = "TestDB";
private String testDB_0 = testDB + "_0";
@Test()
public void testReportData() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
int n = 5;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
ClusterStatusMonitor monitor = new ClusterStatusMonitor(clusterName);
monitor.active();
ObjectName clusterMonitorObjName = monitor.getObjectName(monitor.clusterBeanName());
Assert.assertTrue(_server.isRegistered(clusterMonitorObjName));
// Test #setPerInstanceResourceStatus()
BestPossibleStateOutput bestPossibleStates = new BestPossibleStateOutput();
bestPossibleStates.setState(testDB, new Partition(testDB_0), "localhost_12918", "MASTER");
bestPossibleStates.setState(testDB, new Partition(testDB_0), "localhost_12919", "SLAVE");
bestPossibleStates.setState(testDB, new Partition(testDB_0), "localhost_12920", "SLAVE");
bestPossibleStates.setState(testDB, new Partition(testDB_0), "localhost_12921", "OFFLINE");
bestPossibleStates.setState(testDB, new Partition(testDB_0), "localhost_12922", "DROPPED");
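    // Per the asserts below, only replicas in active states (MASTER and SLAVE here)
    // count toward an instance's PartitionGauge; OFFLINE and DROPPED replicas do not.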
Map<String, InstanceConfig> instanceConfigMap = Maps.newHashMap();
for (int i = 0; i < n; i++) {
String instanceName = "localhost_" + (12918 + i);
InstanceConfig config = new InstanceConfig(instanceName);
instanceConfigMap.put(instanceName, config);
}
Map<String, Resource> resourceMap = Maps.newHashMap();
Resource db = new Resource(testDB);
db.setStateModelDefRef("MasterSlave");
db.addPartition(testDB_0);
resourceMap.put(testDB, db);
Map<String, StateModelDefinition> stateModelDefMap = Maps.newHashMap();
StateModelDefinition msStateModelDef =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave());
stateModelDefMap.put("MasterSlave", msStateModelDef);
monitor.setPerInstanceResourceStatus(bestPossibleStates, instanceConfigMap, resourceMap,
stateModelDefMap);
// localhost_12918 should have 1 partition because it's MASTER
ObjectName objName =
monitor.getObjectName(monitor.getPerInstanceResourceBeanName("localhost_12918", testDB));
Object value = _server.getAttribute(objName, "PartitionGauge");
Assert.assertTrue(value instanceof Long);
Assert.assertEquals((Long) value, new Long(1));
value = _server.getAttribute(objName, "SensorName");
Assert.assertTrue(value instanceof String);
Assert.assertEquals((String) value, String.format("%s.%s.%s.%s.%s",
ClusterStatusMonitor.PARTICIPANT_STATUS_KEY, clusterName, ClusterStatusMonitor.DEFAULT_TAG,
"localhost_12918", testDB));
// localhost_12919 should have 1 partition because it's SLAVE
objName =
monitor.getObjectName(monitor.getPerInstanceResourceBeanName("localhost_12919", testDB));
value = _server.getAttribute(objName, "PartitionGauge");
Assert.assertTrue(value instanceof Long);
Assert.assertEquals((Long) value, new Long(1));
// localhost_12921 should have 0 partition because it's OFFLINE
objName =
monitor.getObjectName(monitor.getPerInstanceResourceBeanName("localhost_12921", testDB));
value = _server.getAttribute(objName, "PartitionGauge");
Assert.assertTrue(value instanceof Long);
Assert.assertEquals((Long) value, new Long(0));
// localhost_12922 should have 0 partition because it's DROPPED
objName =
monitor.getObjectName(monitor.getPerInstanceResourceBeanName("localhost_12922", testDB));
value = _server.getAttribute(objName, "PartitionGauge");
Assert.assertTrue(value instanceof Long);
Assert.assertEquals((Long) value, new Long(0));
// Missing localhost_12918 in best possible ideal-state should remove it from mbean
bestPossibleStates.getInstanceStateMap(testDB, new Partition(testDB_0)).remove(
"localhost_12918");
monitor.setPerInstanceResourceStatus(bestPossibleStates, instanceConfigMap, resourceMap,
stateModelDefMap);
objName =
monitor.getObjectName(monitor.getPerInstanceResourceBeanName("localhost_12918", testDB));
Assert.assertFalse(_server.isRegistered(objName),
"Fail to unregister PerInstanceResource mbean for localhost_12918");
// Clean up
monitor.reset();
objName =
monitor.getObjectName(monitor.getPerInstanceResourceBeanName("localhost_12920", testDB));
Assert.assertFalse(_server.isRegistered(objName),
"Fail to unregister PerInstanceResource mbean for localhost_12920");
Assert.assertFalse(_server.isRegistered(clusterMonitorObjName),
"Failed to unregister ClusterStatusMonitor.");
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test()
public void testMessageMetrics() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
int n = 5;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
ClusterStatusMonitor monitor = new ClusterStatusMonitor(clusterName);
monitor.active();
ObjectName clusterMonitorObjName = monitor.getObjectName(monitor.clusterBeanName());
Assert.assertTrue(_server.isRegistered(clusterMonitorObjName));
Map<String, Set<Message>> instanceMessageMap = Maps.newHashMap();
Set<String> liveInstanceSet = Sets.newHashSet();
for (int i = 0; i < n; i++) {
String instanceName = "localhost_" + (12918 + i);
liveInstanceSet.add(instanceName);
long now = System.currentTimeMillis();
Set<Message> messages = Sets.newHashSet();
// add 10 regular messages to each instance
for (int j = 0; j < 10; j++) {
Message m = new Message(Message.MessageType.STATE_TRANSITION, UUID.randomUUID().toString());
m.setTgtName(instanceName);
messages.add(m);
}
// add 10 past-due messages to each instance (using default completion period)
for (int j = 0; j < 10; j++) {
Message m = new Message(Message.MessageType.STATE_TRANSITION, UUID.randomUUID().toString());
m.setTgtName(instanceName);
m.setCreateTimeStamp(now - Message.MESSAGE_EXPECT_COMPLETION_PERIOD - 1000);
messages.add(m);
}
// add other 5 past-due messages to each instance (using explicitly set COMPLETION time in message)
for (int j = 0; j < 5; j++) {
Message m = new Message(Message.MessageType.STATE_TRANSITION, UUID.randomUUID().toString());
m.setTgtName(instanceName);
m.setCompletionDueTimeStamp(now - 1000);
messages.add(m);
}
instanceMessageMap.put(instanceName, messages);
}
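    // Expected totals per instance: 10 regular + 10 default-expiry past-due
    // + 5 explicit-deadline past-due = 25 queued messages, 15 of them past due.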
monitor.setClusterInstanceStatus(liveInstanceSet, liveInstanceSet, Collections.emptySet(),
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), instanceMessageMap);
Assert.assertEquals(monitor.getInstanceMessageQueueBacklog(), 25 * n);
Assert.assertEquals(monitor.getTotalPastDueMessageGauge(), 15 * n);
Object totalMsgSize =
_server.getAttribute(clusterMonitorObjName, "InstanceMessageQueueBacklog");
Assert.assertTrue(totalMsgSize instanceof Long);
Assert.assertEquals((long) totalMsgSize, 25 * n);
Object totalPastdueMsgCount =
_server.getAttribute(clusterMonitorObjName, "TotalPastDueMessageGauge");
Assert.assertTrue(totalPastdueMsgCount instanceof Long);
Assert.assertEquals((long) totalPastdueMsgCount, 15 * n);
for (String instance : liveInstanceSet) {
ObjectName objName =
monitor.getObjectName(monitor.getInstanceBeanName(instance));
Object messageSize = _server.getAttribute(objName, "MessageQueueSizeGauge");
Assert.assertTrue(messageSize instanceof Long);
Assert.assertEquals((long) messageSize, 25L);
Object pastdueMsgCount = _server.getAttribute(objName, "PastDueMessageGauge");
Assert.assertTrue(pastdueMsgCount instanceof Long);
Assert.assertEquals((long) pastdueMsgCount, 15L);
}
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testResourceAggregation() throws JMException, IOException {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
ClusterStatusMonitor monitor = new ClusterStatusMonitor(clusterName);
monitor.active();
ObjectName clusterMonitorObjName = monitor.getObjectName(monitor.clusterBeanName());
Assert.assertTrue(_server.isRegistered(clusterMonitorObjName));
int numInstance = 5;
int numPartition = 10;
int numReplica = 3;
List<String> instances = new ArrayList<String>();
for (int i = 0; i < numInstance; i++) {
String instance = "localhost_" + (12918 + i);
instances.add(instance);
}
ZNRecord idealStateRecord = DefaultIdealStateCalculator
.calculateIdealState(instances, numPartition, numReplica, testDB, "MASTER", "SLAVE");
IdealState idealState = new IdealState(TestResourceMonitor.deepCopyZNRecord(idealStateRecord));
idealState.setMinActiveReplicas(numReplica);
ExternalView externalView = new ExternalView(TestResourceMonitor.deepCopyZNRecord(idealStateRecord));
StateModelDefinition stateModelDef =
BuiltInStateModelDefinitions.MasterSlave.getStateModelDefinition();
monitor.setResourceState(testDB, externalView, idealState, stateModelDef);
Assert.assertEquals(monitor.getTotalPartitionGauge(), numPartition);
Assert.assertEquals(monitor.getTotalResourceGauge(), 1);
Assert.assertEquals(monitor.getMissingMinActiveReplicaPartitionGauge(), 0);
Assert.assertEquals(monitor.getMissingTopStatePartitionGauge(), 0);
Assert.assertEquals(monitor.getMissingReplicaPartitionGauge(), 0);
Assert.assertEquals(monitor.getStateTransitionCounter(), 0);
Assert.assertEquals(monitor.getPendingStateTransitionGuage(), 0);
Assert.assertEquals(monitor.getDifferenceWithIdealStateGauge(), 0);
int lessMinActiveReplica = 6;
Random r = new Random();
externalView = new ExternalView(TestResourceMonitor.deepCopyZNRecord(idealStateRecord));
int start = r.nextInt(numPartition - lessMinActiveReplica - 1);
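// Alternately flip SLAVE replicas to OFFLINE or drop them entirely so that the selected
// partitions fall below the min-active-replica threshold.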
for (int i = start; i < start + lessMinActiveReplica; i++) {
String partition = testDB + "_" + i;
Map<String, String> map = externalView.getStateMap(partition);
Iterator<String> it = map.keySet().iterator();
int flag = 0;
while (it.hasNext()) {
String key = it.next();
if (map.get(key).equalsIgnoreCase("SLAVE")) {
if (flag++ % 2 == 0) {
map.put(key, "OFFLINE");
} else {
it.remove();
}
}
}
externalView.setStateMap(partition, map);
}
monitor.setResourceState(testDB, externalView, idealState, stateModelDef);
Assert.assertEquals(monitor.getTotalPartitionGauge(), numPartition);
Assert.assertEquals(monitor.getMissingMinActiveReplicaPartitionGauge(), lessMinActiveReplica);
Assert.assertEquals(monitor.getMissingTopStatePartitionGauge(), 0);
Assert.assertEquals(monitor.getMissingReplicaPartitionGauge(), lessMinActiveReplica);
Assert.assertEquals(monitor.getStateTransitionCounter(), 0);
Assert.assertEquals(monitor.getPendingStateTransitionGuage(), 0);
Assert.assertEquals(monitor.getDifferenceWithIdealStateGauge(), lessMinActiveReplica);
int missTopState = 7;
externalView = new ExternalView(TestResourceMonitor.deepCopyZNRecord(idealStateRecord));
start = r.nextInt(numPartition - missTopState - 1);
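// Knock out the MASTER replica (alternately set OFFLINE or removed) so the selected
// partitions report a missing top state.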
for (int i = start; i < start + missTopState; i++) {
String partition = testDB + "_" + i;
Map<String, String> map = externalView.getStateMap(partition);
int flag = 0;
for (String key : map.keySet()) {
if (map.get(key).equalsIgnoreCase("MASTER")) {
if (flag++ % 2 == 0) {
map.put(key, "OFFLINE");
} else {
map.remove(key);
}
break;
}
}
externalView.setStateMap(partition, map);
}
monitor.setResourceState(testDB, externalView, idealState, stateModelDef);
Assert.assertEquals(monitor.getTotalPartitionGauge(), numPartition);
Assert.assertEquals(monitor.getMissingMinActiveReplicaPartitionGauge(), 0);
Assert.assertEquals(monitor.getMissingTopStatePartitionGauge(), missTopState);
Assert.assertEquals(monitor.getMissingReplicaPartitionGauge(), missTopState);
Assert.assertEquals(monitor.getStateTransitionCounter(), 0);
Assert.assertEquals(monitor.getPendingStateTransitionGuage(), 0);
Assert.assertEquals(monitor.getDifferenceWithIdealStateGauge(), missTopState);
int missReplica = 5;
externalView = new ExternalView(TestResourceMonitor.deepCopyZNRecord(idealStateRecord));
start = r.nextInt(numPartition - missReplica - 1);
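// Remove a single SLAVE from each selected partition: the partition still satisfies
// min-active-replicas but is missing one of its expected replicas.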
for (int i = start; i < start + missReplica; i++) {
String partition = testDB + "_" + i;
Map<String, String> map = externalView.getStateMap(partition);
Iterator<String> it = map.keySet().iterator();
while (it.hasNext()) {
String key = it.next();
if (map.get(key).equalsIgnoreCase("SLAVE")) {
it.remove();
break;
}
}
externalView.setStateMap(partition, map);
}
monitor.setResourceState(testDB, externalView, idealState, stateModelDef);
Assert.assertEquals(monitor.getTotalPartitionGauge(), numPartition);
Assert.assertEquals(monitor.getMissingMinActiveReplicaPartitionGauge(), 0);
Assert.assertEquals(monitor.getMissingTopStatePartitionGauge(), 0);
Assert.assertEquals(monitor.getMissingReplicaPartitionGauge(), missReplica);
Assert.assertEquals(monitor.getStateTransitionCounter(), 0);
Assert.assertEquals(monitor.getPendingStateTransitionGuage(), 0);
Assert.assertEquals(monitor.getDifferenceWithIdealStateGauge(), missReplica);
int messageCount = 4;
List<Message> messages = new ArrayList<>();
for (int i = 0; i < messageCount; i++) {
Message message = new Message(Message.MessageType.STATE_TRANSITION, "message" + i);
message.setResourceName(testDB);
message.setTgtName(instances.get(i % instances.size()));
messages.add(message);
}
monitor.increaseMessageReceived(messages);
Assert.assertEquals(monitor.getStateTransitionCounter(), messageCount);
Assert.assertEquals(monitor.getPendingStateTransitionGuage(), 0);
// test pending state transition message report and read
messageCount = new Random().nextInt(numPartition) + 1;
monitor.setResourcePendingMessages(testDB, messageCount);
Assert.assertEquals(monitor.getPendingStateTransitionGuage(), messageCount);
// Reset monitor.
monitor.reset();
Assert.assertFalse(_server.isRegistered(clusterMonitorObjName),
"Failed to unregister ClusterStatusMonitor.");
}
@Test
public void testUpdateInstanceCapacityStatus()
throws MalformedObjectNameException, IOException, AttributeNotFoundException, MBeanException,
ReflectionException, InstanceNotFoundException {
String clusterName = "testCluster";
List<Double> maxUsageList = ImmutableList.of(0.0d, 0.32d, 0.85d, 1.0d, 0.50d, 0.75d);
Map<String, Double> maxUsageMap = new HashMap<>();
Map<String, Map<String, Integer>> instanceCapacityMap = new HashMap<>();
Random rand = new Random();
for (int i = 0; i < maxUsageList.size(); i++) {
String instanceName = "instance" + i;
maxUsageMap.put(instanceName, maxUsageList.get(i));
instanceCapacityMap.put(instanceName,
ImmutableMap.of("capacity1", rand.nextInt(100), "capacity2", rand.nextInt(100)));
}
// Setup cluster status monitor.
ClusterStatusMonitor monitor = new ClusterStatusMonitor(clusterName);
monitor.active();
ObjectName clusterMonitorObjName = monitor.getObjectName(monitor.clusterBeanName());
// Cluster status monitor is registered.
Assert.assertTrue(_server.isRegistered(clusterMonitorObjName));
// Before calling setClusterInstanceStatus, instance monitors are not yet registered.
for (Map.Entry<String, Double> entry : maxUsageMap.entrySet()) {
String instance = entry.getKey();
String instanceBeanName = String
.format("%s,%s=%s", monitor.clusterBeanName(), ClusterStatusMonitor.INSTANCE_DN_KEY,
instance);
ObjectName instanceObjectName = monitor.getObjectName(instanceBeanName);
Assert.assertFalse(_server.isRegistered(instanceObjectName));
}
// Call setClusterInstanceStatus to register instance monitors.
monitor.setClusterInstanceStatus(maxUsageMap.keySet(), maxUsageMap.keySet(),
Collections.emptySet(), Collections.emptyMap(), Collections.emptyMap(),
Collections.emptyMap(), Collections.emptyMap());
// Update instance capacity status.
for (Map.Entry<String, Double> usageEntry : maxUsageMap.entrySet()) {
String instanceName = usageEntry.getKey();
monitor.updateInstanceCapacityStatus(instanceName, usageEntry.getValue(),
instanceCapacityMap.get(instanceName));
}
verifyCapacityMetrics(monitor, maxUsageMap, instanceCapacityMap);
// Change capacity keys: "capacity2" -> "capacity3"
for (String instanceName : instanceCapacityMap.keySet()) {
instanceCapacityMap.put(instanceName,
ImmutableMap.of("capacity1", rand.nextInt(100), "capacity3", rand.nextInt(100)));
}
// Update instance capacity status.
for (Map.Entry<String, Double> usageEntry : maxUsageMap.entrySet()) {
String instanceName = usageEntry.getKey();
monitor.updateInstanceCapacityStatus(instanceName, usageEntry.getValue(),
instanceCapacityMap.get(instanceName));
}
// "capacity2" metric should not exist in MBean server.
String removedAttribute = "capacity2Gauge";
for (Map.Entry<String, Map<String, Integer>> instanceEntry : instanceCapacityMap.entrySet()) {
String instance = instanceEntry.getKey();
String instanceBeanName = String
.format("%s,%s=%s", monitor.clusterBeanName(), ClusterStatusMonitor.INSTANCE_DN_KEY,
instance);
ObjectName instanceObjectName = monitor.getObjectName(instanceBeanName);
try {
_server.getAttribute(instanceObjectName, removedAttribute);
Assert.fail();
} catch (AttributeNotFoundException ex) {
// Expected AttributeNotFoundException because "capacity2Gauge" metric does not exist in
// MBean server.
}
}
verifyCapacityMetrics(monitor, maxUsageMap, instanceCapacityMap);
// Reset monitor.
monitor.reset();
Assert.assertFalse(_server.isRegistered(clusterMonitorObjName),
"Failed to unregister ClusterStatusMonitor.");
for (String instance : maxUsageMap.keySet()) {
String instanceBeanName =
String.format("%s,%s=%s", monitor.clusterBeanName(), ClusterStatusMonitor.INSTANCE_DN_KEY, instance);
ObjectName instanceObjectName = monitor.getObjectName(instanceBeanName);
Assert.assertFalse(_server.isRegistered(instanceObjectName),
"Failed to unregister instance monitor for instance: " + instance);
}
}
@Test
public void testRecordAvailableThreadsPerType() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
ClusterStatusMonitor monitor = new ClusterStatusMonitor(clusterName);
monitor.active();
ObjectName clusterMonitorObjName = monitor.getObjectName(monitor.clusterBeanName());
Assert.assertTrue(_server.isRegistered(clusterMonitorObjName));
Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
Map<String, LiveInstance> liveInstanceMap = new HashMap<>();
for (int i = 0; i < 3; i++) {
String instanceName = "localhost_" + (12918 + i);
LiveInstance liveInstance = new LiveInstance(instanceName);
InstanceConfig instanceConfig = new InstanceConfig(instanceName);
liveInstanceMap.put(instanceName, liveInstance);
instanceConfigMap.put(instanceName, instanceConfig);
}
ClusterConfig clusterConfig = new ClusterConfig(clusterName);
clusterConfig.resetTaskQuotaRatioMap();
clusterConfig.setTaskQuotaRatio("type1", 30);
clusterConfig.setTaskQuotaRatio("type2", 10);
TaskDataCache taskDataCache = Mockito.mock(TaskDataCache.class);
when(taskDataCache.getJobConfigMap()).thenReturn(Collections.emptyMap());
AssignableInstanceManager assignableInstanceManager = new AssignableInstanceManager();
assignableInstanceManager.buildAssignableInstances(clusterConfig, taskDataCache,
liveInstanceMap, instanceConfigMap);
monitor.updateAvailableThreadsPerJob(assignableInstanceManager.getGlobalCapacityMap());
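// With three live instances, each assumed to contribute the default task thread pool of 40
// threads, the cluster has 120 schedulable threads; the 30:10 quota ratio splits them into
// 90 threads for "type1" and 30 for "type2".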
ObjectName type1ObjectName = monitor.getObjectName(monitor.getJobBeanName("type1"));
ObjectName type2ObjectName = monitor.getObjectName(monitor.getJobBeanName("type2"));
Assert.assertTrue(_server.isRegistered(type1ObjectName));
Assert.assertEquals(_server.getAttribute(type1ObjectName, "AvailableThreadGauge"), 90L);
Assert.assertTrue(_server.isRegistered(type2ObjectName));
Assert.assertEquals(_server.getAttribute(type2ObjectName, "AvailableThreadGauge"), 30L);
TaskAssignResult taskAssignResult = Mockito.mock(TaskAssignResult.class);
when(taskAssignResult.getQuotaType()).thenReturn("type1");
// Use a non-existent instance to bypass the actual assignment while still decreasing the thread counts
assignableInstanceManager.assign("UnknownInstance", taskAssignResult);
// Do it twice for type 1
assignableInstanceManager.assign("UnknownInstance", taskAssignResult);
when(taskAssignResult.getQuotaType()).thenReturn("type2");
assignableInstanceManager.assign("UnknownInstance", taskAssignResult);
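// Each assign() call above consumed one thread from its quota type, so after refreshing the
// gauges type1 should drop from 90 to 88 and type2 from 30 to 29.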
monitor.updateAvailableThreadsPerJob(assignableInstanceManager.getGlobalCapacityMap());
Assert.assertEquals(_server.getAttribute(type1ObjectName, "AvailableThreadGauge"), 88L);
Assert.assertEquals(_server.getAttribute(type2ObjectName, "AvailableThreadGauge"), 29L);
}
private void verifyCapacityMetrics(ClusterStatusMonitor monitor, Map<String, Double> maxUsageMap,
Map<String, Map<String, Integer>> instanceCapacityMap)
throws MalformedObjectNameException, IOException, AttributeNotFoundException, MBeanException,
ReflectionException, InstanceNotFoundException {
// Verify results.
for (Map.Entry<String, Map<String, Integer>> instanceEntry : instanceCapacityMap.entrySet()) {
String instance = instanceEntry.getKey();
Map<String, Integer> capacityMap = instanceEntry.getValue();
String instanceBeanName = String
.format("%s,%s=%s", monitor.clusterBeanName(), ClusterStatusMonitor.INSTANCE_DN_KEY,
instance);
ObjectName instanceObjectName = monitor.getObjectName(instanceBeanName);
Assert.assertTrue(_server.isRegistered(instanceObjectName));
Assert.assertEquals(_server.getAttribute(instanceObjectName,
InstanceMonitor.InstanceMonitorMetric.MAX_CAPACITY_USAGE_GAUGE.metricName()),
maxUsageMap.get(instance));
for (Map.Entry<String, Integer> capacityEntry : capacityMap.entrySet()) {
String capacityKey = capacityEntry.getKey();
String attributeName = capacityKey + "Gauge";
Assert.assertEquals((long) _server.getAttribute(instanceObjectName, attributeName),
(long) instanceCapacityMap.get(instance).get(capacityKey));
}
}
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestCustomizedViewMonitor.java
package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.Stack;
import javax.management.JMException;
import javax.management.MBeanServerConnection;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.helix.TestHelper;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestCustomizedViewMonitor {
private static final MBeanServerConnection _server = ManagementFactory.getPlatformMBeanServer();
private final String TEST_CLUSTER = "test_cluster";
private final String MAX_SUFFIX = ".Max";
private final String MEAN_SUFFIX = ".Mean";
private ObjectName buildObjectName(int duplicateNum) throws MalformedObjectNameException {
ObjectName objectName = new ObjectName(String
.format("%s:%s=%s,%s=%s", MonitorDomainNames.AggregatedView.name(), "Type",
"CustomizedView", "Cluster", TEST_CLUSTER));
if (duplicateNum == 0) {
return objectName;
} else {
return new ObjectName(
String.format("%s,%s=%s", objectName.toString(), MBeanRegistrar.DUPLICATE, duplicateNum));
}
}
@Test
public void testMBeanRegistration() throws JMException, IOException {
int numOfMonitors = 5;
Stack<CustomizedViewMonitor> monitors = new Stack<>();
for (int i = 0; i < numOfMonitors; i++) {
CustomizedViewMonitor monitor = new CustomizedViewMonitor(TEST_CLUSTER);
monitor.register();
monitors.push(monitor);
}
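// The first registration owns the plain ObjectName and later ones get Duplicate=1..4, so
// popping the stack unregisters the highest duplicate first and the original bean last.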
for (int i = 0; i < numOfMonitors; i++) {
if (i == numOfMonitors - 1) {
Assert.assertTrue(_server.isRegistered(buildObjectName(0)));
} else {
Assert.assertTrue(_server.isRegistered(buildObjectName(numOfMonitors - i - 1)));
}
CustomizedViewMonitor monitor = monitors.pop();
Assert.assertNotNull(monitor);
monitor.unregister();
if (i == numOfMonitors - 1) {
Assert.assertFalse(_server.isRegistered(buildObjectName(0)));
} else {
Assert.assertFalse(_server.isRegistered(buildObjectName(numOfMonitors - i - 1)));
}
}
}
@Test
public void testMetricInitialization() throws Exception {
CustomizedViewMonitor monitor = new CustomizedViewMonitor(TEST_CLUSTER);
monitor.register();
int sum = 0;
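// Record latencies 0..9 and, after each sample, verify that the histogram's Max matches the
// largest value seen so far and its Mean matches the running average.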
for (int i = 0; i < 10; i++) {
monitor.recordUpdateToAggregationLatency(i);
sum += i;
int expectedMax = i;
double expectedMean = sum / (i + 1.0);
Assert.assertTrue(TestHelper.verify(() -> (long) _server.getAttribute(buildObjectName(0),
CustomizedViewMonitor.UPDATE_TO_AGGREGATION_LATENCY_GAUGE + MAX_SUFFIX) == expectedMax,
TestHelper.WAIT_DURATION));
Assert.assertTrue(TestHelper.verify(() -> (double) _server.getAttribute(buildObjectName(0),
CustomizedViewMonitor.UPDATE_TO_AGGREGATION_LATENCY_GAUGE + MEAN_SUFFIX) == expectedMean,
TestHelper.WAIT_DURATION));
}
monitor.unregister();
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestRebalancerMetrics.java
package org.apache.helix.monitoring.mbeans;
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
import java.util.Arrays;
import java.util.Date;
import java.util.Map;
import org.apache.helix.api.config.StateTransitionThrottleConfig;
import org.apache.helix.controller.common.PartitionStateMap;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.BaseStageTest;
import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
import org.apache.helix.controller.stages.BestPossibleStateOutput;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.controller.stages.IntermediateStateCalcStage;
import org.apache.helix.controller.stages.MessageGenerationPhase;
import org.apache.helix.controller.stages.MessageSelectionStage;
import org.apache.helix.controller.stages.ReadClusterDataStage;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestRebalancerMetrics extends BaseStageTest {
@Test
public void testRecoveryRebalanceMetrics() {
System.out
.println("START testRecoveryRebalanceMetrics at " + new Date(System.currentTimeMillis()));
String resource = "testResourceName";
int numPartition = 100;
int numReplica = 3;
int maxPending = 3;
setupIdealState(5, new String[] {resource}, numPartition,
numReplica, IdealState.RebalanceMode.FULL_AUTO,
BuiltInStateModelDefinitions.MasterSlave.name());
setupInstances(5);
setupLiveInstances(5);
setupStateModel();
Map<String, Resource> resourceMap =
getResourceMap(new String[] {resource}, numPartition,
BuiltInStateModelDefinitions.MasterSlave.name());
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
event.addAttribute(AttributeName.ControllerDataProvider.name(), new ResourceControllerDataProvider());
ClusterStatusMonitor monitor = new ClusterStatusMonitor(_clusterName);
monitor.active();
event.addAttribute(AttributeName.clusterStatusMonitor.name(), monitor);
runStage(event, new ReadClusterDataStage());
ResourceControllerDataProvider cache = event.getAttribute(AttributeName.ControllerDataProvider.name());
setupThrottleConfig(cache.getClusterConfig(),
StateTransitionThrottleConfig.RebalanceType.RECOVERY_BALANCE, maxPending);
runStage(event, new BestPossibleStateCalcStage());
runStage(event, new MessageGenerationPhase());
runStage(event, new MessageSelectionStage());
runStage(event, new IntermediateStateCalcStage());
ClusterStatusMonitor clusterStatusMonitor = event.getAttribute(AttributeName.clusterStatusMonitor.name());
ResourceMonitor resourceMonitor = clusterStatusMonitor.getResourceMonitor(resource);
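// With an empty current state, every replica (numPartition * numReplica) needs a recovery
// transition except those classified as load balance; the throttle admits only maxPending of
// them, so the remainder is reported as throttled.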
Assert.assertEquals(resourceMonitor.getNumPendingRecoveryRebalanceReplicas(),
numPartition * numReplica - resourceMonitor.getNumPendingLoadRebalanceReplicas());
Assert.assertEquals(resourceMonitor.getNumRecoveryRebalanceThrottledReplicas(),
numPartition * numReplica - resourceMonitor.getNumPendingLoadRebalanceReplicas() - maxPending);
System.out
.println("END testRecoveryRebalanceMetrics at " + new Date(System.currentTimeMillis()));
}
@Test
public void testLoadBalanceMetrics() {
System.out
.println("START testLoadBalanceMetrics at " + new Date(System.currentTimeMillis()));
String resource = "testResourceName";
int numPartition = 100;
int numReplica = 3;
int maxPending = 3;
setupIdealState(5, new String[] {resource}, numPartition,
numReplica, IdealState.RebalanceMode.FULL_AUTO,
BuiltInStateModelDefinitions.MasterSlave.name());
setupInstances(5);
setupLiveInstances(4);
setupStateModel();
Map<String, Resource> resourceMap =
getResourceMap(new String[] {resource}, numPartition,
BuiltInStateModelDefinitions.MasterSlave.name());
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
event.addAttribute(AttributeName.ControllerDataProvider.name(), new ResourceControllerDataProvider());
ClusterStatusMonitor monitor = new ClusterStatusMonitor(_clusterName);
monitor.active();
event.addAttribute(AttributeName.clusterStatusMonitor.name(), monitor);
runStage(event, new ReadClusterDataStage());
runStage(event, new BestPossibleStateCalcStage());
BestPossibleStateOutput bestPossibleStateOutput =
event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
currentStateOutput = copyCurrentStateFromBestPossible(bestPossibleStateOutput, resource);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
setupLiveInstances(4);
ResourceControllerDataProvider cache = event.getAttribute(AttributeName.ControllerDataProvider.name());
cache.clearCachedResourceAssignments();
runStage(event, new ReadClusterDataStage());
setupThrottleConfig(cache.getClusterConfig(),
StateTransitionThrottleConfig.RebalanceType.LOAD_BALANCE, maxPending);
runStage(event, new BestPossibleStateCalcStage());
runStage(event, new MessageGenerationPhase());
runStage(event, new MessageSelectionStage());
runStage(event, new IntermediateStateCalcStage());
ClusterStatusMonitor clusterStatusMonitor = event.getAttribute(AttributeName.clusterStatusMonitor.name());
ResourceMonitor resourceMonitor = clusterStatusMonitor.getResourceMonitor(resource);
long numPendingLoadBalance = resourceMonitor.getNumPendingLoadRebalanceReplicas();
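// The throttle admits only maxPending load-balance transitions per resource, so the rest of
// the pending replicas are reported as throttled.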
Assert.assertTrue(numPendingLoadBalance > 0);
Assert.assertEquals(resourceMonitor.getNumLoadRebalanceThrottledReplicas(), numPendingLoadBalance - maxPending);
System.out
.println("END testLoadBalanceMetrics at " + new Date(System.currentTimeMillis()));
}
private void setupThrottleConfig(ClusterConfig clusterConfig,
StateTransitionThrottleConfig.RebalanceType rebalanceType, int maxPending) {
StateTransitionThrottleConfig resourceThrottle =
new StateTransitionThrottleConfig(rebalanceType,
StateTransitionThrottleConfig.ThrottleScope.RESOURCE, maxPending);
clusterConfig.setStateTransitionThrottleConfigs(Arrays.asList(resourceThrottle));
}
private CurrentStateOutput copyCurrentStateFromBestPossible(
BestPossibleStateOutput bestPossibleStateOutput, String resource) {
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
PartitionStateMap partitionStateMap = bestPossibleStateOutput.getPartitionStateMap(resource);
for (Partition partition : partitionStateMap.partitionSet()) {
Map<String, String> stateMap = partitionStateMap.getPartitionMap(partition);
for (String instance : stateMap.keySet()) {
currentStateOutput.setCurrentState(resource, partition, instance, stateMap.get(instance));
}
}
return currentStateOutput;
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestDisableResourceMbean.java
package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import java.util.Date;
import javax.management.MBeanServerConnection;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.TestHelper;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.tools.ClusterVerifiers.BestPossibleExternalViewVerifier;
import org.apache.helix.tools.ClusterVerifiers.ZkHelixClusterVerifier;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestDisableResourceMbean extends ZkUnitTestBase {
private MBeanServerConnection _mbeanServer = ManagementFactory.getPlatformMBeanServer();
@Test
public void testDisableResourceMonitoring() throws Exception {
final int NUM_PARTICIPANTS = 2;
String clusterName = TestHelper.getTestClassName() + "_" + TestHelper.getTestMethodName();
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
// Set up cluster
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
3, // resources
32, // partitions per resource
4, // number of nodes
1, // replicas
"MasterSlave", RebalanceMode.FULL_AUTO, // use FULL_AUTO mode to test node tagging
true); // do rebalance
MockParticipantManager[] participants = new MockParticipantManager[NUM_PARTICIPANTS];
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
participants[i] =
new MockParticipantManager(ZK_ADDR, clusterName, "localhost_" + (12918 + i));
participants[i].syncStart();
}
ConfigAccessor configAccessor = new ConfigAccessor(_gZkClient);
HelixConfigScope resourceScope =
new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.RESOURCE)
.forCluster(clusterName).forResource("TestDB1").build();
configAccessor
.set(resourceScope, ResourceConfig.ResourceConfigProperty.MONITORING_DISABLED.name(),
"true");
resourceScope = new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.RESOURCE)
.forCluster(clusterName).forResource("TestDB2").build();
configAccessor
.set(resourceScope, ResourceConfig.ResourceConfigProperty.MONITORING_DISABLED.name(),
"false");
ClusterControllerManager controller =
new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
controller.syncStart();
ZkHelixClusterVerifier clusterVerifier =
new BestPossibleExternalViewVerifier.Builder(clusterName).setZkClient(_gZkClient)
.setWaitTillVerify(TestHelper.DEFAULT_REBALANCE_PROCESSING_WAIT_TIME)
.build();
Assert.assertTrue(clusterVerifier.verifyByPolling());
// Verify that beans were created for TestDB0 and TestDB2, but not for TestDB1.
pollForMBeanExistence(getMbeanName("TestDB0", clusterName), true);
pollForMBeanExistence(getMbeanName("TestDB1", clusterName), false);
pollForMBeanExistence(getMbeanName("TestDB2", clusterName), true);
controller.syncStop();
for (MockParticipantManager participant : participants) {
participant.syncStop();
}
TestHelper.dropCluster(clusterName, _gZkClient);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
private void pollForMBeanExistence(final ObjectName objectName, boolean expectation)
throws Exception {
boolean result = TestHelper.verify(new TestHelper.Verifier() {
@Override
public boolean verify() throws Exception {
return _mbeanServer.isRegistered(objectName);
}
}, 3000);
Assert.assertEquals(result, expectation);
}
private ObjectName getMbeanName(String resourceName, String clusterName)
throws MalformedObjectNameException {
String clusterBeanName =
String.format("%s=%s", ClusterStatusMonitor.CLUSTER_DN_KEY, clusterName);
String resourceBeanName = String
.format("%s,%s=%s", clusterBeanName, ClusterStatusMonitor.RESOURCE_DN_KEY, resourceName);
return new ObjectName(
String.format("%s:%s", MonitorDomainNames.ClusterStatus.name(), resourceBeanName));
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestRoutingTableProviderMonitor.java
package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import java.util.HashSet;
import java.util.Set;
import javax.management.AttributeNotFoundException;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.helix.PropertyType;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestRoutingTableProviderMonitor {
private MBeanServer _beanServer = ManagementFactory.getPlatformMBeanServer();
private final String TEST_CLUSTER = "test_cluster";
private ObjectName buildObjectName(PropertyType type, String cluster)
throws MalformedObjectNameException {
return MBeanRegistrar.buildObjectName(MonitorDomainNames.RoutingTableProvider.name(),
RoutingTableProviderMonitor.CLUSTER_KEY, cluster, RoutingTableProviderMonitor.DATA_TYPE_KEY,
type.name());
}
private ObjectName buildObjectName(PropertyType type, String cluster, int num)
throws MalformedObjectNameException {
ObjectName objectName = buildObjectName(type, cluster);
if (num > 0) {
return new ObjectName(String
.format("%s,%s=%s", objectName.toString(), MBeanRegistrar.DUPLICATE,
String.valueOf(num)));
} else {
return objectName;
}
}
@Test
public void testMBeanRegistration() throws JMException {
Set<RoutingTableProviderMonitor> monitors = new HashSet<>();
for (PropertyType type : PropertyType.values()) {
monitors.add(new RoutingTableProviderMonitor(type, TEST_CLUSTER).register());
Assert.assertTrue(_beanServer.isRegistered(buildObjectName(type, TEST_CLUSTER)));
}
for (PropertyType type : PropertyType.values()) {
monitors.add(new RoutingTableProviderMonitor(type, TEST_CLUSTER).register());
Assert.assertTrue(_beanServer.isRegistered(buildObjectName(type, TEST_CLUSTER, 1)));
}
for (PropertyType type : PropertyType.values()) {
monitors.add(new RoutingTableProviderMonitor(type, TEST_CLUSTER).register());
Assert.assertTrue(_beanServer.isRegistered(buildObjectName(type, TEST_CLUSTER, 2)));
}
// Un-register all monitors
for (RoutingTableProviderMonitor monitor : monitors) {
monitor.unregister();
}
for (PropertyType type : PropertyType.values()) {
Assert.assertFalse(_beanServer.isRegistered(buildObjectName(type, TEST_CLUSTER)));
Assert.assertFalse(_beanServer.isRegistered(buildObjectName(type, TEST_CLUSTER, 1)));
Assert.assertFalse(_beanServer.isRegistered(buildObjectName(type, TEST_CLUSTER, 2)));
}
}
@Test
public void testMetrics() throws JMException, InterruptedException {
PropertyType type = PropertyType.EXTERNALVIEW;
RoutingTableProviderMonitor monitor = new RoutingTableProviderMonitor(type, TEST_CLUSTER);
monitor.register();
ObjectName name = buildObjectName(type, TEST_CLUSTER);
monitor.increaseCallbackCounters(10);
Assert.assertEquals((long) _beanServer.getAttribute(name, "CallbackCounter"), 1);
Assert.assertEquals((long) _beanServer.getAttribute(name, "EventQueueSizeGauge"), 10);
monitor.increaseCallbackCounters(15);
Assert.assertEquals((long) _beanServer.getAttribute(name, "CallbackCounter"), 2);
Assert.assertEquals((long) _beanServer.getAttribute(name, "EventQueueSizeGauge"), 15);
Assert.assertEquals((long) _beanServer.getAttribute(name, "DataRefreshLatencyGauge.Max"), 0);
Assert.assertEquals((long) _beanServer.getAttribute(name, "DataRefreshCounter"), 0);
// StatePropagationLatencyGauge only apply for current state
try {
_beanServer.getAttribute(name, "StatePropagationLatencyGauge.Max");
Assert.fail();
} catch (AttributeNotFoundException ex) {
// Expected AttributeNotFoundException because the metric does not exist in
// MBean server.
}
long startTime = System.currentTimeMillis();
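// Sleep briefly so the measured refresh latency is strictly positive, then bound it from
// above by the elapsed wall-clock time.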
Thread.sleep(5);
monitor.increaseDataRefreshCounters(startTime);
long latency = (long) _beanServer.getAttribute(name, "DataRefreshLatencyGauge.Max");
Assert.assertTrue(latency >= 5 && latency <= System.currentTimeMillis() - startTime);
Assert.assertEquals((long) _beanServer.getAttribute(name, "DataRefreshCounter"), 1);
monitor.increaseDataRefreshCounters(startTime);
long newLatency = (long) _beanServer.getAttribute(name, "DataRefreshLatencyGauge.Max");
Assert.assertTrue(newLatency >= latency);
Assert.assertEquals((long) _beanServer.getAttribute(name, "DataRefreshCounter"), 2);
monitor.unregister();
}
@Test
public void testCurrentStateMetrics() throws JMException, InterruptedException {
PropertyType type = PropertyType.CURRENTSTATES;
RoutingTableProviderMonitor monitor = new RoutingTableProviderMonitor(type, TEST_CLUSTER);
monitor.register();
ObjectName name = buildObjectName(type, TEST_CLUSTER);
monitor.increaseCallbackCounters(10);
Assert.assertEquals((long) _beanServer.getAttribute(name, "StatePropagationLatencyGauge.Max"), 0);
monitor.recordStatePropagationLatency(5);
long statelatency = (long) _beanServer.getAttribute(name, "StatePropagationLatencyGauge.Max");
Assert.assertEquals(statelatency, 5);
monitor.recordStatePropagationLatency(10);
statelatency = (long) _beanServer.getAttribute(name, "StatePropagationLatencyGauge.Max");
Assert.assertEquals(statelatency, 10);
monitor.unregister();
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestResourceMonitor.java
package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.JMException;
import javax.management.MBeanException;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import com.google.common.collect.ImmutableMap;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.tools.DefaultIdealStateCalculator;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestResourceMonitor {
String _clusterName = "Test-cluster";
String _dbName = "TestDB";
int _replicas = 3;
int _partitions = 50;
@Test()
public void testReportData() throws JMException {
final int n = 5;
ResourceMonitor monitor =
new ResourceMonitor(_clusterName, _dbName, new ObjectName("testDomain:key=value"));
monitor.register();
try {
List<String> instances = new ArrayList<>();
for (int i = 0; i < n; i++) {
String instance = "localhost_" + (12918 + i);
instances.add(instance);
}
ZNRecord idealStateRecord = DefaultIdealStateCalculator
.calculateIdealState(instances, _partitions, _replicas - 1, _dbName, "MASTER", "SLAVE");
IdealState idealState = new IdealState(deepCopyZNRecord(idealStateRecord));
idealState.setMinActiveReplicas(_replicas - 1);
ExternalView externalView = new ExternalView(deepCopyZNRecord(idealStateRecord));
StateModelDefinition stateModelDef =
BuiltInStateModelDefinitions.MasterSlave.getStateModelDefinition();
monitor.updateResourceState(externalView, idealState, stateModelDef);
Assert.assertEquals(monitor.getDifferenceWithIdealStateGauge(), 0);
Assert.assertEquals(monitor.getErrorPartitionGauge(), 0);
Assert.assertEquals(monitor.getExternalViewPartitionGauge(), _partitions);
Assert.assertEquals(monitor.getPartitionGauge(), _partitions);
Assert.assertEquals(monitor.getMissingMinActiveReplicaPartitionGauge(), 0);
Assert.assertEquals(monitor.getMissingReplicaPartitionGauge(), 0);
Assert.assertEquals(monitor.getMissingTopStatePartitionGauge(), 0);
Assert.assertEquals(monitor.getBeanName(), _clusterName + " " + _dbName);
int errorCount = 5;
Random r = new Random();
int start = r.nextInt(_partitions - errorCount - 1);
for (int i = start; i < start + errorCount; i++) {
String partition = _dbName + "_" + i;
Map<String, String> map = externalView.getStateMap(partition);
for (String key : map.keySet()) {
if (map.get(key).equalsIgnoreCase("SLAVE")) {
map.put(key, "ERROR");
break;
}
}
externalView.setStateMap(partition, map);
}
monitor.updateResourceState(externalView, idealState, stateModelDef);
Assert.assertEquals(monitor.getDifferenceWithIdealStateGauge(), errorCount);
Assert.assertEquals(monitor.getErrorPartitionGauge(), errorCount);
Assert.assertEquals(monitor.getExternalViewPartitionGauge(), _partitions);
Assert.assertEquals(monitor.getPartitionGauge(), _partitions);
Assert.assertEquals(monitor.getMissingMinActiveReplicaPartitionGauge(), 0);
Assert.assertEquals(monitor.getMissingReplicaPartitionGauge(), errorCount);
Assert.assertEquals(monitor.getMissingTopStatePartitionGauge(), 0);
int lessMinActiveReplica = 6;
externalView = new ExternalView(deepCopyZNRecord(idealStateRecord));
start = r.nextInt(_partitions - lessMinActiveReplica - 1);
for (int i = start; i < start + lessMinActiveReplica; i++) {
String partition = _dbName + "_" + i;
Map<String, String> map = externalView.getStateMap(partition);
Iterator<String> it = map.keySet().iterator();
int flag = 0;
while (it.hasNext()) {
String key = it.next();
if (map.get(key).equalsIgnoreCase("SLAVE")) {
if (flag++ % 2 == 0) {
map.put(key, "OFFLINE");
} else {
it.remove();
}
}
}
externalView.setStateMap(partition, map);
}
monitor.updateResourceState(externalView, idealState, stateModelDef);
Assert.assertEquals(monitor.getDifferenceWithIdealStateGauge(), lessMinActiveReplica);
Assert.assertEquals(monitor.getErrorPartitionGauge(), 0);
Assert.assertEquals(monitor.getExternalViewPartitionGauge(), _partitions);
Assert.assertEquals(monitor.getPartitionGauge(), _partitions);
Assert.assertEquals(monitor.getMissingMinActiveReplicaPartitionGauge(), lessMinActiveReplica);
Assert.assertEquals(monitor.getMissingReplicaPartitionGauge(), lessMinActiveReplica);
Assert.assertEquals(monitor.getMissingTopStatePartitionGauge(), 0);
int lessReplica = 4;
externalView = new ExternalView(deepCopyZNRecord(idealStateRecord));
start = r.nextInt(_partitions - lessReplica - 1);
for (int i = start; i < start + lessReplica; i++) {
String partition = _dbName + "_" + i;
Map<String, String> map = externalView.getStateMap(partition);
int flag = 0;
Iterator<String> it = map.keySet().iterator();
while (it.hasNext()) {
String key = it.next();
if (map.get(key).equalsIgnoreCase("SLAVE")) {
if (flag++ % 2 == 0) {
map.put(key, "OFFLINE");
} else {
it.remove();
}
break;
}
}
externalView.setStateMap(partition, map);
}
monitor.updateResourceState(externalView, idealState, stateModelDef);
Assert.assertEquals(monitor.getDifferenceWithIdealStateGauge(), lessReplica);
Assert.assertEquals(monitor.getErrorPartitionGauge(), 0);
Assert.assertEquals(monitor.getExternalViewPartitionGauge(), _partitions);
Assert.assertEquals(monitor.getPartitionGauge(), _partitions);
Assert.assertEquals(monitor.getMissingMinActiveReplicaPartitionGauge(), 0);
Assert.assertEquals(monitor.getMissingReplicaPartitionGauge(), lessReplica);
Assert.assertEquals(monitor.getMissingTopStatePartitionGauge(), 0);
int missTopState = 7;
externalView = new ExternalView(deepCopyZNRecord(idealStateRecord));
start = r.nextInt(_partitions - missTopState - 1);
for (int i = start; i < start + missTopState; i++) {
String partition = _dbName + "_" + i;
Map<String, String> map = externalView.getStateMap(partition);
int flag = 0;
for (String key : map.keySet()) {
if (map.get(key).equalsIgnoreCase("MASTER")) {
if (flag++ % 2 == 0) {
map.put(key, "OFFLINE");
} else {
map.remove(key);
}
break;
}
}
externalView.setStateMap(partition, map);
}
monitor.updateResourceState(externalView, idealState, stateModelDef);
Assert.assertEquals(monitor.getDifferenceWithIdealStateGauge(), missTopState);
Assert.assertEquals(monitor.getErrorPartitionGauge(), 0);
Assert.assertEquals(monitor.getExternalViewPartitionGauge(), _partitions);
Assert.assertEquals(monitor.getPartitionGauge(), _partitions);
Assert.assertEquals(monitor.getMissingMinActiveReplicaPartitionGauge(), 0);
Assert.assertEquals(monitor.getMissingReplicaPartitionGauge(), missTopState);
Assert.assertEquals(monitor.getMissingTopStatePartitionGauge(), missTopState);
Assert.assertEquals(monitor.getNumPendingStateTransitionGauge(), 0);
// test pending state transition message report and read
int messageCount = new Random().nextInt(_partitions) + 1;
monitor.updatePendingStateTransitionMessages(messageCount);
Assert.assertEquals(monitor.getNumPendingStateTransitionGauge(), messageCount);
Assert.assertEquals(monitor.getRebalanceState(),
ResourceMonitor.RebalanceStatus.UNKNOWN.name());
monitor.setRebalanceState(ResourceMonitor.RebalanceStatus.NORMAL);
Assert
.assertEquals(monitor.getRebalanceState(), ResourceMonitor.RebalanceStatus.NORMAL.name());
monitor.setRebalanceState(ResourceMonitor.RebalanceStatus.BEST_POSSIBLE_STATE_CAL_FAILED);
Assert.assertEquals(monitor.getRebalanceState(),
ResourceMonitor.RebalanceStatus.BEST_POSSIBLE_STATE_CAL_FAILED.name());
monitor.setRebalanceState(ResourceMonitor.RebalanceStatus.INTERMEDIATE_STATE_CAL_FAILED);
Assert.assertEquals(monitor.getRebalanceState(),
ResourceMonitor.RebalanceStatus.INTERMEDIATE_STATE_CAL_FAILED.name());
} finally {
// Has to unregister this monitor to clean up. Otherwise, later tests may be affected and fail.
monitor.unregister();
}
}
@Test
public void testUpdatePartitionWeightStats() throws JMException, IOException {
final MBeanServerConnection mBeanServer = ManagementFactory.getPlatformMBeanServer();
final String clusterName = TestHelper.getTestMethodName();
final String resource = "testDB";
final ObjectName resourceObjectName = new ObjectName("testDomain:key=value");
final ResourceMonitor monitor =
new ResourceMonitor(clusterName, resource, resourceObjectName);
monitor.register();
try {
Map<String, Map<String, Integer>> partitionWeightMap =
ImmutableMap.of(resource, ImmutableMap.of("capacity1", 20, "capacity2", 40));
// Update Metrics
partitionWeightMap.values().forEach(monitor::updatePartitionWeightStats);
verifyPartitionWeightMetrics(mBeanServer, resourceObjectName, partitionWeightMap);
// Change capacity keys: "capacity2" -> "capacity3"
partitionWeightMap =
ImmutableMap.of(resource, ImmutableMap.of("capacity1", 20, "capacity3", 60));
// Update metrics.
partitionWeightMap.values().forEach(monitor::updatePartitionWeightStats);
// Verify results.
verifyPartitionWeightMetrics(mBeanServer, resourceObjectName, partitionWeightMap);
// "capacity2" metric should not exist in MBean server.
String removedAttribute = "capacity2Gauge";
try {
mBeanServer.getAttribute(resourceObjectName, removedAttribute);
Assert.fail("AttributeNotFoundException should be thrown because attribute [capacity2Gauge]"
+ " is removed.");
} catch (AttributeNotFoundException expected) {
}
} finally {
// Reset monitor.
monitor.unregister();
Assert.assertFalse(mBeanServer.isRegistered(resourceObjectName),
"Failed to unregister resource monitor.");
}
}
/**
 * Returns a deep copy of the given ZNRecord, duplicating its simple, list, and map fields as
 * well as its raw payload and metadata.
 *
 * @param record the record to copy
 * @return an independent copy of {@code record}
 */
public static ZNRecord deepCopyZNRecord(ZNRecord record) {
ZNRecord copy = new ZNRecord(record.getId());
copy.getSimpleFields().putAll(record.getSimpleFields());
for (String mapKey : record.getMapFields().keySet()) {
Map<String, String> mapField = record.getMapFields().get(mapKey);
copy.getMapFields().put(mapKey, new TreeMap<>(mapField));
}
for (String listKey : record.getListFields().keySet()) {
copy.getListFields().put(listKey, new ArrayList<>(record.getListFields().get(listKey)));
}
if (record.getRawPayload() != null) {
byte[] rawPayload = new byte[record.getRawPayload().length];
System.arraycopy(record.getRawPayload(), 0, rawPayload, 0, record.getRawPayload().length);
copy.setRawPayload(rawPayload);
}
copy.setVersion(record.getVersion());
copy.setCreationTime(record.getCreationTime());
copy.setModifiedTime(record.getModifiedTime());
copy.setEphemeralOwner(record.getEphemeralOwner());
return copy;
}
private void verifyPartitionWeightMetrics(MBeanServerConnection mBeanServer,
ObjectName objectName, Map<String, Map<String, Integer>> expectedPartitionWeightMap)
throws IOException, AttributeNotFoundException, MBeanException, ReflectionException,
InstanceNotFoundException {
final String gaugeMetricSuffix = "Gauge";
for (Map.Entry<String, Map<String, Integer>> entry : expectedPartitionWeightMap.entrySet()) {
// Resource monitor for this resource is already registered.
Assert.assertTrue(mBeanServer.isRegistered(objectName));
for (Map.Entry<String, Integer> capacityEntry : entry.getValue().entrySet()) {
String attributeName = capacityEntry.getKey() + gaugeMetricSuffix;
try {
// Wait until the attribute has been registered with the MBean server.
Assert.assertTrue(TestHelper.verify(
() -> !mBeanServer.getAttributes(objectName, new String[]{attributeName}).isEmpty(),
2000));
} catch (Exception ignored) {
}
Assert.assertEquals((long) mBeanServer.getAttribute(objectName, attributeName),
(long) capacityEntry.getValue());
}
}
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestClusterAggregateMetrics.java
package org.apache.helix.monitoring.mbeans;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
import javax.management.MBeanServerConnection;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.Query;
import javax.management.QueryExp;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.TestHelper;
import org.apache.helix.common.ZkTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.ClusterStateVerifier;
import org.apache.helix.tools.ClusterVerifiers.BestPossibleExternalViewVerifier;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Tests the MBean metrics instrumented in ClusterStatusMonitor that aggregate individual
 * resource-level metrics into cluster-level figures.
 * The test sets up 3 participants and 5 partitions with 3 replicas each, and monitors how the
 * numbers change when the participants are disabled and re-enabled.
 */
public class TestClusterAggregateMetrics extends ZkTestBase {
// Configurable values for test setup
private static final int NUM_PARTICIPANTS = 3;
private static final int NUM_PARTITIONS = 5;
private static final int NUM_REPLICAS = 3;
private static final String PARTITION_COUNT = "TotalPartitionGauge";
private static final String ERROR_PARTITION_COUNT = "ErrorPartitionGauge";
private static final String WITHOUT_TOPSTATE_COUNT = "MissingTopStatePartitionGauge";
private static final String IS_EV_MISMATCH_COUNT = "DifferenceWithIdealStateGauge";
private static final int START_PORT = 12918;
private static final String STATE_MODEL = "MasterSlave";
private static final String TEST_DB = "TestDB";
private static final MBeanServerConnection _server = ManagementFactory.getPlatformMBeanServer();
private final String CLASS_NAME = getShortClassName();
private final String CLUSTER_NAME = CLUSTER_PREFIX + "_" + CLASS_NAME;
private ClusterSetup _setupTool;
private HelixManager _manager;
private MockParticipantManager[] _participants = new MockParticipantManager[NUM_PARTICIPANTS];
private ClusterControllerManager _controller;
private Map<String, Object> _beanValueMap = new HashMap<>();
@BeforeClass
public void beforeClass() throws Exception {
System.out.println("START " + CLASS_NAME + " at " + new Date(System.currentTimeMillis()));
_setupTool = new ClusterSetup(ZK_ADDR);
// setup storage cluster
_setupTool.addCluster(CLUSTER_NAME, true);
_setupTool.addResourceToCluster(CLUSTER_NAME, TEST_DB, NUM_PARTITIONS, STATE_MODEL);
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
String storageNodeName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
_setupTool.addInstanceToCluster(CLUSTER_NAME, storageNodeName);
}
_setupTool.rebalanceStorageCluster(CLUSTER_NAME, TEST_DB, NUM_REPLICAS);
// start dummy participants
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
_participants[i] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
_participants[i].syncStart();
}
// start controller
String controllerName = CONTROLLER_PREFIX + "_0";
_controller = new ClusterControllerManager(ZK_ADDR, CLUSTER_NAME, controllerName);
_controller.syncStart();
boolean result = ClusterStateVerifier.verifyByPolling(
new ClusterStateVerifier.MasterNbInExtViewVerifier(ZK_ADDR, CLUSTER_NAME), 10000, 100);
Assert.assertTrue(result);
result = ClusterStateVerifier.verifyByPolling(
new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR, CLUSTER_NAME), 10000, 100);
Assert.assertTrue(result);
// create cluster manager
_manager = HelixManagerFactory.getZKHelixManager(CLUSTER_NAME, "Admin",
InstanceType.ADMINISTRATOR, ZK_ADDR);
_manager.connect();
}
/**
* Shutdown order: 1) disconnect the controller 2) disconnect participants.
*/
@AfterClass
public void afterClass() {
if (_controller != null && _controller.isConnected()) {
_controller.syncStop();
}
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
if (_participants[i] != null && _participants[i].isConnected()) {
_participants[i].syncStop();
}
}
if (_manager != null && _manager.isConnected()) {
_manager.disconnect();
}
deleteCluster(CLUSTER_NAME);
System.out.println("END " + CLASS_NAME + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testAggregateMetrics() throws Exception {
BestPossibleExternalViewVerifier verifier =
new BestPossibleExternalViewVerifier.Builder(CLUSTER_NAME)
.setZkClient(_gZkClient)
.setWaitTillVerify(TestHelper.DEFAULT_REBALANCE_PROCESSING_WAIT_TIME)
.build();
// Everything should be up and running initially with 5 total partitions
Map<String, Long> expectedMetricValues = new HashMap<>();
expectedMetricValues.put(PARTITION_COUNT, 5L);
expectedMetricValues.put(ERROR_PARTITION_COUNT, 0L);
expectedMetricValues.put(WITHOUT_TOPSTATE_COUNT, 0L);
expectedMetricValues.put(IS_EV_MISMATCH_COUNT, 0L);
Assert.assertTrue(TestHelper.verify(() -> {
return verifyMetrics(expectedMetricValues);
}, TestHelper.WAIT_DURATION), "Expected metrics and observed metrics don't align.");
// Disable all Participants (instances)
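// The cluster is put in maintenance mode around the disables, presumably so the controller
// does not start rebalancing while the instances go down one by one.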
_setupTool.getClusterManagementTool()
.manuallyEnableMaintenanceMode(CLUSTER_NAME, true, "Test", null);
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
_setupTool.getClusterManagementTool().enableInstance(CLUSTER_NAME, instanceName, false);
}
_setupTool.getClusterManagementTool()
.manuallyEnableMaintenanceMode(CLUSTER_NAME, false, "Test", null);
// Confirm that the Participants have been disabled
boolean result = TestHelper.verify(() -> {
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
InstanceConfig instanceConfig =
_manager.getConfigAccessor().getInstanceConfig(CLUSTER_NAME, instanceName);
if (instanceConfig.getInstanceEnabled()) {
return false;
}
}
return true;
}, TestHelper.WAIT_DURATION);
Assert.assertTrue(result);
Assert.assertTrue(verifier.verifyByPolling());
expectedMetricValues.put(WITHOUT_TOPSTATE_COUNT, 5L);
Assert.assertTrue(TestHelper.verify(() -> {
return verifyMetrics(expectedMetricValues);
}, TestHelper.WAIT_DURATION), "Expected metrics and observed metrics don't align.");
// Re-enable all Participants (instances)
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
_setupTool.getClusterManagementTool().enableInstance(CLUSTER_NAME, instanceName, true);
}
// Confirm that the Participants have been enabled
result = TestHelper.verify(() -> {
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
InstanceConfig instanceConfig =
_manager.getConfigAccessor().getInstanceConfig(CLUSTER_NAME, instanceName);
if (!instanceConfig.getInstanceEnabled()) {
return false;
}
}
return true;
}, TestHelper.WAIT_DURATION);
Assert.assertTrue(result);
Assert.assertTrue(verifier.verifyByPolling());
expectedMetricValues.put(WITHOUT_TOPSTATE_COUNT, 0L);
result = TestHelper.verify(() -> {
return verifyMetrics(expectedMetricValues);
}, TestHelper.WAIT_DURATION);
Assert.assertTrue(result);
// Drop the resource and check that all metrics are zero.
_setupTool.dropResourceFromCluster(CLUSTER_NAME, TEST_DB);
// Check that the resource has been removed
result = TestHelper.verify(
() -> _manager.getHelixDataAccessor().getPropertyStat(
_manager.getHelixDataAccessor().keyBuilder().idealStates(TEST_DB)) == null,
TestHelper.WAIT_DURATION);
Assert.assertTrue(result);
Assert.assertTrue(verifier.verifyByPolling());
expectedMetricValues.put(PARTITION_COUNT, 0L);
Assert.assertTrue(TestHelper.verify(() -> {
return verifyMetrics(expectedMetricValues);
}, TestHelper.WAIT_DURATION), "Expected metrics and observed metrics don't align.");
}
/**
* Queries for all MBeans from the MBean Server and only looks at the relevant MBean and gets its
* metric numbers.
*/
private void updateMetrics() {
try {
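// Match any MBean whose SensorName attribute contains this cluster's name.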
QueryExp exp = Query.match(Query.attr("SensorName"), Query.value("*" + CLUSTER_NAME + "*"));
Set<ObjectInstance> mbeans = new HashSet<>(ManagementFactory.getPlatformMBeanServer()
.queryMBeans(new ObjectName("ClusterStatus:*"), exp));
for (ObjectInstance instance : mbeans) {
ObjectName beanName = instance.getObjectName();
if (beanName.toString().equals("ClusterStatus:cluster=" + CLUSTER_NAME)) {
MBeanInfo info = _server.getMBeanInfo(beanName);
MBeanAttributeInfo[] infos = info.getAttributes();
for (MBeanAttributeInfo infoItem : infos) {
Object val = _server.getAttribute(beanName, infoItem.getName());
_beanValueMap.put(infoItem.getName(), val);
}
}
}
} catch (Exception e) {
// Best-effort update: swallow failures so a caller polling via verifyMetrics() can simply retry.
}
}
private boolean verifyMetrics(Map<String, Long> expectedValues) {
updateMetrics();
// Compare against the expected value first to avoid an NPE when a gauge has not been reported yet.
return expectedValues.entrySet().stream()
.allMatch(entry -> entry.getValue().equals(_beanValueMap.get(entry.getKey())));
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/TestAsyncCallbackSvc.java
package org.apache.helix.messaging;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.helix.HelixException;
import org.apache.helix.HelixManager;
import org.apache.helix.NotificationContext;
import org.apache.helix.messaging.handling.AsyncCallbackService;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.mock.MockManager;
import org.apache.helix.model.Message;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
public class TestAsyncCallbackSvc {
class MockHelixManager extends MockManager {
public String getSessionId() {
return "123";
}
}
class TestAsyncCallback extends AsyncCallback {
HashSet<String> _repliedMessageId = new HashSet<String>();
@Override
public void onTimeOut() {
// No-op: timeouts are not exercised in this test.
}
@Override
public void onReplyMessage(Message message) {
_repliedMessageId.add(message.getMsgId());
}
}
@Test(groups = {
"unitTest"
})
public void testAsyncCallbackSvc() throws Exception {
AsyncCallbackService svc = new AsyncCallbackService();
HelixManager manager = new MockHelixManager();
NotificationContext changeContext = new NotificationContext(manager);
Message msg = new Message(svc.getMessageType(), UUID.randomUUID().toString());
msg.setTgtSessionId(manager.getSessionId());
try {
MessageHandler aHandler = svc.createHandler(msg, changeContext);
} catch (HelixException e) {
AssertJUnit.assertTrue(e.getMessage().indexOf(msg.getMsgId()) != -1);
}
Message msg2 = new Message("RandomType", UUID.randomUUID().toString());
msg2.setTgtSessionId(manager.getSessionId());
try {
MessageHandler aHandler = svc.createHandler(msg2, changeContext);
} catch (HelixException e) {
AssertJUnit.assertTrue(e.getMessage().indexOf(msg2.getMsgId()) != -1);
}
Message msg3 = new Message(svc.getMessageType(), UUID.randomUUID().toString());
msg3.setTgtSessionId(manager.getSessionId());
msg3.setCorrelationId("wfwegw");
try {
MessageHandler aHandler = svc.createHandler(msg3, changeContext);
} catch (HelixException e) {
AssertJUnit.assertTrue(e.getMessage().indexOf(msg3.getMsgId()) != -1);
}
TestAsyncCallback callback = new TestAsyncCallback();
String corrId = UUID.randomUUID().toString();
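// The second registration under the same correlation id supersedes the throwaway callback
// registered first; only "callback" should receive the reply below.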
svc.registerAsyncCallback(corrId, new TestAsyncCallback());
svc.registerAsyncCallback(corrId, callback);
List<Message> msgSent = new ArrayList<Message>();
msgSent.add(new Message("Test", UUID.randomUUID().toString()));
callback.setMessagesSent(msgSent);
msg = new Message(svc.getMessageType(), UUID.randomUUID().toString());
msg.setTgtSessionId("*");
msg.setCorrelationId(corrId);
MessageHandler aHandler = svc.createHandler(msg, changeContext);
aHandler.handleMessage();
AssertJUnit.assertTrue(callback.isDone());
AssertJUnit.assertTrue(callback._repliedMessageId.contains(msg.getMsgId()));
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/TestAsyncCallback.java
package org.apache.helix.messaging;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.UUID;
import org.apache.helix.model.Message;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
public class TestAsyncCallback {
class AsyncCallbackSample extends AsyncCallback {
int _onTimeOutCalled = 0;
int _onReplyMessageCalled = 0;
@Override
public void onTimeOut() {
_onTimeOutCalled++;
}
@Override
public void onReplyMessage(Message message) {
_onReplyMessageCalled++;
}
}
@Test()
public void testAsyncCallback() throws Exception {
System.out.println("START TestAsyncCallback at " + new Date(System.currentTimeMillis()));
AsyncCallbackSample callback = new AsyncCallbackSample();
AssertJUnit.assertFalse(callback.isInterrupted());
AssertJUnit.assertFalse(callback.isTimedOut());
AssertJUnit.assertTrue(callback.getMessageReplied().size() == 0);
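// Scenario 1: no timeout is set; the callback is done once every sent message has a reply.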
int nMsgs = 5;
List<Message> messageSent = new ArrayList<Message>();
for (int i = 0; i < nMsgs; i++) {
messageSent.add(new Message("Test", UUID.randomUUID().toString()));
}
callback.setMessagesSent(messageSent);
for (int i = 0; i < nMsgs; i++) {
AssertJUnit.assertFalse(callback.isDone());
callback.onReply(new Message("TestReply", UUID.randomUUID().toString()));
}
AssertJUnit.assertTrue(callback.isDone());
AssertJUnit.assertTrue(callback._onTimeOutCalled == 0);
sleep(50);
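// Scenario 2: the timeout fires before the last reply arrives; onTimeOut should run exactly
// once and the callback never completes.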
callback = new AsyncCallbackSample();
callback.setMessagesSent(messageSent);
callback.setTimeout(1000);
sleep(50);
callback.startTimer();
AssertJUnit.assertFalse(callback.isTimedOut());
for (int i = 0; i < nMsgs - 1; i++) {
sleep(50);
AssertJUnit.assertFalse(callback.isDone());
AssertJUnit.assertTrue(callback._onReplyMessageCalled == i);
callback.onReply(new Message("TestReply", UUID.randomUUID().toString()));
}
sleep(1000);
AssertJUnit.assertTrue(callback.isTimedOut());
AssertJUnit.assertTrue(callback._onTimeOutCalled == 1);
AssertJUnit.assertFalse(callback.isDone());
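// Scenario 3: all replies arrive before the timeout, so the timer should expire without
// invoking onTimeOut.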
callback = new AsyncCallbackSample();
callback.setMessagesSent(messageSent);
callback.setTimeout(1000);
callback.startTimer();
sleep(50);
AssertJUnit.assertFalse(callback.isTimedOut());
for (int i = 0; i < nMsgs; i++) {
AssertJUnit.assertFalse(callback.isDone());
sleep(50);
AssertJUnit.assertTrue(callback._onReplyMessageCalled == i);
callback.onReply(new Message("TestReply", UUID.randomUUID().toString()));
}
AssertJUnit.assertTrue(callback.isDone());
sleep(1300);
AssertJUnit.assertFalse(callback.isTimedOut());
AssertJUnit.assertTrue(callback._onTimeOutCalled == 0);
System.out.println("END TestAsyncCallback at " + new Date(System.currentTimeMillis()));
}
void sleep(int time) {
try {
Thread.sleep(time);
} catch (Exception e) {
System.out.println(e);
}
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/TestDefaultMessagingService.java
package org.apache.helix.messaging;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import com.google.common.collect.ImmutableList;
import org.apache.helix.Criteria;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixProperty;
import org.apache.helix.InstanceType;
import org.apache.helix.MockAccessor;
import org.apache.helix.NotificationContext;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyType;
import org.apache.helix.SystemPropertyKeys;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.messaging.handling.MessageHandlerFactory;
import org.apache.helix.messaging.handling.MultiTypeMessageHandlerFactory;
import org.apache.helix.messaging.handling.TaskExecutor;
import org.apache.helix.mock.MockManager;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.LiveInstance.LiveInstanceProperty;
import org.apache.helix.model.Message;
import org.apache.helix.tools.DefaultIdealStateCalculator;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.Assert;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
public class TestDefaultMessagingService {
class MockHelixManager extends MockManager {
class MockDataAccessor extends MockAccessor {
@Override
public <T extends HelixProperty> T getProperty(PropertyKey key) {
PropertyType type = key.getType();
if (type == PropertyType.EXTERNALVIEW || type == PropertyType.IDEALSTATES) {
return (T) new ExternalView(_externalView);
}
return null;
}
@Override
public <T extends HelixProperty> List<T> getChildValues(PropertyKey key,
boolean throwException) {
PropertyType type = key.getType();
List<T> result = new ArrayList<T>();
Class<? extends HelixProperty> clazz = key.getTypeClass();
if (type == PropertyType.EXTERNALVIEW || type == PropertyType.IDEALSTATES) {
HelixProperty typedInstance = HelixProperty.convertToTypedInstance(clazz, _externalView);
result.add((T) typedInstance);
return result;
} else if (type == PropertyType.LIVEINSTANCES) {
return (List<T>) HelixProperty.convertToTypedList(clazz, _liveInstances);
}
return result;
}
}
HelixDataAccessor _accessor = new MockDataAccessor();
ZNRecord _externalView;
List<String> _instances;
List<ZNRecord> _liveInstances;
String _db = "DB";
int _replicas = 3;
int _partitions = 50;
public MockHelixManager() {
_liveInstances = new ArrayList<ZNRecord>();
_instances = new ArrayList<String>();
for (int i = 0; i < 5; i++) {
String instance = "localhost_" + (12918 + i);
_instances.add(instance);
ZNRecord metaData = new ZNRecord(instance);
metaData.setSimpleField(LiveInstanceProperty.SESSION_ID.toString(), UUID.randomUUID()
.toString());
_liveInstances.add(metaData);
}
_externalView =
DefaultIdealStateCalculator.calculateIdealState(_instances, _partitions, _replicas, _db,
"MASTER", "SLAVE");
}
@Override
public boolean isConnected() {
return true;
}
@Override
public HelixDataAccessor getHelixDataAccessor() {
return _accessor;
}
@Override
public String getInstanceName() {
return "localhost_12919";
}
@Override
public InstanceType getInstanceType() {
return InstanceType.PARTICIPANT;
}
}
class TestMessageHandlerFactory implements MultiTypeMessageHandlerFactory {
class TestMessageHandler extends MessageHandler {
public TestMessageHandler(Message message, NotificationContext context) {
super(message, context);
}
@Override
public HelixTaskResult handleMessage() throws InterruptedException {
HelixTaskResult result = new HelixTaskResult();
result.setSuccess(true);
return result;
}
@Override
public void onError(Exception e, ErrorCode code, ErrorType type) {
// TODO Auto-generated method stub
}
}
@Override
public MessageHandler createHandler(Message message, NotificationContext context) {
return new TestMessageHandler(message, context);
}
@Override public List<String> getMessageTypes() {
return ImmutableList.of("TestingMessageHandler");
}
@Override
public void reset() {
// No state to reset.
}
}
class TestStateTransitionHandlerFactory implements MultiTypeMessageHandlerFactory {
@Override
public MessageHandler createHandler(Message message, NotificationContext context) {
return null;
}
@Override
public List<String> getMessageTypes() {
return ImmutableList.of(Message.MessageType.STATE_TRANSITION.name(),
Message.MessageType.STATE_TRANSITION_CANCELLATION.name(),
Message.MessageType.CONTROLLER_MSG.name());
}
@Override
public void reset() {
}
}
class MockDefaultMessagingService extends DefaultMessagingService {
public MockDefaultMessagingService(HelixManager manager) {
super(manager);
}
public Map<String, MessageHandlerFactory> getMessageHandlerFactoryMap() {
return _messageHandlerFactoriestobeAdded;
}
}
@Test()
public void TestMessageSend() {
HelixManager manager = new MockHelixManager();
DefaultMessagingService svc = new DefaultMessagingService(manager);
TestMessageHandlerFactory factory = new TestMessageHandlerFactory();
svc.registerMessageHandlerFactory(factory.getMessageType(), factory);
Criteria recipientCriteria = new Criteria();
recipientCriteria.setInstanceName("localhost_12919");
recipientCriteria.setRecipientInstanceType(InstanceType.PARTICIPANT);
recipientCriteria.setSelfExcluded(true);
Message template = new Message(factory.getMessageType(), UUID.randomUUID().toString());
AssertJUnit.assertEquals(0, svc.send(recipientCriteria, template));
recipientCriteria.setSelfExcluded(false);
AssertJUnit.assertEquals(1, svc.send(recipientCriteria, template));
// all instances, all partitions
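// 200 = 50 partitions x 4 replica states per partition (1 master + 3 slaves) in the mock external view.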
recipientCriteria.setSelfExcluded(false);
recipientCriteria.setInstanceName("%");
recipientCriteria.setResource("DB");
recipientCriteria.setPartition("%");
AssertJUnit.assertEquals(200, svc.send(recipientCriteria, template));
// all instances, all partitions, use * instead of %
recipientCriteria.setSelfExcluded(false);
recipientCriteria.setInstanceName("*");
recipientCriteria.setResource("DB");
recipientCriteria.setPartition("*");
AssertJUnit.assertEquals(200, svc.send(recipientCriteria, template));
// tail pattern
recipientCriteria.setSelfExcluded(false);
recipientCriteria.setInstanceName("localhost%");
recipientCriteria.setResource("DB");
recipientCriteria.setPartition("%");
AssertJUnit.assertEquals(200, svc.send(recipientCriteria, template));
// exclude this instance, send to all others for all partitions
recipientCriteria.setSelfExcluded(true);
recipientCriteria.setInstanceName("%");
recipientCriteria.setResource("DB");
recipientCriteria.setPartition("%");
AssertJUnit.assertEquals(159, svc.send(recipientCriteria, template));
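// 200 - 159 = 41 of the replicas are hosted on localhost_12919 itself and are excluded here.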
// single instance, all partitions
recipientCriteria.setSelfExcluded(true);
recipientCriteria.setInstanceName("localhost_12920");
recipientCriteria.setResource("DB");
recipientCriteria.setPartition("%");
AssertJUnit.assertEquals(39, svc.send(recipientCriteria, template));
// single character wildcards
recipientCriteria.setSelfExcluded(true);
recipientCriteria.setInstanceName("l_calhost_12_20");
recipientCriteria.setResource("DB");
recipientCriteria.setPartition("%");
AssertJUnit.assertEquals(39, svc.send(recipientCriteria, template));
// head pattern
recipientCriteria.setSelfExcluded(true);
recipientCriteria.setInstanceName("%12920");
recipientCriteria.setResource("DB");
recipientCriteria.setPartition("%");
AssertJUnit.assertEquals(39, svc.send(recipientCriteria, template));
// middle pattern
recipientCriteria.setSelfExcluded(true);
recipientCriteria.setInstanceName("l%_12920");
recipientCriteria.setResource("DB");
recipientCriteria.setPartition("%");
AssertJUnit.assertEquals(39, svc.send(recipientCriteria, template));
// send to a controller
recipientCriteria.setSelfExcluded(true);
recipientCriteria.setInstanceName("localhost_12920");
recipientCriteria.setRecipientInstanceType(InstanceType.CONTROLLER);
recipientCriteria.setResource("DB");
recipientCriteria.setPartition("%");
AssertJUnit.assertEquals(1, svc.send(recipientCriteria, template));
}
@Test
public void testMultipleMessageTypeRegistration() {
HelixManager manager = new MockManager();
MockDefaultMessagingService svc = new MockDefaultMessagingService(manager);
TestStateTransitionHandlerFactory factory = new TestStateTransitionHandlerFactory();
svc.registerMessageHandlerFactory(factory.getMessageTypes(), factory);
Assert.assertTrue(
svc.getMessageHandlerFactoryMap().containsKey(Message.MessageType.STATE_TRANSITION.name()));
Assert.assertTrue(svc.getMessageHandlerFactoryMap()
.containsKey(Message.MessageType.STATE_TRANSITION_CANCELLATION.name()));
Assert.assertTrue(
svc.getMessageHandlerFactoryMap().containsKey(Message.MessageType.CONTROLLER_MSG.name()));
}
@Test
public void testTaskThreadpoolResetTimeoutProperty() {
HelixManager manager = new MockManager();
System.setProperty(SystemPropertyKeys.TASK_THREADPOOL_RESET_TIMEOUT, "300");
MockDefaultMessagingService svc = new MockDefaultMessagingService(manager);
Assert.assertEquals(svc.getTaskThreadpoolResetTimeout(), 300);
System.clearProperty(SystemPropertyKeys.TASK_THREADPOOL_RESET_TIMEOUT);
svc = new MockDefaultMessagingService(new MockManager());
Assert.assertEquals(svc.getTaskThreadpoolResetTimeout(), TaskExecutor.DEFAULT_MSG_HANDLER_RESET_TIMEOUT_MS);
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/handling/TestResourceThreadpoolSize.java
package org.apache.helix.messaging.handling;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.TestHelper;
import org.apache.helix.integration.common.ZkStandAloneCMTestBase;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.integration.task.WorkflowGenerator;
import org.apache.helix.messaging.DefaultMessagingService;
import org.apache.helix.mock.participant.DummyProcess;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.Message.MessageType;
import org.apache.helix.model.builder.FullAutoModeISBuilder;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.tools.ClusterVerifiers.BestPossibleExternalViewVerifier;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestResourceThreadpoolSize extends ZkStandAloneCMTestBase {
public static final String TEST_FACTORY = "TestFactory";
public static final String ONLINE_OFFLINE = "OnlineOffline";
public static final String OFFLINE_TO_SLAVE = "OFFLINE.SLAVE";
public static final String SLAVE_TO_MASTER = "SLAVE.MASTER";
@Test
public void TestThreadPoolSizeConfig() {
String resourceName = "NextDB";
int numPartition = 64;
int numReplica = 3;
int threadPoolSize = 12;
setResourceThreadPoolSize(resourceName, threadPoolSize);
_gSetupTool.addResourceToCluster(CLUSTER_NAME, resourceName, numPartition, STATE_MODEL);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, resourceName, numReplica);
Assert.assertTrue(_clusterVerifier.verifyByPolling());
long taskcount = 0;
for (int i = 0; i < NODE_NR; i++) {
DefaultMessagingService svc =
(DefaultMessagingService) (_participants[i].getMessagingService());
HelixTaskExecutor helixExecutor = svc.getExecutor();
ThreadPoolExecutor executor =
(ThreadPoolExecutor) (helixExecutor._executorMap.get(MessageType.STATE_TRANSITION + "."
+ resourceName));
Assert.assertNotNull(executor);
Assert.assertEquals(threadPoolSize, executor.getMaximumPoolSize());
taskcount += executor.getCompletedTaskCount();
Assert.assertTrue(executor.getCompletedTaskCount() > 0);
}
// (numPartition * numReplica) O->S, numPartition S->M
// Plus a possible race condition: when the preference list is [n1, n2, n3]
// but n2 or n3 becomes Slave before n1 and is captured by the controller, i.e. [n1:O, n2:S, n3:O],
// the controller will set n2 to Master first and then change it back to n1.
Assert.assertTrue(taskcount >= numPartition * (numReplica + 1));
}
@Test (dependsOnMethods = "TestThreadPoolSizeConfig")
public void TestCustomizedResourceThreadPool() {
int customizedPoolSize = 7;
int configuredPoolSize = 9;
for (MockParticipantManager participant : _participants) {
participant.getStateMachineEngine().registerStateModelFactory(ONLINE_OFFLINE,
new TestOnlineOfflineStateModelFactory(customizedPoolSize, 0), TEST_FACTORY);
}
// add db with default thread pool
_gSetupTool.addResourceToCluster(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB + "1", 64,
STATE_MODEL);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB + "1", 3);
// add db with customized thread pool
IdealState idealState = new FullAutoModeISBuilder(WorkflowGenerator.DEFAULT_TGT_DB + "2")
.setStateModel(ONLINE_OFFLINE).setStateModelFactoryName(TEST_FACTORY).setNumPartitions(10)
.setNumReplica(1).build();
_gSetupTool.getClusterManagementTool()
.addResource(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB + "2", idealState);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB + "2", 1);
// add db with configured pool size
idealState = new FullAutoModeISBuilder(WorkflowGenerator.DEFAULT_TGT_DB + "3")
.setStateModel(ONLINE_OFFLINE).setStateModelFactoryName(TEST_FACTORY).setNumPartitions(10)
.setNumReplica(1).build();
_gSetupTool.getClusterManagementTool()
.addResource(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB + "3", idealState);
setResourceThreadPoolSize(WorkflowGenerator.DEFAULT_TGT_DB + "3", configuredPoolSize);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB + "3", 1);
Assert.assertTrue(_clusterVerifier.verifyByPolling());
for (int i = 0; i < NODE_NR; i++) {
DefaultMessagingService svc =
(DefaultMessagingService) (_participants[i].getMessagingService());
HelixTaskExecutor helixExecutor = svc.getExecutor();
ThreadPoolExecutor executor = (ThreadPoolExecutor) (helixExecutor._executorMap
.get(MessageType.STATE_TRANSITION + "." + WorkflowGenerator.DEFAULT_TGT_DB + "1"));
Assert.assertNull(executor);
executor = (ThreadPoolExecutor) (helixExecutor._executorMap
.get(MessageType.STATE_TRANSITION + "." + WorkflowGenerator.DEFAULT_TGT_DB + "2"));
Assert.assertNotNull(executor);
Assert.assertEquals(customizedPoolSize, executor.getMaximumPoolSize());
executor = (ThreadPoolExecutor) (helixExecutor._executorMap
.get(MessageType.STATE_TRANSITION + "." + WorkflowGenerator.DEFAULT_TGT_DB + "3"));
Assert.assertNotNull(executor);
Assert.assertEquals(configuredPoolSize, executor.getMaximumPoolSize());
}
}
@Test (dependsOnMethods = "TestCustomizedResourceThreadPool")
public void TestPerStateTransitionTypeThreadPool() throws InterruptedException {
String MASTER_SLAVE = "MasterSlave";
int customizedPoolSize = 22;
for (MockParticipantManager participant : _participants) {
participant.getStateMachineEngine().registerStateModelFactory(MASTER_SLAVE,
new TestMasterSlaveStateModelFactory(customizedPoolSize), TEST_FACTORY);
}
// add db with customized thread pool
IdealState idealState = new FullAutoModeISBuilder(WorkflowGenerator.DEFAULT_TGT_DB + "4")
.setStateModel(MASTER_SLAVE).setStateModelFactoryName(TEST_FACTORY).setNumPartitions(10)
.setNumReplica(1).build();
_gSetupTool.getClusterManagementTool()
.addResource(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB + "4", idealState);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB + "4", 1);
Assert.assertTrue(_clusterVerifier.verifyByPolling());
// Verify OFFLINE -> SLAVE and SLAVE -> MASTER have different threadpool size.
for (int i = 0; i < NODE_NR; i++) {
DefaultMessagingService svc =
(DefaultMessagingService) (_participants[i].getMessagingService());
HelixTaskExecutor helixExecutor = svc.getExecutor();
ThreadPoolExecutor executorOfflineToSlave = (ThreadPoolExecutor) (helixExecutor._executorMap
.get(MessageType.STATE_TRANSITION + "." + WorkflowGenerator.DEFAULT_TGT_DB + "4" + "."
+ OFFLINE_TO_SLAVE));
Assert.assertNotNull(executorOfflineToSlave);
Assert.assertEquals(customizedPoolSize, executorOfflineToSlave.getMaximumPoolSize());
ThreadPoolExecutor executorSlaveToMaster = (ThreadPoolExecutor) (helixExecutor._executorMap
.get(MessageType.STATE_TRANSITION + "." + WorkflowGenerator.DEFAULT_TGT_DB + "4" + "."
+ SLAVE_TO_MASTER));
Assert.assertNotNull(executorSlaveToMaster);
Assert.assertEquals(customizedPoolSize + 5, executorSlaveToMaster.getMaximumPoolSize());
}
}
@Test (dependsOnMethods = "TestPerStateTransitionTypeThreadPool")
public void testBatchMessageThreadPoolSize() throws InterruptedException {
int customizedPoolSize = 5;
_participants[0].getStateMachineEngine().registerStateModelFactory("OnlineOffline",
new TestOnlineOfflineStateModelFactory(customizedPoolSize, 2000), "TestFactory");
for (int i = 1; i < _participants.length; i++) {
_participants[i].syncStop();
}
Assert.assertTrue(_clusterVerifier.verifyByPolling());
// Add 10 dbs with batch message enabled. Each db has 10 partitions.
// So it will have 10 batch messages and each batch message has 10 sub messages.
int numberOfDbs = 10;
for (int i = 0; i < numberOfDbs; i++) {
String dbName = "TestDBABatch" + i;
IdealState idealState = new FullAutoModeISBuilder(dbName).setStateModel("OnlineOffline")
.setStateModelFactoryName("TestFactory").setNumPartitions(10).setNumReplica(1).build();
idealState.setBatchMessageMode(true);
_gSetupTool.getClusterManagementTool().addResource(CLUSTER_NAME, dbName, idealState);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, dbName, 1);
}
Assert.assertTrue(_clusterVerifier.verifyByPolling());
DefaultMessagingService svc =
(DefaultMessagingService) (_participants[0].getMessagingService());
HelixTaskExecutor helixExecutor = svc.getExecutor();
ThreadPoolExecutor executor = (ThreadPoolExecutor) (helixExecutor._batchMessageExecutorService);
Assert.assertNotNull(executor);
// This ASSERT invariant is not true.
// _batchMessageExecutorService is created as newCachedThreadPool(),
// which will re-use existing threads if they are available,
// so there is no guarantee that new threads will be created for each new database.
// Assert.assertTrue(executor.getPoolSize() >= numberOfDbs);
BestPossibleExternalViewVerifier verifier =
new BestPossibleExternalViewVerifier.Builder(CLUSTER_NAME)
.setZkClient(_gZkClient)
.setWaitTillVerify(TestHelper.DEFAULT_REBALANCE_PROCESSING_WAIT_TIME)
.build();
Assert.assertTrue(verifier.verifyByPolling());
}
private void setResourceThreadPoolSize(String resourceName, int threadPoolSize) {
HelixManager manager = _participants[0];
ConfigAccessor accessor = manager.getConfigAccessor();
HelixConfigScope scope =
new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.RESOURCE)
.forCluster(manager.getClusterName()).forResource(resourceName).build();
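// Writes the per-resource MAX_THREADS config that HelixTaskExecutor consults when sizing the
// resource's state-transition thread pool.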
accessor.set(scope, HelixTaskExecutor.MAX_THREADS, "" + threadPoolSize);
}
public static class TestOnlineOfflineStateModelFactory
extends DummyProcess.DummyOnlineOfflineStateModelFactory {
int _threadPoolSize;
ExecutorService _threadPoolExecutor;
public TestOnlineOfflineStateModelFactory(int threadPoolSize, int delay) {
super(delay);
_threadPoolSize = threadPoolSize;
if (_threadPoolSize > 0) {
_threadPoolExecutor = Executors.newFixedThreadPool(_threadPoolSize);
}
}
@Override public ExecutorService getExecutorService(String resourceName) {
return _threadPoolExecutor;
}
}
public static class TestMasterSlaveStateModelFactory
extends DummyProcess.DummyMasterSlaveStateModelFactory {
int _startThreadPoolSize;
Map<String, ExecutorService> _threadPoolExecutorMap;
public TestMasterSlaveStateModelFactory(int startThreadPoolSize) {
super(0);
_startThreadPoolSize = startThreadPoolSize;
_threadPoolExecutorMap = new HashMap<String, ExecutorService>();
if (_startThreadPoolSize > 0) {
_threadPoolExecutorMap
.put(OFFLINE_TO_SLAVE, Executors.newFixedThreadPool(_startThreadPoolSize));
_threadPoolExecutorMap
.put(SLAVE_TO_MASTER, Executors.newFixedThreadPool(_startThreadPoolSize + 5));
}
}
@Override
public ExecutorService getExecutorService(String resourceName, String fromState,
String toState) {
return _threadPoolExecutorMap.get(fromState + "." + toState);
}
}
}
// File: Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/handling/TestHelixTaskExecutor.java
package org.apache.helix.messaging.handling;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.ExecutorService;
import com.google.common.collect.ImmutableList;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.helix.HelixConstants;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixDefinedState;
import org.apache.helix.HelixException;
import org.apache.helix.HelixManager;
import org.apache.helix.InstanceType;
import org.apache.helix.MockAccessor;
import org.apache.helix.NotificationContext;
import org.apache.helix.PropertyKey;
import org.apache.helix.TestHelper;
import org.apache.helix.examples.OnlineOfflineStateModelFactory;
import org.apache.helix.manager.zk.ZKHelixManager;
import org.apache.helix.mock.MockClusterMessagingService;
import org.apache.helix.mock.MockManager;
import org.apache.helix.mock.statemodel.MockMasterSlaveStateModel;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.Message;
import org.apache.helix.model.Message.MessageState;
import org.apache.helix.participant.HelixStateMachineEngine;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.participant.statemachine.StateModel;
import org.apache.helix.participant.statemachine.StateModelFactory;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.AssertJUnit;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import static org.mockito.Mockito.*;
public class TestHelixTaskExecutor {
@BeforeClass
public void beforeClass() {
System.out.println("START " + TestHelper.getTestClassName());
}
@AfterClass
public void afterClass() {
System.out.println("End " + TestHelper.getTestClassName());
}
public static class MockClusterManager extends MockManager {
@Override
public String getSessionId() {
return "123";
}
}
class TestMessageHandlerFactory implements MultiTypeMessageHandlerFactory {
final int _messageDelay;
int _handlersCreated = 0;
ConcurrentHashMap<String, String> _processedMsgIds = new ConcurrentHashMap<>();
ConcurrentSkipListSet<String> _completedMsgIds = new ConcurrentSkipListSet<>();
TestMessageHandlerFactory(int messageDelay) {
_messageDelay = messageDelay;
}
TestMessageHandlerFactory() {
_messageDelay = 100;
}
class TestMessageHandler extends MessageHandler {
public TestMessageHandler(Message message, NotificationContext context) {
super(message, context);
}
@Override
public HelixTaskResult handleMessage() throws InterruptedException {
HelixTaskResult result = new HelixTaskResult();
_processedMsgIds.put(_message.getMsgId(), _message.getMsgId());
Thread.sleep(_messageDelay);
result.setSuccess(true);
_completedMsgIds.add(_message.getMsgId());
return result;
}
@Override
public void onError(Exception e, ErrorCode code, ErrorType type) {
}
}
@Override
public MessageHandler createHandler(Message message, NotificationContext context) {
if (message.getMsgSubType() != null && message.getMsgSubType().equals("EXCEPTION")) {
throw new HelixException("Test Message handler exception, can ignore");
}
_handlersCreated++;
return new TestMessageHandler(message, context);
}
@Override
public List<String> getMessageTypes() {
return Collections.singletonList("TestingMessageHandler");
}
@Override
public void reset() {
}
}
class TestMessageHandlerFactory2 extends TestMessageHandlerFactory {
@Override
public List<String> getMessageTypes() {
return ImmutableList.of("TestingMessageHandler2");
}
}
private class TestMessageHandlerFactory3 extends TestMessageHandlerFactory {
private boolean _resetDone = false;
@Override
public List<String> getMessageTypes() {
return ImmutableList.of("msgType1", "msgType2", "msgType3");
}
@Override
public void reset() {
Assert.assertFalse(_resetDone, "reset() should only be triggered once in TestMessageHandlerFactory3");
_resetDone = true;
}
}
class CancellableHandlerFactory implements MultiTypeMessageHandlerFactory {
int _handlersCreated = 0;
ConcurrentHashMap<String, String> _processedMsgIds = new ConcurrentHashMap<String, String>();
ConcurrentHashMap<String, String> _processingMsgIds = new ConcurrentHashMap<String, String>();
ConcurrentHashMap<String, String> _timedOutMsgIds = new ConcurrentHashMap<String, String>();
class CancellableHandler extends MessageHandler {
public CancellableHandler(Message message, NotificationContext context) {
super(message, context);
}
public boolean _interrupted = false;
@Override
public HelixTaskResult handleMessage() throws InterruptedException {
HelixTaskResult result = new HelixTaskResult();
int sleepTimes = 15;
if (_message.getRecord().getSimpleFields().containsKey("Cancelcount")) {
sleepTimes = 10;
}
_processingMsgIds.put(_message.getMsgId(), _message.getMsgId());
try {
for (int i = 0; i < sleepTimes; i++) {
Thread.sleep(100);
}
} catch (InterruptedException e) {
_interrupted = true;
_timedOutMsgIds.put(_message.getMsgId(), "");
result.setInterrupted(true);
if (!_message.getRecord().getSimpleFields().containsKey("Cancelcount")) {
_message.getRecord().setSimpleField("Cancelcount", "1");
} else {
int c = Integer.parseInt(_message.getRecord().getSimpleField("Cancelcount"));
_message.getRecord().setSimpleField("Cancelcount", "" + (c + 1));
}
throw e;
}
_processedMsgIds.put(_message.getMsgId(), _message.getMsgId());
result.setSuccess(true);
return result;
}
@Override
public void onError(Exception e, ErrorCode code, ErrorType type) {
_message.getRecord().setSimpleField("exception", e.getMessage());
}
}
@Override
public MessageHandler createHandler(Message message, NotificationContext context) {
_handlersCreated++;
return new CancellableHandler(message, context);
}
@Override public List<String> getMessageTypes() {
return ImmutableList.of("Cancellable");
}
@Override
public void reset() {
_handlersCreated = 0;
_processedMsgIds.clear();
_processingMsgIds.clear();
_timedOutMsgIds.clear();
}
}
class TestStateTransitionHandlerFactory implements MultiTypeMessageHandlerFactory {
ConcurrentHashMap<String, String> _processedMsgIds = new ConcurrentHashMap<String, String>();
private final String _msgType;
private final long _delay;
public TestStateTransitionHandlerFactory(String msgType) {
this(msgType, -1);
}
public TestStateTransitionHandlerFactory(String msgType, long delay) {
_msgType = msgType;
_delay = delay;
}
class TestStateTransitionMessageHandler extends HelixStateTransitionHandler {
public TestStateTransitionMessageHandler(Message message, NotificationContext context,
CurrentState currentStateDelta) {
super(new StateModelFactory<StateModel>() {
// Empty no-op state model factory is good enough for the test.
}, new StateModel() {
// Empty no-op state model is good enough for the test.
}, message, context, currentStateDelta);
}
@Override
public HelixTaskResult handleMessage() {
HelixTaskResult result = new HelixTaskResult();
_processedMsgIds.put(_message.getMsgId(), _message.getMsgId());
if (_delay > 0) {
System.out.println("Sleeping..." + _delay);
try {
Thread.sleep(_delay);
} catch (Exception e) {
assert (false);
}
}
result.setSuccess(true);
return result;
}
@Override
public StaleMessageValidateResult staleMessageValidator() {
return super.staleMessageValidator();
}
}
@Override
public MessageHandler createHandler(Message message, NotificationContext context) {
CurrentState currentStateDelta = new CurrentState(message.getResourceName());
currentStateDelta.setSessionId(message.getTgtSessionId());
currentStateDelta.setStateModelDefRef(message.getStateModelDef());
currentStateDelta.setStateModelFactoryName(message.getStateModelFactoryName());
currentStateDelta.setBucketSize(message.getBucketSize());
if (!message.getResourceName().equals("testStaledMessageResource")) {
// set the current state same as from state in the message in test testStaledMessage.
currentStateDelta.setState(message.getPartitionName(), "SLAVE");
} else {
// set the current state same as to state in the message in test testStaledMessage.
currentStateDelta.setState(message.getPartitionName(), "MASTER");
}
return new TestStateTransitionMessageHandler(message, context, currentStateDelta);
}
@Override
public List<String> getMessageTypes() {
return ImmutableList.of(_msgType);
}
@Override
public void reset() {
}
}
@Test()
public void testNormalMsgExecution() throws InterruptedException {
System.out.println("START TestCMTaskExecutor.testNormalMsgExecution()");
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
TestMessageHandlerFactory factory = new TestMessageHandlerFactory();
for (String type : factory.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory);
}
TestMessageHandlerFactory2 factory2 = new TestMessageHandlerFactory2();
for (String type : factory2.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory2);
}
NotificationContext changeContext = new NotificationContext(manager);
List<Message> msgList = new ArrayList<Message>();
int nMsgs1 = 5;
for (int i = 0; i < nMsgs1; i++) {
Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId(manager.getSessionId());
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msg.setCorrelationId(UUID.randomUUID().toString());
msgList.add(msg);
}
int nMsgs2 = 6;
for (int i = 0; i < nMsgs2; i++) {
Message msg = new Message(factory2.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId(manager.getSessionId());
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msg.setCorrelationId(UUID.randomUUID().toString());
msgList.add(msg);
}
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage("someInstance", msgList, changeContext);
Thread.sleep(1000);
AssertJUnit.assertTrue(factory._processedMsgIds.size() == nMsgs1);
AssertJUnit.assertTrue(factory2._processedMsgIds.size() == nMsgs2);
AssertJUnit.assertTrue(factory._handlersCreated == nMsgs1);
AssertJUnit.assertTrue(factory2._handlersCreated == nMsgs2);
for (Message record : msgList) {
AssertJUnit.assertTrue(factory._processedMsgIds.containsKey(record.getId())
|| factory2._processedMsgIds.containsKey(record.getId()));
AssertJUnit.assertFalse(factory._processedMsgIds.containsKey(record.getId())
&& factory2._processedMsgIds.containsKey(record.getId()));
}
System.out.println("END TestCMTaskExecutor.testNormalMsgExecution()");
}
@Test()
public void testDuplicatedMessage() throws InterruptedException {
System.out.println("START TestHelixTaskExecutor.testDuplicatedMessage()");
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
HelixDataAccessor dataAccessor = manager.getHelixDataAccessor();
PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();
TestStateTransitionHandlerFactory stateTransitionFactory =
new TestStateTransitionHandlerFactory(Message.MessageType.STATE_TRANSITION.name(), 1000);
executor.registerMessageHandlerFactory(Message.MessageType.STATE_TRANSITION.name(),
stateTransitionFactory);
NotificationContext changeContext = new NotificationContext(manager);
List<Message> msgList = new ArrayList<Message>();
int nMsgs = 3;
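// All three messages target the same resource/partition, so while the first (1-second)
// transition runs, the remaining duplicates should be discarded.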
String instanceName = manager.getInstanceName();
for (int i = 0; i < nMsgs; i++) {
Message msg =
new Message(Message.MessageType.STATE_TRANSITION.name(), UUID.randomUUID().toString());
msg.setTgtSessionId(manager.getSessionId());
msg.setCreateTimeStamp((long) i);
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msg.setPartitionName("Partition");
msg.setResourceName("Resource");
msg.setStateModelDef("DummyMasterSlave");
msg.setFromState("SLAVE");
msg.setToState("MASTER");
dataAccessor.setProperty(msg.getKey(keyBuilder, instanceName), msg);
msgList.add(msg);
}
AssertJUnit
.assertEquals(dataAccessor.getChildValues(keyBuilder.messages(instanceName), true).size(),
nMsgs);
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage(instanceName, msgList, changeContext);
Thread.sleep(200);
// only 1 message is left over - state transition takes 1sec
Assert.assertEquals(dataAccessor.getChildValues(keyBuilder.messages(instanceName), true).size(),
1);
// While a state transition message is going on, another state transition message for same
// resource / partition comes in, it should be discarded by message handler
// Mock accessor is modifying message state in memory so we set it back to NEW
msgList.get(2).setMsgState(MessageState.NEW);
dataAccessor.setProperty(msgList.get(2).getKey(keyBuilder, instanceName), msgList.get(2));
executor.onMessage(instanceName, Arrays.asList(msgList.get(2)), changeContext);
Thread.sleep(200);
Assert.assertEquals(dataAccessor.getChildValues(keyBuilder.messages(instanceName), true).size(),
1);
Thread.sleep(1000);
Assert.assertEquals(dataAccessor.getChildValues(keyBuilder.messages(instanceName), true).size(),
0);
System.out.println("END TestHelixTaskExecutor.testDuplicatedMessage()");
}
@Test()
public void testStaledMessage() throws InterruptedException {
System.out.println("START TestHelixTaskExecutor.testStaledMessage()");
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
HelixDataAccessor dataAccessor = manager.getHelixDataAccessor();
PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();
TestStateTransitionHandlerFactory stateTransitionFactory =
new TestStateTransitionHandlerFactory(Message.MessageType.STATE_TRANSITION.name(), 1000);
executor.registerMessageHandlerFactory(Message.MessageType.STATE_TRANSITION.name(),
stateTransitionFactory);
NotificationContext changeContext = new NotificationContext(manager);
List<Message> msgList = new ArrayList<Message>();
int nMsgs = 1;
String instanceName = manager.getInstanceName();
for (int i = 0; i < nMsgs; i++) {
Message msg =
new Message(Message.MessageType.STATE_TRANSITION.name(), UUID.randomUUID().toString());
msg.setTgtSessionId(manager.getSessionId());
msg.setCreateTimeStamp((long) i);
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msg.setPartitionName("Partition");
msg.setResourceName("testStaledMessageResource");
msg.setStateModelDef("DummyMasterSlave");
msg.setFromState("SLAVE");
msg.setToState("MASTER");
dataAccessor.setProperty(msg.getKey(keyBuilder, instanceName), msg);
msgList.add(msg);
}
Assert.assertEquals(dataAccessor.getChildValues(keyBuilder.messages(instanceName), true).size(),
nMsgs);
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage(instanceName, msgList, changeContext);
Thread.sleep(200);
// The message should be ignored since toState is the same as current state.
Assert.assertEquals(dataAccessor.getChildValues(keyBuilder.messages(instanceName), true).size(),
0);
System.out.println("END TestHelixTaskExecutor.testStaledMessage()");
}
@Test()
public void testUnknownTypeMsgExecution() throws InterruptedException {
System.out.println("START " + TestHelper.getTestMethodName());
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
TestMessageHandlerFactory factory = new TestMessageHandlerFactory();
for (String type : factory.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory);
}
TestMessageHandlerFactory2 factory2 = new TestMessageHandlerFactory2();
NotificationContext changeContext = new NotificationContext(manager);
List<Message> msgList = new ArrayList<Message>();
int nMsgs1 = 5;
for (int i = 0; i < nMsgs1; i++) {
Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId(manager.getSessionId());
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msgList.add(msg);
}
int nMsgs2 = 4;
for (int i = 0; i < nMsgs2; i++) {
Message msg = new Message(factory2.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId(manager.getSessionId());
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msgList.add(msg);
}
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage("someInstance", msgList, changeContext);
Thread.sleep(1000);
AssertJUnit.assertTrue(factory._processedMsgIds.size() == nMsgs1);
AssertJUnit.assertTrue(factory2._processedMsgIds.size() == 0);
AssertJUnit.assertTrue(factory._handlersCreated == nMsgs1);
AssertJUnit.assertTrue(factory2._handlersCreated == 0);
for (Message message : msgList) {
if (factory.getMessageTypes().contains(message.getMsgType())) {
AssertJUnit.assertTrue(factory._processedMsgIds.containsKey(message.getId()));
}
}
System.out.println("END " + TestHelper.getTestMethodName());
}
@Test()
public void testMsgSessionId() throws InterruptedException {
System.out.println("START " + TestHelper.getTestMethodName());
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
TestMessageHandlerFactory factory = new TestMessageHandlerFactory();
for (String type : factory.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory);
}
TestMessageHandlerFactory2 factory2 = new TestMessageHandlerFactory2();
for (String type : factory2.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory2);
}
NotificationContext changeContext = new NotificationContext(manager);
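// Only messages whose tgtSessionId matches this manager's session (or the wildcard "*")
// should be handled; the rest are ignored.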
List<Message> msgList = new ArrayList<Message>();
int nMsgs1 = 5;
for (int i = 0; i < nMsgs1; i++) {
Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId("*");
msg.setTgtName("");
msgList.add(msg);
}
int nMsgs2 = 4;
for (int i = 0; i < nMsgs2; i++) {
Message msg = new Message(factory2.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId("some other session id");
msg.setTgtName("");
msgList.add(msg);
}
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage("someInstance", msgList, changeContext);
Thread.sleep(1000);
AssertJUnit.assertTrue(factory._processedMsgIds.size() == nMsgs1);
AssertJUnit.assertTrue(factory2._processedMsgIds.size() == 0);
AssertJUnit.assertTrue(factory._handlersCreated == nMsgs1);
AssertJUnit.assertTrue(factory2._handlersCreated == 0);
for (Message message : msgList) {
if (factory.getMessageTypes().contains(message.getMsgType())) {
AssertJUnit.assertTrue(factory._processedMsgIds.containsKey(message.getId()));
}
}
System.out.println("END " + TestHelper.getTestMethodName());
}
@Test()
public void testCreateHandlerException() throws Exception {
System.out.println("START TestCMTaskExecutor.testCreateHandlerException()");
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
HelixDataAccessor dataAccessor = manager.getHelixDataAccessor();
PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();
NotificationContext changeContext = new NotificationContext(manager);
TestMessageHandlerFactory factory = new TestMessageHandlerFactory();
// Sending message without registering the factory.
// The message won't be processed since creating handler returns null.
int nMsgs1 = 5;
List<Message> msgList = new ArrayList<>();
for (int i = 0; i < nMsgs1; i++) {
Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId(manager.getSessionId());
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msg.setCorrelationId(UUID.randomUUID().toString());
dataAccessor.setProperty(keyBuilder.message(manager.getInstanceName(), msg.getMsgId()), msg);
msgList.add(msg);
}
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage(manager.getInstanceName(), Collections.emptyList(), changeContext);
for (Message message : msgList) {
message = dataAccessor
.getProperty(keyBuilder.message(manager.getInstanceName(), message.getMsgId()));
Assert.assertNotNull(message);
Assert.assertEquals(message.getMsgState(), MessageState.NEW);
Assert.assertEquals(message.getRetryCount(), 0);
}
// Test with a factory that throws an Exception on certain messages. The invalid message will
// remain UNPROCESSABLE due to the Exception.
for (String type : factory.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory);
}
Message exceptionMsg =
new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
exceptionMsg.setTgtSessionId(manager.getSessionId());
exceptionMsg.setMsgSubType("EXCEPTION");
exceptionMsg.setTgtName("Localhost_1123");
exceptionMsg.setSrcName("127.101.1.23_2234");
exceptionMsg.setCorrelationId(UUID.randomUUID().toString());
dataAccessor.setProperty(keyBuilder.message(manager.getInstanceName(), exceptionMsg.getMsgId()),
exceptionMsg);
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage(manager.getInstanceName(), Collections.emptyList(), changeContext);
Assert.assertTrue(TestHelper.verify(() -> {
Message tmpExceptionMsg = dataAccessor
.getProperty(keyBuilder.message(manager.getInstanceName(), exceptionMsg.getMsgId()));
if (tmpExceptionMsg == null || !tmpExceptionMsg.getMsgState()
.equals(MessageState.UNPROCESSABLE) || tmpExceptionMsg.getRetryCount() != -1) {
return false;
}
return true;
}, TestHelper.WAIT_DURATION),
"The exception message should be retied once and in UNPROCESSABLE state.");
Assert.assertTrue(TestHelper.verify(() -> {
for (Message message : msgList) {
message = dataAccessor
.getProperty(keyBuilder.message(manager.getInstanceName(), message.getMsgId()));
if (message != null) {
return false;
}
}
return true;
}, TestHelper.WAIT_DURATION), "The normal messages should be all processed normally.");
Assert.assertEquals(factory._processedMsgIds.size(), nMsgs1);
Assert.assertEquals(factory._handlersCreated, nMsgs1);
System.out.println("END TestCMTaskExecutor.testCreateHandlerException()");
}
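  /**
   * Verifies task cancellation: handlers are created and the messages reach the processing
   * stage, but none of them completes once cancelTask() is called on each in-flight task.
   */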
@Test()
public void testTaskCancellation() throws InterruptedException {
System.out.println("START " + TestHelper.getTestMethodName());
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
CancellableHandlerFactory factory = new CancellableHandlerFactory();
for (String type : factory.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory);
}
NotificationContext changeContext = new NotificationContext(manager);
List<Message> msgList = new ArrayList<Message>();
int nMsgs1 = 0;
for (int i = 0; i < nMsgs1; i++) {
Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId("*");
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msgList.add(msg);
}
List<Message> msgListToCancel = new ArrayList<Message>();
int nMsgs2 = 4;
for (int i = 0; i < nMsgs2; i++) {
Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId("*");
msgList.add(msg);
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msgListToCancel.add(msg);
}
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage("someInstance", msgList, changeContext);
Thread.sleep(500);
for (int i = 0; i < nMsgs2; i++) {
// executor.cancelTask(msgListToCancel.get(i), changeContext);
HelixTask task = new HelixTask(msgListToCancel.get(i), changeContext, null, null);
executor.cancelTask(task);
}
Thread.sleep(1500);
AssertJUnit.assertTrue(factory._processedMsgIds.size() == nMsgs1);
AssertJUnit.assertTrue(factory._handlersCreated == nMsgs1 + nMsgs2);
AssertJUnit.assertTrue(factory._processingMsgIds.size() == nMsgs1 + nMsgs2);
for (Message message : msgList) {
if (factory.getMessageTypes().contains(message.getMsgType())) {
AssertJUnit.assertTrue(factory._processingMsgIds.containsKey(message.getId()));
}
}
System.out.println("END " + TestHelper.getTestMethodName());
}
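  /**
   * Verifies that shutting down the executor terminates every per-message-type thread pool
   * after some of the submitted messages have been processed.
   */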
@Test()
public void testShutdown() throws InterruptedException {
System.out.println("START TestCMTaskExecutor.testShutdown()");
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
TestMessageHandlerFactory factory = new TestMessageHandlerFactory();
for (String type : factory.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory);
}
TestMessageHandlerFactory2 factory2 = new TestMessageHandlerFactory2();
for (String type : factory2.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory2);
}
CancellableHandlerFactory factory3 = new CancellableHandlerFactory();
for (String type : factory3.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory3);
}
int nMsg1 = 10, nMsg2 = 10, nMsg3 = 10;
List<Message> msgList = new ArrayList<Message>();
for (int i = 0; i < nMsg1; i++) {
Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId("*");
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msgList.add(msg);
}
    for (int i = 0; i < nMsg2; i++) {
      Message msg = new Message(factory2.getMessageTypes().get(0), UUID.randomUUID().toString());
      msg.setTgtSessionId("*");
      msg.setTgtName("Localhost_1123");
      msg.setSrcName("127.101.1.23_2234");
      msgList.add(msg);
    }
    for (int i = 0; i < nMsg3; i++) {
      Message msg = new Message(factory3.getMessageTypes().get(0), UUID.randomUUID().toString());
      msg.setTgtSessionId("*");
      msg.setTgtName("Localhost_1123");
      msg.setSrcName("127.101.1.23_2234");
      msgList.add(msg);
    }
NotificationContext changeContext = new NotificationContext(manager);
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage("some", msgList, changeContext);
Thread.sleep(500);
for (ExecutorService svc : executor._executorMap.values()) {
Assert.assertFalse(svc.isShutdown());
}
Assert.assertTrue(factory._processedMsgIds.size() > 0);
executor.shutdown();
for (ExecutorService svc : executor._executorMap.values()) {
Assert.assertTrue(svc.isShutdown());
}
System.out.println("END TestCMTaskExecutor.testShutdown()");
}
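  /**
   * Verifies the handler reset timeout: a timeout shorter than the message processing delay cuts
   * the handler off before completion on shutdown, while a sufficiently long timeout lets the
   * in-flight message complete.
   */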
@Test(dependsOnMethods = "testShutdown")
public void testHandlerResetTimeout() throws Exception {
System.out.println("START TestCMTaskExecutor.testHandlerResetTimeout()");
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
int messageDelay = 2 * 1000; // 2 seconds
TestMessageHandlerFactory factory = new TestMessageHandlerFactory(messageDelay);
    // Execute a message with a short reset timeout
int shortTimeout = 100; // 100 ms
executor.registerMessageHandlerFactory(factory, HelixTaskExecutor.DEFAULT_PARALLEL_TASKS, shortTimeout);
final Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId("*");
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
NotificationContext changeContext = new NotificationContext(manager);
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage("some", Arrays.asList(msg), changeContext);
Assert.assertTrue(
TestHelper.verify(() -> factory._processedMsgIds.containsKey(msg.getMsgId()), TestHelper.WAIT_DURATION));
executor.shutdown();
for (ExecutorService svc : executor._executorMap.values()) {
Assert.assertTrue(svc.isShutdown());
}
Assert.assertEquals(factory._completedMsgIds.size(), 0);
    // Execute a message with a proper reset timeout, so shutdown waits long enough for the message to be processed.
executor = new HelixTaskExecutor();
int longTimeout = messageDelay * 2; // 4 seconds
executor.registerMessageHandlerFactory(factory, HelixTaskExecutor.DEFAULT_PARALLEL_TASKS, longTimeout);
final Message msg2 = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg2.setTgtSessionId("*");
msg2.setTgtName("Localhost_1123");
msg2.setSrcName("127.101.1.23_2234");
executor.onMessage("some", Arrays.asList(msg2), changeContext);
Assert.assertTrue(
TestHelper.verify(() -> factory._processedMsgIds.containsKey(msg2.getMsgId()), TestHelper.WAIT_DURATION));
executor.shutdown();
for (ExecutorService svc : executor._executorMap.values()) {
Assert.assertTrue(svc.isShutdown());
}
Assert.assertEquals(factory._completedMsgIds.size(), 1);
Assert.assertTrue(factory._completedMsgIds.contains(msg2.getMsgId()));
System.out.println("END TestCMTaskExecutor.testHandlerResetTimeout()");
}
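  /**
   * Verifies handler factory registration bookkeeping and that a factory registered for multiple
   * message types is reset exactly once when the executor shuts down.
   */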
@Test
public void testMsgHandlerRegistryAndShutdown() {
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
TestMessageHandlerFactory factory = new TestMessageHandlerFactory();
TestMessageHandlerFactory3 factoryMulti = new TestMessageHandlerFactory3();
executor.registerMessageHandlerFactory(factory, HelixTaskExecutor.DEFAULT_PARALLEL_TASKS, 200);
executor.registerMessageHandlerFactory(factoryMulti, HelixTaskExecutor.DEFAULT_PARALLEL_TASKS, 200);
final Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId("*");
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
NotificationContext changeContext = new NotificationContext(manager);
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage("some", Collections.singletonList(msg), changeContext);
Assert.assertEquals(executor._hdlrFtyRegistry.size(), 4);
    // Ensure the TestMessageHandlerFactory3 instance is reset exactly once
executor.shutdown();
Assert.assertTrue(factoryMulti._resetDone, "TestMessageHandlerFactory3 should be reset");
}
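  /**
   * Verifies execution timeouts with no retries: messages whose timeout elapses are cancelled
   * and recorded as timed out instead of being rescheduled.
   */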
@Test()
public void testNoRetry() throws InterruptedException {
System.out.println("START " + TestHelper.getTestMethodName());
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
CancellableHandlerFactory factory = new CancellableHandlerFactory();
for (String type : factory.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory);
}
NotificationContext changeContext = new NotificationContext(manager);
List<Message> msgList = new ArrayList<Message>();
int nMsgs2 = 4;
// Test the case in which retry = 0
for (int i = 0; i < nMsgs2; i++) {
Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId("*");
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msg.setExecutionTimeout((i + 1) * 600);
msgList.add(msg);
}
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage("someInstance", msgList, changeContext);
Thread.sleep(4000);
AssertJUnit.assertTrue(factory._handlersCreated == nMsgs2);
AssertJUnit.assertEquals(factory._timedOutMsgIds.size(), 2);
// AssertJUnit.assertFalse(msgList.get(0).getRecord().getSimpleFields().containsKey("TimeOut"));
for (int i = 0; i < nMsgs2 - 2; i++) {
if (factory.getMessageTypes().contains(msgList.get(i).getMsgType())) {
AssertJUnit.assertTrue(msgList.get(i).getRecord().getSimpleFields()
.containsKey("Cancelcount"));
AssertJUnit.assertTrue(factory._timedOutMsgIds.containsKey(msgList.get(i).getId()));
}
}
System.out.println("END " + TestHelper.getTestMethodName());
}
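  /**
   * Verifies execution timeouts with a retry count of one: timed-out messages are cancelled and
   * rescheduled once, and the executor's task map is fully drained afterwards.
   */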
@Test()
public void testRetryOnce() throws InterruptedException {
System.out.println("START " + TestHelper.getTestMethodName());
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
CancellableHandlerFactory factory = new CancellableHandlerFactory();
for (String type : factory.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory);
}
NotificationContext changeContext = new NotificationContext(manager);
List<Message> msgList = new ArrayList<Message>();
    // Test the case where the messages are retried and executed a second time
int nMsgs2 = 4;
for (int i = 0; i < nMsgs2; i++) {
Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId("*");
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msg.setExecutionTimeout((i + 1) * 600);
msg.setRetryCount(1);
msgList.add(msg);
}
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage("someInstance", msgList, changeContext);
Thread.sleep(3500);
AssertJUnit.assertEquals(factory._processedMsgIds.size(), 3);
AssertJUnit.assertTrue(msgList.get(0).getRecord().getSimpleField("Cancelcount").equals("2"));
AssertJUnit.assertTrue(msgList.get(1).getRecord().getSimpleField("Cancelcount").equals("1"));
AssertJUnit.assertEquals(factory._timedOutMsgIds.size(), 2);
AssertJUnit.assertTrue(executor._taskMap.size() == 0);
System.out.println("END " + TestHelper.getTestMethodName());
}
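  /**
   * Verifies that a STATE_TRANSITION message paired with its STATE_TRANSITION_CANCELLATION
   * message results in neither message being processed.
   */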
@Test
public void testStateTransitionCancellationMsg() throws InterruptedException {
System.out.println("START " + TestHelper.getTestMethodName());
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
TestStateTransitionHandlerFactory stateTransitionFactory = new TestStateTransitionHandlerFactory(Message.MessageType.STATE_TRANSITION.name());
TestStateTransitionHandlerFactory cancelFactory = new TestStateTransitionHandlerFactory(Message.MessageType.STATE_TRANSITION_CANCELLATION
.name());
executor.registerMessageHandlerFactory(Message.MessageType.STATE_TRANSITION.name(), stateTransitionFactory);
executor.registerMessageHandlerFactory(Message.MessageType.STATE_TRANSITION_CANCELLATION.name(), cancelFactory);
NotificationContext changeContext = new NotificationContext(manager);
List<Message> msgList = new ArrayList<Message>();
Message msg1 = new Message(Message.MessageType.STATE_TRANSITION, UUID.randomUUID().toString());
msg1.setTgtSessionId("*");
msg1.setPartitionName("P1");
msg1.setResourceName("R1");
msg1.setTgtName("Localhost_1123");
msg1.setSrcName("127.101.1.23_2234");
msg1.setFromState("SLAVE");
msg1.setToState("MASTER");
msgList.add(msg1);
Message msg2 = new Message(Message.MessageType.STATE_TRANSITION_CANCELLATION, UUID.randomUUID().toString());
msg2.setTgtSessionId("*");
msg2.setPartitionName("P1");
msg2.setResourceName("R1");
msg2.setTgtName("Localhost_1123");
msg2.setSrcName("127.101.1.23_2234");
msg2.setFromState("SLAVE");
msg2.setToState("MASTER");
msgList.add(msg2);
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage("someInstance", msgList, changeContext);
Thread.sleep(3000);
AssertJUnit.assertEquals(cancelFactory._processedMsgIds.size(), 0);
AssertJUnit.assertEquals(stateTransitionFactory._processedMsgIds.size(), 0);
System.out.println("END " + TestHelper.getTestMethodName());
}
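  /**
   * Verifies the message read optimization: message ids already present in _knownMessageIds are
   * not re-read from ZooKeeper, and the known-id set is emptied once all messages are handled.
   */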
@Test
public void testMessageReadOptimization() throws InterruptedException {
System.out.println("START " + TestHelper.getTestMethodName());
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
TestMessageHandlerFactory factory = new TestMessageHandlerFactory();
for (String type : factory.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory);
}
HelixDataAccessor accessor = manager.getHelixDataAccessor();
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
List<String> messageIds = new ArrayList<>();
int nMsgs1 = 5;
for (int i = 0; i < nMsgs1; i++) {
Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId(manager.getSessionId());
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msg.setCorrelationId(UUID.randomUUID().toString());
accessor.setProperty(keyBuilder.message("someInstance", msg.getId()), msg);
messageIds.add(msg.getId());
}
NotificationContext changeContext = new NotificationContext(manager);
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
    // Simulate that the messages have already been read, then process; no message should be read or handled.
executor._knownMessageIds.addAll(messageIds);
executor.onMessage("someInstance", Collections.EMPTY_LIST, changeContext);
Thread.sleep(3000);
AssertJUnit.assertEquals(0, factory._processedMsgIds.size());
executor._knownMessageIds.clear();
// Processing message normally
executor.onMessage("someInstance", Collections.EMPTY_LIST, changeContext);
Thread.sleep(3000);
AssertJUnit.assertEquals(nMsgs1, factory._processedMsgIds.size());
// After all messages are processed, _knownMessageIds should be empty.
Assert.assertTrue(executor._knownMessageIds.isEmpty());
System.out.println("END " + TestHelper.getTestMethodName());
}
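  /**
   * Verifies that updateMessageState() does not write the READ state back for a message whose
   * ZooKeeper node has been removed, so the deleted node is not recreated.
   */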
@Test
public void testNoWriteReadStateForRemovedMessage()
throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
System.out.println("START " + TestHelper.getTestMethodName());
HelixTaskExecutor executor = new HelixTaskExecutor();
HelixManager manager = new MockClusterManager();
TestMessageHandlerFactory factory = new TestMessageHandlerFactory();
for (String type : factory.getMessageTypes()) {
executor.registerMessageHandlerFactory(type, factory);
}
HelixDataAccessor accessor = manager.getHelixDataAccessor();
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
String instanceName = "someInstance";
List<String> messageIds = new ArrayList<>();
List<Message> messages = new ArrayList<>();
int nMsgs1 = 5;
for (int i = 0; i < nMsgs1; i++) {
Message msg = new Message(factory.getMessageTypes().get(0), UUID.randomUUID().toString());
msg.setTgtSessionId(manager.getSessionId());
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msg.setCorrelationId(UUID.randomUUID().toString());
accessor.setProperty(keyBuilder.message(instanceName, msg.getId()), msg);
messageIds.add(msg.getId());
// Set for testing the update operation later
msg.setMsgState(MessageState.READ);
messages.add(msg);
}
Method updateMessageState = HelixTaskExecutor.class
.getDeclaredMethod("updateMessageState", Collection.class, HelixDataAccessor.class,
String.class);
updateMessageState.setAccessible(true);
updateMessageState.invoke(executor, messages, accessor, instanceName);
Assert.assertEquals(accessor.getChildNames(keyBuilder.messages(instanceName)).size(), nMsgs1);
accessor.removeProperty(keyBuilder.message(instanceName, messageIds.get(0)));
System.out.println(accessor.getChildNames(keyBuilder.messages(instanceName)).size());
for (Message message : messages) {
// Mock a change to ensure there will be some delta on the message node after update
message.setCorrelationId(UUID.randomUUID().toString());
}
updateMessageState.invoke(executor, messages, accessor, instanceName);
Assert
.assertEquals(accessor.getChildNames(keyBuilder.messages(instanceName)).size(), nMsgs1 - 1);
System.out.println("END " + TestHelper.getTestMethodName());
}
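  /**
   * Verifies the handling of a task scheduling failure: the state transition message is removed
   * from ZooKeeper, nothing is processed, and the partition's current state is set to ERROR.
   */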
@Test(dependsOnMethods = "testStateTransitionCancellationMsg")
public void testStateTransitionMsgScheduleFailure() {
System.out.println("START " + TestHelper.getTestMethodName());
// Create a mock executor that fails the task scheduling.
HelixTaskExecutor executor = new HelixTaskExecutor() {
@Override
public boolean scheduleTask(MessageTask task) {
return false;
}
};
HelixManager manager = new MockClusterManager();
HelixDataAccessor dataAccessor = manager.getHelixDataAccessor();
PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();
TestStateTransitionHandlerFactory stateTransitionFactory =
new TestStateTransitionHandlerFactory(Message.MessageType.STATE_TRANSITION.name());
executor.registerMessageHandlerFactory(Message.MessageType.STATE_TRANSITION.name(),
stateTransitionFactory);
NotificationContext changeContext = new NotificationContext(manager);
Message msg = new Message(Message.MessageType.STATE_TRANSITION, UUID.randomUUID().toString());
msg.setTgtSessionId(manager.getSessionId());
msg.setPartitionName("P1");
msg.setResourceName("R1");
msg.setTgtName("Localhost_1123");
msg.setSrcName("127.101.1.23_2234");
msg.setFromState("SLAVE");
msg.setToState("MASTER");
dataAccessor.setProperty(keyBuilder.message(manager.getInstanceName(), msg.getMsgId()), msg);
changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
executor.onMessage(manager.getInstanceName(), Collections.emptyList(), changeContext);
Assert.assertEquals(stateTransitionFactory._processedMsgIds.size(), 0);
// Message should have been removed
Assert.assertNull(
dataAccessor.getProperty(keyBuilder.message(manager.getInstanceName(), msg.getMsgId())));
// Current state would be ERROR due to the failure of task scheduling.
CurrentState currentState = dataAccessor.getProperty(keyBuilder
.currentState(manager.getInstanceName(), manager.getSessionId(), msg.getResourceName()));
Assert.assertNotNull(currentState);
Assert.assertEquals(currentState.getState(msg.getPartitionName()),
HelixDefinedState.ERROR.toString());
System.out.println("END " + TestHelper.getTestMethodName());
}
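  /**
   * Verifies that scheduleTask() routes a state transition to the executor service supplied by
   * the state model factory, whether the factory keys its pools by resource name, by state
   * transition type, or by message info.
   */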
@Test
public void testUpdateAndFindMessageThreadpool() throws Exception {
    // Use the ThreadPoolExecutor type directly because it exposes the scheduled task count
ThreadPoolExecutor executor0 =
new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
ThreadPoolExecutor executor1 =
new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
ThreadPoolExecutor executor2 =
new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
class MockStateModelFactory_ResourceName
extends StateModelFactory<OnlineOfflineStateModelFactory.OnlineOfflineStateModel> {
@Override
public ExecutorService getExecutorService(String resourceName) {
return executor0;
}
}
class MockStateModelFactory_STType
extends StateModelFactory<OnlineOfflineStateModelFactory.OnlineOfflineStateModel> {
@Override
public ExecutorService getExecutorService(String resourceName, String fromState, String toState) {
return executor1;
}
}
class MockStateModelFactory_MsgInfo
extends StateModelFactory<OnlineOfflineStateModelFactory.OnlineOfflineStateModel> {
@Override
public CustomizedExecutorService getExecutorService(Message.MessageInfo msgInfo) {
return new CustomizedExecutorService(Message.MessageInfo.MessageIdentifierBase.PER_REBALANCE_TYPE, executor2);
}
}
System.out.println("START " + TestHelper.getTestMethodName());
String sessionId = UUID.randomUUID().toString();
String resourceName = "testDB";
String msgId = "testMsgId";
String fromState = "Offline";
String toState = "Online";
String stateModelDef = "OnlineOffline";
HelixManager manager = mock(ZKHelixManager.class);
StateMachineEngine engine = mock(HelixStateMachineEngine.class);
when(manager.getStateMachineEngine()).thenReturn(engine);
when(manager.getInstanceType()).thenReturn(InstanceType.PARTICIPANT);
when(manager.getHelixDataAccessor()).thenReturn(new MockAccessor());
when(manager.getSessionId()).thenReturn(sessionId);
when(manager.getInstanceName()).thenReturn("TestInstance");
when(manager.getMessagingService()).thenReturn(new MockClusterMessagingService());
when(manager.getClusterName()).thenReturn(TestHelper.getTestMethodName());
StateModel stateModel = new MockMasterSlaveStateModel();
NotificationContext context = new NotificationContext(manager);
Message message = new Message(Message.MessageType.STATE_TRANSITION, msgId);
message.setFromState(fromState);
message.setToState(toState);
message.setResourceName(resourceName);
message.setSTRebalanceType(Message.STRebalanceType.LOAD_REBALANCE);
message.setStateModelDef(stateModelDef);
message.setPartitionName("TestPartition");
message.setTgtName("TgtInstance");
message.setStateModelFactoryName("DEFAULT");
message.setTgtSessionId(sessionId);
// State transition type based
    HelixTaskExecutor executor = new HelixTaskExecutor();
StateModelFactory<? extends StateModel> factory = new MockStateModelFactory_STType();
Mockito.doReturn(factory)
.when(engine)
.getStateModelFactory(stateModelDef, HelixConstants.DEFAULT_STATE_MODEL_FACTORY);
HelixStateTransitionHandler handler = new HelixStateTransitionHandler(factory, stateModel, message, context, new CurrentState(resourceName));
HelixTask task = new HelixTask(message, context, handler, executor);
executor.scheduleTask(task);
Assert.assertTrue(TestHelper.verify(() -> {
return executor1.getTaskCount() == 1;
}, TestHelper.WAIT_DURATION));
System.out.println(TestHelper.getTestMethodName() + ": State transition based test passed.");
// Resource name based
executor = new HelixTaskExecutor(); // Re-initialize it because if the message exists in _taskMap, it won't be assigned again
factory = new MockStateModelFactory_ResourceName();
Mockito.doReturn(factory)
.when(engine)
.getStateModelFactory(stateModelDef, HelixConstants.DEFAULT_STATE_MODEL_FACTORY);
handler = new HelixStateTransitionHandler(factory, stateModel, message, context, new CurrentState(resourceName));
engine.registerStateModelFactory(stateModelDef, factory);
task = new HelixTask(message, context, handler, executor);
executor.scheduleTask(task);
Assert.assertTrue(TestHelper.verify(() -> {
return executor0.getTaskCount() == 1;
}, TestHelper.WAIT_DURATION));
System.out.println(TestHelper.getTestMethodName() + ": Resource name based test passed.");
// Message Info based
executor = new HelixTaskExecutor();
factory = new MockStateModelFactory_MsgInfo();
handler =
new HelixStateTransitionHandler(factory, stateModel, message, context, new CurrentState(resourceName));
Mockito.doReturn(factory)
.when(engine)
.getStateModelFactory(stateModelDef, HelixConstants.DEFAULT_STATE_MODEL_FACTORY);
task = new HelixTask(message, context, handler, executor);
executor.scheduleTask(task);
Assert.assertTrue(TestHelper.verify(() -> {
return executor2.getTaskCount() == 1;
}, TestHelper.WAIT_DURATION));
System.out.println(TestHelper.getTestMethodName() + ": Message Info based test passed.");
System.out.println("END " + TestHelper.getTestMethodName());
}
}
| 9,891 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/handling/MockHelixTaskExecutor.java | package org.apache.helix.messaging.handling;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.NotificationContext;
import org.apache.helix.PropertyKey;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.Message;
import org.apache.helix.monitoring.mbeans.MessageQueueMonitor;
import org.apache.helix.monitoring.mbeans.ParticipantStatusMonitor;
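/**
 * A HelixTaskExecutor that inspects incoming messages before delegating to the real executor,
 * counting duplicated messages, extra state transitions to already-reached target states, and
 * duplicated in-progress state transitions.
 */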
public class MockHelixTaskExecutor extends HelixTaskExecutor {
public static int duplicatedMessages = 0;
public static int extraStateTransition = 0;
public static int duplicatedMessagesInProgress = 0;
HelixManager manager;
public MockHelixTaskExecutor(ParticipantStatusMonitor participantStatusMonitor,
MessageQueueMonitor messageQueueMonitor) {
super(participantStatusMonitor, messageQueueMonitor);
}
@Override
public void onMessage(String instanceName, List<Message> messages,
NotificationContext changeContext) {
manager = changeContext.getManager();
checkDuplicatedMessages(messages);
super.onMessage(instanceName, messages, changeContext);
}
void checkDuplicatedMessages(List<Message> messages) {
HelixDataAccessor accessor = manager.getHelixDataAccessor();
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
PropertyKey path = keyBuilder.currentStates(manager.getInstanceName(), manager.getSessionId());
Map<String, CurrentState> currentStateMap = accessor.getChildValuesMap(path, true);
// Also add the task path
PropertyKey taskPath =
keyBuilder.taskCurrentStates(manager.getInstanceName(), manager.getSessionId());
Map<String, CurrentState> taskCurrentStateMap = accessor.getChildValuesMap(taskPath, true);
taskCurrentStateMap.forEach(currentStateMap::putIfAbsent);
Set<String> seenPartitions = new HashSet<>();
for (Message message : messages) {
if (message.getMsgType().equals(Message.MessageType.STATE_TRANSITION.name())) {
String resource = message.getResourceName();
String partition = message.getPartitionName();
//System.err.println(message.getMsgId());
String key = resource + "-" + partition;
if (seenPartitions.contains(key)) {
//System.err.println("Duplicated message received for " + resource + ":" + partition);
duplicatedMessages++;
}
seenPartitions.add(key);
String toState = message.getToState();
String state = null;
if (currentStateMap.containsKey(resource)) {
CurrentState currentState = currentStateMap.get(resource);
state = currentState.getState(partition);
}
if (toState.equals(state) && message.getMsgState() == Message.MessageState.NEW) {
// logger.error(
// "Extra message: " + message.getMsgId() + ", Partition is already in target state "
// + toState + " for " + resource + ":" + partition);
extraStateTransition++;
}
String messageTarget =
getMessageTarget(message.getResourceName(), message.getPartitionName());
if (message.getMsgState() == Message.MessageState.NEW &&
_messageTaskMap.containsKey(messageTarget)) {
String taskId = _messageTaskMap.get(messageTarget);
MessageTaskInfo messageTaskInfo = _taskMap.get(taskId);
Message existingMsg = messageTaskInfo.getTask().getMessage();
        if (!existingMsg.getMsgId().equals(message.getMsgId())) {
          // logger.error("Duplicated message In Progress: " + message.getMsgId()
          //     + ", state transition in progress with message " + existingMsg.getMsgId()
          //     + " to " + toState + " for " + resource + ":" + partition);
          duplicatedMessagesInProgress++;
        }
}
}
}
}
public static void resetStats() {
duplicatedMessages = 0;
extraStateTransition = 0;
duplicatedMessagesInProgress = 0;
}
}
| 9,892 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/handling/TestConfigThreadpoolSize.java | package org.apache.helix.messaging.handling;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.ThreadPoolExecutor;
import com.google.common.collect.ImmutableList;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.NotificationContext;
import org.apache.helix.integration.common.ZkStandAloneCMTestBase;
import org.apache.helix.messaging.DefaultMessagingService;
import org.apache.helix.model.ConfigScope;
import org.apache.helix.model.Message;
import org.apache.helix.model.builder.ConfigScopeBuilder;
import org.testng.Assert;
import org.testng.annotations.Test;
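/**
 * Verifies that per-message-type thread pool sizes can be configured at both the cluster and
 * participant scopes, with the participant-level setting taking precedence.
 */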
public class TestConfigThreadpoolSize extends ZkStandAloneCMTestBase {
public static class TestMessagingHandlerFactory implements MultiTypeMessageHandlerFactory {
public static HashSet<String> _processedMsgIds = new HashSet<String>();
@Override
public MessageHandler createHandler(Message message, NotificationContext context) {
return null;
}
@Override public List<String> getMessageTypes() {
return ImmutableList.of("TestMsg");
}
@Override
public void reset() {
// TODO Auto-generated method stub
}
}
public static class TestMessagingHandlerFactory2 implements MultiTypeMessageHandlerFactory {
public static HashSet<String> _processedMsgIds = new HashSet<String>();
@Override
public MessageHandler createHandler(Message message, NotificationContext context) {
return null;
}
@Override public List<String> getMessageTypes() {
return ImmutableList.of("TestMsg2");
}
@Override
public void reset() {
// TODO Auto-generated method stub
}
}
@Test
public void TestThreadPoolSizeConfig() {
String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + 0);
HelixManager manager = _participants[0];
ConfigAccessor accessor = manager.getConfigAccessor();
ConfigScope scope =
new ConfigScopeBuilder().forCluster(manager.getClusterName()).forParticipant(instanceName)
.build();
accessor.set(scope, "TestMsg." + HelixTaskExecutor.MAX_THREADS, "" + 12);
scope = new ConfigScopeBuilder().forCluster(manager.getClusterName()).build();
accessor.set(scope, "TestMsg." + HelixTaskExecutor.MAX_THREADS, "" + 8);
for (int i = 0; i < NODE_NR; i++) {
instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
_participants[i].getMessagingService().registerMessageHandlerFactory("TestMsg",
new TestMessagingHandlerFactory());
_participants[i].getMessagingService()
.registerMessageHandlerFactory("TestMsg2", new TestMessagingHandlerFactory2());
}
for (int i = 0; i < NODE_NR; i++) {
instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
DefaultMessagingService svc =
(DefaultMessagingService) (_participants[i]
.getMessagingService());
HelixTaskExecutor helixExecutor = svc.getExecutor();
ThreadPoolExecutor executor =
(ThreadPoolExecutor) (helixExecutor._executorMap.get("TestMsg"));
ThreadPoolExecutor executor2 =
(ThreadPoolExecutor) (helixExecutor._executorMap.get("TestMsg2"));
if (i != 0) {
Assert.assertEquals(8, executor.getMaximumPoolSize());
} else {
Assert.assertEquals(12, executor.getMaximumPoolSize());
}
Assert.assertEquals(HelixTaskExecutor.DEFAULT_PARALLEL_TASKS, executor2.getMaximumPoolSize());
}
}
}
| 9,893 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/handling/TestBatchMessageModeConfigs.java | package org.apache.helix.messaging.handling;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.helix.integration.common.ZkStandAloneCMTestBase;
import org.apache.helix.messaging.DefaultMessagingService;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.builder.FullAutoModeISBuilder;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
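/**
 * Verifies that enabling batch message mode, either cluster-wide or for a single resource,
 * causes the participant's executor to spin up its batch message thread pool.
 */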
public class TestBatchMessageModeConfigs extends ZkStandAloneCMTestBase {
static final String TEST_DB_PREFIX = "TestDBABatch";
@BeforeClass
public void beforeClass() throws Exception {
super.beforeClass();
_participants[0].getStateMachineEngine().registerStateModelFactory("OnlineOffline",
new TestResourceThreadpoolSize.TestOnlineOfflineStateModelFactory(5, 2000), "TestFactory");
// Use one node for testing
for (int i = 1; i < _participants.length; i++) {
_participants[i].syncStop();
}
Assert.assertTrue(_clusterVerifier.verifyByPolling());
}
@Test
public void testEnableBatchModeForCluster() throws InterruptedException {
_gSetupTool.getClusterManagementTool().enableBatchMessageMode(CLUSTER_NAME, true);
String dbName = TEST_DB_PREFIX + "Cluster";
setupResource(dbName);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, dbName, 1);
Assert.assertTrue(_clusterVerifier.verifyByPolling());
verify();
_gSetupTool.getClusterManagementTool().enableBatchMessageMode(CLUSTER_NAME, false);
}
@Test
public void testEnableBatchModeForResource() throws InterruptedException {
String dbName = TEST_DB_PREFIX + "Resource";
setupResource(dbName);
_gSetupTool.getClusterManagementTool().enableBatchMessageMode(CLUSTER_NAME, dbName, true);
_gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, dbName, 1);
Assert.assertTrue(_clusterVerifier.verifyByPolling());
verify();
_gSetupTool.getClusterManagementTool().enableBatchMessageMode(CLUSTER_NAME, dbName, false);
}
private void setupResource(String dbName) throws InterruptedException {
IdealState idealState = new FullAutoModeISBuilder(dbName).setStateModel("OnlineOffline")
.setStateModelFactoryName("TestFactory").setNumPartitions(10).setNumReplica(1).build();
_gSetupTool.getClusterManagementTool().addResource(CLUSTER_NAME, dbName, idealState);
}
private void verify() {
DefaultMessagingService svc =
(DefaultMessagingService) (_participants[0].getMessagingService());
HelixTaskExecutor helixExecutor = svc.getExecutor();
ThreadPoolExecutor executor = (ThreadPoolExecutor) (helixExecutor._batchMessageExecutorService);
Assert.assertNotNull(executor);
Assert.assertTrue(executor.getPoolSize() > 0);
}
}
| 9,894 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/p2pMessage/TestP2PMessagesAvoidDuplicatedMessage.java | package org.apache.helix.messaging.p2pMessage;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import org.apache.helix.HelixConstants;
import org.apache.helix.controller.common.PartitionStateMap;
import org.apache.helix.controller.common.ResourcesStateMap;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.BaseStageTest;
import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.controller.stages.IntermediateStateCalcStage;
import org.apache.helix.controller.stages.MessageGenerationPhase;
import org.apache.helix.controller.stages.MessageOutput;
import org.apache.helix.controller.stages.MessageSelectionStage;
import org.apache.helix.controller.stages.MessageThrottleStage;
import org.apache.helix.controller.stages.ReadClusterDataStage;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.MasterSlaveSMD;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.testng.Assert;
import org.testng.annotations.Test;
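/**
 * Walks a master handoff through repeated controller pipeline runs and verifies that p2p (relay)
 * messaging never produces duplicated or conflicting state transition messages, even as the
 * designated new master changes between runs.
 */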
public class TestP2PMessagesAvoidDuplicatedMessage extends BaseStageTest {
String _db = "testDB";
int _numPartition = 1;
int _numReplica = 3;
Partition _partition = new Partition(_db + "_0");
ResourceControllerDataProvider _dataCache;
Pipeline _fullPipeline;
Pipeline _messagePipeline;
ResourcesStateMap _bestpossibleState;
private void preSetup() throws Exception {
setupIdealState(3, new String[] { _db }, _numPartition, _numReplica,
IdealState.RebalanceMode.SEMI_AUTO, BuiltInStateModelDefinitions.MasterSlave.name());
setupStateModel();
setupInstances(3);
setupLiveInstances(3);
ClusterConfig clusterConfig = new ClusterConfig(_clusterName);
clusterConfig.enableP2PMessage(true);
setClusterConfig(clusterConfig);
Map<String, Resource> resourceMap = getResourceMap(new String[] { _db }, _numPartition,
BuiltInStateModelDefinitions.MasterSlave.name(), clusterConfig, null);
_dataCache = new ResourceControllerDataProvider();
_dataCache.setAsyncTasksThreadPool(Executors.newSingleThreadExecutor());
event.addAttribute(AttributeName.ControllerDataProvider.name(), _dataCache);
event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
event.addAttribute(AttributeName.CURRENT_STATE.name(), new CurrentStateOutput());
event.addAttribute(AttributeName.helixmanager.name(), manager);
_fullPipeline = new Pipeline("FullPipeline");
_fullPipeline.addStage(new ReadClusterDataStage());
_fullPipeline.addStage(new BestPossibleStateCalcStage());
_fullPipeline.addStage(new MessageGenerationPhase());
_fullPipeline.addStage(new MessageSelectionStage());
_fullPipeline.addStage(new IntermediateStateCalcStage());
_fullPipeline.addStage(new MessageThrottleStage());
_messagePipeline = new Pipeline("MessagePipeline");
_messagePipeline.addStage(new MessageGenerationPhase());
_messagePipeline.addStage(new MessageSelectionStage());
_messagePipeline.addStage(new IntermediateStateCalcStage());
_messagePipeline.addStage(new MessageThrottleStage());
_fullPipeline.handle(event);
_bestpossibleState =
event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
}
@Test
public void testP2PAvoidDuplicatedMessage() throws Exception {
preSetup();
// Scenario 1:
    // Disable the old master (initialMaster) instance.
    // Validate: an M->S message should be sent to initialMaster with a P2P message attached for secondMaster.
String initialMaster = getTopStateInstance(_bestpossibleState.getInstanceStateMap(_db, _partition),
MasterSlaveSMD.States.MASTER.name());
Assert.assertNotNull(initialMaster);
// disable existing master instance
admin.enableInstance(_clusterName, initialMaster, false);
_dataCache = event.getAttribute(AttributeName.ControllerDataProvider.name());
_dataCache.notifyDataChange(HelixConstants.ChangeType.INSTANCE_CONFIG);
CurrentStateOutput currentStateOutput =
populateCurrentStateFromBestPossible(_bestpossibleState);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
_fullPipeline.handle(event);
_bestpossibleState = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
MessageOutput messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
List<Message> messages = messageOutput.getMessages(_db, _partition);
Assert.assertEquals(messages.size(), 1);
Message toSlaveMessage = messages.get(0);
Assert.assertEquals(toSlaveMessage.getTgtName(), initialMaster);
Assert.assertEquals(toSlaveMessage.getFromState(), MasterSlaveSMD.States.MASTER.name());
Assert.assertEquals(toSlaveMessage.getToState(), MasterSlaveSMD.States.SLAVE.name());
// verify p2p message are attached to the M->S message sent to the old master instance
Assert.assertEquals(toSlaveMessage.getRelayMessages().entrySet().size(), 1);
String secondMaster =
getTopStateInstance(_bestpossibleState.getInstanceStateMap(_db, _partition), MasterSlaveSMD.States.MASTER.name());
Message relayMessage = toSlaveMessage.getRelayMessage(secondMaster);
Assert.assertNotNull(relayMessage);
Assert.assertEquals(relayMessage.getMsgSubType(), Message.MessageType.RELAYED_MESSAGE.name());
Assert.assertEquals(relayMessage.getTgtName(), secondMaster);
Assert.assertEquals(relayMessage.getRelaySrcHost(), initialMaster);
Assert.assertEquals(relayMessage.getFromState(), MasterSlaveSMD.States.SLAVE.name());
Assert.assertEquals(relayMessage.getToState(), MasterSlaveSMD.States.MASTER.name());
// Scenario 2A:
// Old master (initialMaster) completes the M->S transition,
    // but has not forwarded the p2p message to the new master (secondMaster) yet.
// Validate: Controller should not send S->M message to new master.
currentStateOutput.setCurrentState(_db, _partition, initialMaster, "SLAVE");
currentStateOutput.setPendingMessage(_db, _partition, initialMaster, toSlaveMessage);
currentStateOutput.setPendingRelayMessage(_db, _partition, initialMaster, relayMessage);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
_fullPipeline.handle(event);
messageOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(_db, _partition);
Assert.assertEquals(messages.size(), 0);
// Scenario 2B:
// Old master (initialMaster) completes the M->S transition,
// There is a pending p2p message to new master (secondMaster).
// Validate: Controller should send S->M message to new master at same time.
currentStateOutput.setCurrentState(_db, _partition, initialMaster, "SLAVE");
currentStateOutput.getPendingMessageMap(_db, _partition).clear();
currentStateOutput.setPendingRelayMessage(_db, _partition, initialMaster, relayMessage);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
_messagePipeline.handle(event);
messageOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(_db, _partition);
Assert.assertEquals(messages.size(), 2);
boolean hasToOffline = false;
boolean hasToMaster = false;
for (Message msg : messages) {
if (msg.getToState().equals(MasterSlaveSMD.States.MASTER.name()) && msg.getTgtName()
.equals(secondMaster)) {
hasToMaster = true;
}
if (msg.getToState().equals(MasterSlaveSMD.States.OFFLINE.name()) && msg.getTgtName()
.equals(initialMaster)) {
hasToOffline = true;
}
}
Assert.assertTrue(hasToMaster);
Assert.assertTrue(hasToOffline);
    // Scenario 2C:
// Old master (initialMaster) completes the M->S transition,
// There is a pending p2p message to new master (secondMaster).
// However, the new master has been changed in bestPossible
// Validate: Controller should not send S->M message to the third master at same time.
String thirdMaster =
getTopStateInstance(_bestpossibleState.getInstanceStateMap(_db, _partition),
MasterSlaveSMD.States.SLAVE.name());
Map<String, String> instanceStateMap = _bestpossibleState.getInstanceStateMap(_db, _partition);
instanceStateMap.put(secondMaster, "SLAVE");
instanceStateMap.put(thirdMaster, "MASTER");
_bestpossibleState.setState(_db, _partition, instanceStateMap);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), _bestpossibleState);
_messagePipeline.handle(event);
messageOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(_db, _partition);
Assert.assertEquals(messages.size(), 1);
Assert.assertTrue(messages.get(0).getToState().equals("OFFLINE"));
Assert.assertTrue(messages.get(0).getTgtName().equals(initialMaster));
// Scenario 3:
// Old master (initialMaster) completes the M->S transition,
    // and has already forwarded the p2p message to new master (secondMaster).
    // The original M->S message sent to the old master has been removed.
// Validate: Controller should send S->O to old master, but not S->M message to new master.
instanceStateMap = _bestpossibleState.getInstanceStateMap(_db, _partition);
instanceStateMap.put(secondMaster, "MASTER");
instanceStateMap.put(thirdMaster, "SLAVE");
_bestpossibleState.setState(_db, _partition, instanceStateMap);
currentStateOutput =
populateCurrentStateFromBestPossible(_bestpossibleState);
currentStateOutput.setCurrentState(_db, _partition, initialMaster, "SLAVE");
currentStateOutput.setPendingMessage(_db, _partition, secondMaster, relayMessage);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
_fullPipeline.handle(event);
messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(_db, _partition);
Assert.assertEquals(messages.size(), 1);
Message toOfflineMessage = messages.get(0);
Assert.assertEquals(toOfflineMessage.getTgtName(), initialMaster);
Assert.assertEquals(toOfflineMessage.getFromState(), MasterSlaveSMD.States.SLAVE.name());
Assert.assertEquals(toOfflineMessage.getToState(), MasterSlaveSMD.States.OFFLINE.name());
// Scenario 4:
    // The old master (initialMaster) finishes the state transition, but has not forwarded the p2p message yet.
    // Then the preference list changes, so the new master (thirdMaster) differs from the previously calculated new master (secondMaster).
// Validate: controller should not send S->M to thirdMaster.
currentStateOutput.setCurrentState(_db, _partition, initialMaster, "OFFLINE");
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
thirdMaster =
getTopStateInstance(_bestpossibleState.getInstanceStateMap(_db, _partition),
MasterSlaveSMD.States.SLAVE.name());
instanceStateMap = _bestpossibleState.getInstanceStateMap(_db, _partition);
instanceStateMap.put(secondMaster, "SLAVE");
instanceStateMap.put(thirdMaster, "MASTER");
_bestpossibleState.setState(_db, _partition, instanceStateMap);
event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), _bestpossibleState);
_messagePipeline.handle(event);
messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(_db, _partition);
Assert.assertEquals(messages.size(), 0);
// Scenario 5:
    // The initial master has forwarded the p2p message to secondMaster and deleted the original M->S message on initialMaster,
    // but the S->M state transition has not completed yet on secondMaster.
// Validate: Controller should not send S->M to thirdMaster.
currentStateOutput.setPendingMessage(_db, _partition, secondMaster, relayMessage);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), _bestpossibleState);
_messagePipeline.handle(event);
messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(_db, _partition);
Assert.assertEquals(messages.size(), 0);
    // Scenario 6:
    // The thirdMaster completed the state transition and deleted the p2p message.
    // Validate: Controller should send an M->S message to secondMaster.
currentStateOutput =
populateCurrentStateFromBestPossible(_bestpossibleState);
currentStateOutput.setCurrentState(_db, _partition, secondMaster, "MASTER");
currentStateOutput.setCurrentState(_db, _partition, thirdMaster, "SLAVE");
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
_messagePipeline.handle(event);
messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(_db, _partition);
Assert.assertEquals(messages.size(), 1);
toSlaveMessage = messages.get(0);
Assert.assertEquals(toSlaveMessage.getTgtName(), secondMaster);
Assert.assertEquals(toSlaveMessage.getFromState(), MasterSlaveSMD.States.MASTER.name());
Assert.assertEquals(toSlaveMessage.getToState(), MasterSlaveSMD.States.SLAVE.name());
    // verify the p2p message is attached to the M->S message sent to the secondMaster
Assert.assertEquals(toSlaveMessage.getRelayMessages().entrySet().size(), 1);
relayMessage = toSlaveMessage.getRelayMessage(thirdMaster);
Assert.assertNotNull(relayMessage);
Assert.assertEquals(relayMessage.getMsgSubType(), Message.MessageType.RELAYED_MESSAGE.name());
Assert.assertEquals(relayMessage.getTgtName(), thirdMaster);
Assert.assertEquals(relayMessage.getRelaySrcHost(), secondMaster);
Assert.assertEquals(relayMessage.getFromState(), MasterSlaveSMD.States.SLAVE.name());
Assert.assertEquals(relayMessage.getToState(), MasterSlaveSMD.States.MASTER.name());
}
private String getTopStateInstance(Map<String, String> instanceStateMap, String topState) {
String masterInstance = null;
for (Map.Entry<String, String> e : instanceStateMap.entrySet()) {
if (topState.equals(e.getValue())) {
masterInstance = e.getKey();
}
}
return masterInstance;
}
private CurrentStateOutput populateCurrentStateFromBestPossible(
ResourcesStateMap bestPossibleStateOutput) {
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
for (String resource : bestPossibleStateOutput.getResourceStatesMap().keySet()) {
PartitionStateMap partitionStateMap = bestPossibleStateOutput.getPartitionStateMap(resource);
for (Partition p : partitionStateMap.partitionSet()) {
Map<String, String> stateMap = partitionStateMap.getPartitionMap(p);
for (Map.Entry<String, String> e : stateMap.entrySet()) {
currentStateOutput.setCurrentState(resource, p, e.getKey(), e.getValue());
}
}
}
return currentStateOutput;
}
}
| 9,895 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/p2pMessage/TestP2PWithStateCancellationMessage.java | package org.apache.helix.messaging.p2pMessage;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.BaseStageTest;
import org.apache.helix.controller.stages.BestPossibleStateOutput;
import org.apache.helix.controller.stages.ClusterEvent;
import org.apache.helix.controller.stages.ClusterEventType;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.controller.stages.MessageGenerationPhase;
import org.apache.helix.controller.stages.MessageOutput;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZKHelixManager;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.MasterSlaveSMD;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.apache.helix.model.ResourceConfig;
import org.testng.Assert;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
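/**
 * Verifies that with both p2p messaging and state transition cancellation enabled, a pending
 * relay message whose target state is unchanged is left alone while a message whose target state
 * changed receives a cancellation.
 */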
public class TestP2PWithStateCancellationMessage extends BaseStageTest {
private final static String CLUSTER_NAME = "MockCluster";
private final static String RESOURCE_NAME = "MockResource";
@Test
public void testP2PWithStateCancellationMessage() {
ClusterEvent event = generateClusterEvent();
runStage(event, new MessageGenerationPhase());
MessageOutput messageOutput = event.getAttribute(AttributeName.MESSAGES_ALL.name());
// No message should be sent for partition 0
Assert.assertEquals(messageOutput.getMessages(RESOURCE_NAME, new Partition("0")).size(), 0);
// One cancellation message should be sent out for partition 1
List<Message> messages = messageOutput.getMessages(RESOURCE_NAME, new Partition("1"));
Assert.assertEquals(messages.size(), 1);
Assert.assertEquals(messages.get(0).getMsgType(),
Message.MessageType.STATE_TRANSITION_CANCELLATION.name());
}
private ClusterEvent generateClusterEvent() {
Mock mock = new Mock();
ClusterEvent event =
new ClusterEvent(CLUSTER_NAME, ClusterEventType.IdealStateChange, "randomId");
ClusterConfig clusterConfig = new ClusterConfig(CLUSTER_NAME);
clusterConfig.stateTransitionCancelEnabled(true);
// mock manager
event.addAttribute(AttributeName.helixmanager.name(), mock.manager);
when(mock.manager.getHelixDataAccessor()).thenReturn(mock.accessor);
when(mock.manager.getSessionId()).thenReturn(UUID.randomUUID().toString());
when(mock.manager.getInstanceName()).thenReturn("CONTROLLER");
// mock resource
ResourceConfig resourceConfig = new ResourceConfig(RESOURCE_NAME);
Resource resource = new Resource(RESOURCE_NAME, clusterConfig, resourceConfig);
resource.addPartition("0");
resource.addPartition("1");
resource.setStateModelDefRef("MasterSlave");
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(),
Collections.singletonMap(RESOURCE_NAME, resource));
// mock cache with two live instances and session id.
LiveInstance l1 = new LiveInstance("localhost_1");
l1.setSessionId(UUID.randomUUID().toString());
LiveInstance l2 = new LiveInstance("localhost_2");
l2.setSessionId(UUID.randomUUID().toString());
event.addAttribute(AttributeName.ControllerDataProvider.name(), mock.cache);
when(mock.cache.getStateModelDef("MasterSlave")).thenReturn(MasterSlaveSMD.build());
when(mock.cache.getClusterConfig()).thenReturn(clusterConfig);
when(mock.cache.getLiveInstances()).thenReturn(Arrays.asList(l1, l2).stream().collect(
Collectors.toMap(LiveInstance::getId, Function.identity())));
    // mock current state output. Generate 3 messages:
    // 1. the main message, stored in ZK, which carries the p2p message (#2) as a relay message.
    // 2. a p2p (relay) message that is embedded inside message #1 instead of being written to ZK.
    // 3. a message that should be cancelled because its target state has changed.
Message message =
new Message(Message.MessageType.STATE_TRANSITION, UUID.randomUUID().toString());
message.setSrcName(manager.getInstanceName());
message.setTgtName("localhost_1");
message.setMsgState(Message.MessageState.NEW);
message.setPartitionName("0");
message.setResourceName(resource.getResourceName());
message.setFromState("MASTER");
message.setToState("SLAVE");
    message.setTgtSessionId(UUID.randomUUID().toString());
    message.setSrcSessionId(manager.getSessionId());
    message.setStateModelDef("MasterSlave");
Message relayMessage =
new Message(Message.MessageType.STATE_TRANSITION, UUID.randomUUID().toString());
relayMessage.setSrcName("localhost_1");
relayMessage.setTgtName("localhost_2");
relayMessage.setMsgState(Message.MessageState.NEW);
relayMessage.setPartitionName("0");
relayMessage.setResourceName(resource.getResourceName());
relayMessage.setFromState("SLAVE");
relayMessage.setToState("MASTER");
    relayMessage.setTgtSessionId(UUID.randomUUID().toString());
    relayMessage.setSrcSessionId(manager.getSessionId());
    relayMessage.setStateModelDef("MasterSlave");
Message messageToBeCancelled =
new Message(Message.MessageType.STATE_TRANSITION, UUID.randomUUID().toString());
messageToBeCancelled.setSrcName(manager.getInstanceName());
messageToBeCancelled.setTgtName("localhost_2");
messageToBeCancelled.setMsgState(Message.MessageState.NEW);
messageToBeCancelled.setPartitionName("1");
messageToBeCancelled.setResourceName(resource.getResourceName());
messageToBeCancelled.setFromState("MASTER");
messageToBeCancelled.setToState("SLAVE");
    messageToBeCancelled.setTgtSessionId(UUID.randomUUID().toString());
    messageToBeCancelled.setSrcSessionId(manager.getSessionId());
    messageToBeCancelled.setStateModelDef("MasterSlave");
    // mock current state & intermediate state output.
    // Keep partition 0's target state unchanged so the p2p message is not cancelled.
    // Change partition 1's target state so Helix sends a cancellation message.
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
currentStateOutput.setPendingMessage(RESOURCE_NAME, new Partition("0"), "localhost_1", message);
currentStateOutput.setPendingMessage(RESOURCE_NAME, new Partition("0"), "localhost_2", relayMessage);
currentStateOutput
.setPendingMessage(RESOURCE_NAME, new Partition("1"), "localhost_2", messageToBeCancelled);
currentStateOutput.setCurrentState(RESOURCE_NAME, new Partition("0"), "localhost_1", "MASTER");
currentStateOutput.setCurrentState(RESOURCE_NAME, new Partition("0"), "localhost_2", "SLAVE");
currentStateOutput.setCurrentState(RESOURCE_NAME, new Partition("1"), "localhost_2", "MASTER");
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
BestPossibleStateOutput bestPossibleStateOutput = new BestPossibleStateOutput();
bestPossibleStateOutput.setState(RESOURCE_NAME, new Partition("0"), "localhost_1", "SLAVE");
bestPossibleStateOutput.setState(RESOURCE_NAME, new Partition("0"), "localhost_2", "MASTER");
bestPossibleStateOutput.setState(RESOURCE_NAME, new Partition("1"), "localhost_2", "MASTER");
event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
return event;
}
private final class Mock {
private ResourceControllerDataProvider cache = mock(ResourceControllerDataProvider.class);
private HelixManager manager = mock(ZKHelixManager.class);
private HelixDataAccessor accessor = mock(ZKHelixDataAccessor.class);
}
}
| 9,896 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/p2pMessage/TestP2PMessages.java | package org.apache.helix.messaging.p2pMessage;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.reflect.Method;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
import org.apache.helix.HelixConstants;
import org.apache.helix.HelixDefinedState;
import org.apache.helix.PropertyKey;
import org.apache.helix.controller.common.ResourcesStateMap;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.BaseStageTest;
import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
import org.apache.helix.controller.stages.CurrentStateComputationStage;
import org.apache.helix.controller.stages.IntermediateStateCalcStage;
import org.apache.helix.controller.stages.MessageGenerationPhase;
import org.apache.helix.controller.stages.MessageSelectionStage;
import org.apache.helix.controller.stages.MessageThrottleStage;
import org.apache.helix.controller.stages.ReadClusterDataStage;
import org.apache.helix.controller.stages.ResourceComputationStage;
import org.apache.helix.controller.stages.resource.ResourceMessageDispatchStage;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.MasterSlaveSMD;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.testng.Assert;
import org.testng.ITestContext;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
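/**
 * End-to-end controller-pipeline tests for P2P (relay) messaging: the controller attaches the
 * follow-up S->M transition as a relay message on the M->S message sent to the old master, and
 * only sends S->M itself once the relay message expires, errors out, or its target goes offline.
 */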
public class TestP2PMessages extends BaseStageTest {
private String _db = "testDB";
private int _numPartition = 1;
private int _numReplica = 3;
private Partition _partition = new Partition(_db + "_0");
private ResourceControllerDataProvider _dataCache;
private Pipeline _fullPipeline;
private ResourcesStateMap _initialStateMap;
private Set<String> _instances;
private Map<String, LiveInstance> _liveInstanceMap;
private String _initialMaster;
@BeforeClass
public void beforeClass() {
super.beforeClass();
setup();
setupIdealState(3, new String[] { _db }, _numPartition, _numReplica,
IdealState.RebalanceMode.SEMI_AUTO, BuiltInStateModelDefinitions.MasterSlave.name());
setupStateModel();
setupInstances(3);
setupLiveInstances(3);
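    // Enable P2P messaging cluster-wide: the controller will attach the follow-up transition as a
    // relay message, letting the old master forward it directly instead of waiting a pipeline round.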
ClusterConfig clusterConfig = new ClusterConfig(_clusterName);
clusterConfig.enableP2PMessage(true);
setClusterConfig(clusterConfig);
_dataCache = new ResourceControllerDataProvider(_clusterName);
_dataCache.setAsyncTasksThreadPool(Executors.newSingleThreadExecutor());
_dataCache.refresh(manager.getHelixDataAccessor());
event.addAttribute(AttributeName.ControllerDataProvider.name(), _dataCache);
event.addAttribute(AttributeName.helixmanager.name(), manager);
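    // Assemble the same stages, in the same order, that the real controller runs for a resource
    // rebalance, so message generation and selection behave as they would in production.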
_fullPipeline = new Pipeline("FullPipeline");
_fullPipeline.addStage(new ReadClusterDataStage());
_fullPipeline.addStage(new ResourceComputationStage());
_fullPipeline.addStage(new CurrentStateComputationStage());
_fullPipeline.addStage(new BestPossibleStateCalcStage());
_fullPipeline.addStage(new MessageGenerationPhase());
_fullPipeline.addStage(new MessageSelectionStage());
_fullPipeline.addStage(new IntermediateStateCalcStage());
_fullPipeline.addStage(new MessageThrottleStage());
_fullPipeline.addStage(new ResourceMessageDispatchStage());
try {
_fullPipeline.handle(event);
} catch (Exception e) {
e.printStackTrace();
}
_instances = _dataCache.getAllInstances();
_liveInstanceMap = _dataCache.getLiveInstances();
_initialStateMap = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
_initialMaster = getTopStateInstance(_initialStateMap.getInstanceStateMap(_db, _partition),
MasterSlaveSMD.States.MASTER.name());
Assert.assertNotNull(_initialMaster);
}
  @BeforeMethod // just to override the per-test setup in the base class.
public void beforeTest(Method testMethod, ITestContext testContext) {
long startTime = System.currentTimeMillis();
System.out.println("START " + testMethod.getName() + " at " + new Date(startTime));
testContext.setAttribute("StartTime", System.currentTimeMillis());
}
@Test
public void testP2PSendAndTimeout() throws Exception {
reset(_initialStateMap);
    // Disable the old master (initialMaster) instance.
    // Validate: an M->S message should be sent to initialMaster with a P2P message attached for secondMaster.
admin.enableInstance(_clusterName, _initialMaster, false);
_dataCache = event.getAttribute(AttributeName.ControllerDataProvider.name());
_dataCache.notifyDataChange(HelixConstants.ChangeType.INSTANCE_CONFIG);
_fullPipeline.handle(event);
ResourcesStateMap bestpossibleState =
event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());
List<Message> messages = getMessages(_initialMaster);
Assert.assertEquals(messages.size(), 1);
Message toSlaveMessage = messages.get(0);
Assert.assertEquals(toSlaveMessage.getTgtName(), _initialMaster);
Assert.assertEquals(toSlaveMessage.getFromState(), MasterSlaveSMD.States.MASTER.name());
Assert.assertEquals(toSlaveMessage.getToState(), MasterSlaveSMD.States.SLAVE.name());
    // verify the p2p message is attached to the M->S message sent to the old master instance
Assert.assertEquals(toSlaveMessage.getRelayMessages().entrySet().size(), 1);
String secondMaster =
getTopStateInstance(bestpossibleState.getInstanceStateMap(_db, _partition),
MasterSlaveSMD.States.MASTER.name());
Message relayMessage = toSlaveMessage.getRelayMessage(secondMaster);
Assert.assertNotNull(relayMessage);
Assert.assertEquals(relayMessage.getMsgSubType(), Message.MessageType.RELAYED_MESSAGE.name());
Assert.assertEquals(relayMessage.getTgtName(), secondMaster);
Assert.assertEquals(relayMessage.getRelaySrcHost(), _initialMaster);
Assert.assertEquals(relayMessage.getFromState(), MasterSlaveSMD.States.SLAVE.name());
Assert.assertEquals(relayMessage.getToState(), MasterSlaveSMD.States.MASTER.name());
    // The old master (initialMaster) completed the M->S transition,
    // but has not forwarded the p2p message to the new master (secondMaster) yet.
    // Validate: Controller should not send an S->M message to the new master.
handleMessage(_initialMaster, _db);
_fullPipeline.handle(event);
messages = getMessages(secondMaster);
Assert.assertEquals(messages.size(), 0);
    // The old master (initialMaster) completed the M->S transition and still has not forwarded
    // the p2p message to the new master (secondMaster), but by now the p2p message has expired.
    // Validate: Controller should send an S->M message to the new master itself.
Thread.sleep(Message.RELAY_MESSAGE_DEFAULT_EXPIRY);
_fullPipeline.handle(event);
messages = getMessages(secondMaster);
Assert.assertEquals(messages.size(), 1);
Assert.assertEquals(messages.get(0).getTgtName(), secondMaster);
Assert.assertEquals(messages.get(0).getFromState(), MasterSlaveSMD.States.SLAVE.name());
Assert.assertEquals(messages.get(0).getToState(), MasterSlaveSMD.States.MASTER.name());
}
@Test
public void testP2PWithErrorState() throws Exception {
reset(_initialStateMap);
    // Disable the old master (initialMaster) instance.
    // Validate: an M->S message should be sent to initialMaster with a P2P message attached for secondMaster.
admin.enableInstance(_clusterName, _initialMaster, false);
_dataCache.notifyDataChange(HelixConstants.ChangeType.INSTANCE_CONFIG);
_fullPipeline.handle(event);
ResourcesStateMap bestpossibleState =
event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());
List<Message> messages = getMessages(_initialMaster);
Assert.assertEquals(messages.size(), 1);
Message toSlaveMessage = messages.get(0);
    // verify the p2p message is attached to the M->S message sent to the old master instance
Assert.assertEquals(toSlaveMessage.getRelayMessages().entrySet().size(), 1);
String secondMaster =
getTopStateInstance(bestpossibleState.getInstanceStateMap(_db, _partition),
MasterSlaveSMD.States.MASTER.name());
Message relayMessage = toSlaveMessage.getRelayMessage(secondMaster);
Assert.assertNotNull(relayMessage);
Assert.assertEquals(relayMessage.getMsgSubType(), Message.MessageType.RELAYED_MESSAGE.name());
Assert.assertEquals(relayMessage.getTgtName(), secondMaster);
    // The old master (initialMaster) failed the M->S transition
    // and has not forwarded the p2p message to the new master (secondMaster).
    // Validate: Controller should ignore the ERROR partition and send an S->M message to the new master.
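    // Simulate the failed transition the way a participant's error handler would: write an ERROR
    // current state for the partition and delete the pending message from ZK.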
String session = _dataCache.getLiveInstances().get(_initialMaster).getEphemeralOwner();
PropertyKey currentStateKey =
new PropertyKey.Builder(_clusterName).currentState(_initialMaster, session, _db);
CurrentState currentState = accessor.getProperty(currentStateKey);
currentState
.setPreviousState(_partition.getPartitionName(), MasterSlaveSMD.States.MASTER.name());
currentState.setState(_partition.getPartitionName(), HelixDefinedState.ERROR.name());
currentState.setEndTime(_partition.getPartitionName(), System.currentTimeMillis());
accessor.setProperty(currentStateKey, currentState);
PropertyKey messageKey =
new PropertyKey.Builder(_clusterName).message(_initialMaster, messages.get(0).getMsgId());
accessor.removeProperty(messageKey);
_fullPipeline.handle(event);
messages = getMessages(secondMaster);
Assert.assertEquals(messages.size(), 1);
Assert.assertEquals(messages.get(0).getTgtName(), secondMaster);
Assert.assertEquals(messages.get(0).getFromState(), MasterSlaveSMD.States.SLAVE.name());
Assert.assertEquals(messages.get(0).getToState(), MasterSlaveSMD.States.MASTER.name());
}
@Test
public void testP2PWithInstanceOffline() throws Exception {
reset(_initialStateMap);
    // Disable the old master (initialMaster) instance.
    // Validate: an M->S message should be sent to initialMaster with a P2P message attached for secondMaster.
admin.enableInstance(_clusterName, _initialMaster, false);
_dataCache.notifyDataChange(HelixConstants.ChangeType.INSTANCE_CONFIG);
_fullPipeline.handle(event);
ResourcesStateMap bestpossibleState =
event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());
List<Message> messages = getMessages(_initialMaster);
Assert.assertEquals(messages.size(), 1);
    Message toSlaveMessage = messages.get(0);
    // verify the p2p message is attached to the M->S message sent to the old master instance
Assert.assertEquals(toSlaveMessage.getRelayMessages().entrySet().size(), 1);
String secondMaster =
getTopStateInstance(bestpossibleState.getInstanceStateMap(_db, _partition),
MasterSlaveSMD.States.MASTER.name());
Message relayMessage = toSlaveMessage.getRelayMessage(secondMaster);
Assert.assertNotNull(relayMessage);
Assert.assertEquals(relayMessage.getMsgSubType(), Message.MessageType.RELAYED_MESSAGE.name());
Assert.assertEquals(relayMessage.getTgtName(), secondMaster);
    // The old master (initialMaster) completed the M->S transition,
    // but has not forwarded the p2p message to the new master (secondMaster) yet.
    // Validate: Controller should not send an S->M message to the new master.
handleMessage(_initialMaster, _db);
_fullPipeline.handle(event);
messages = getMessages(secondMaster);
Assert.assertEquals(messages.size(), 0);
    // The new master (secondMaster) goes offline; the controller should immediately send S->M to a third master.
PropertyKey liveInstanceKey = new PropertyKey.Builder(_clusterName).liveInstance(secondMaster);
accessor.removeProperty(liveInstanceKey);
_dataCache.requireFullRefresh();
_fullPipeline.handle(event);
bestpossibleState = event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());
String thirdMaster = getTopStateInstance(bestpossibleState.getInstanceStateMap(_db, _partition),
MasterSlaveSMD.States.MASTER.name());
    Assert.assertNotEquals(secondMaster, thirdMaster);
messages = getMessages(thirdMaster);
Assert.assertEquals(messages.size(), 1);
Assert.assertEquals(messages.get(0).getTgtName(), thirdMaster);
Assert.assertEquals(messages.get(0).getFromState(), MasterSlaveSMD.States.SLAVE.name());
Assert.assertEquals(messages.get(0).getToState(), MasterSlaveSMD.States.MASTER.name());
}
  /**
   * Simulate a participant (without starting a real participant thread) handling its pending
   * messages: set the CurrentState to each message's target state, then remove the pending
   * message from ZK.
   * @param instance the participant instance name
   * @param resource the resource whose pending messages should be handled
   */
private void handleMessage(String instance, String resource) {
PropertyKey propertyKey = new PropertyKey.Builder(_clusterName).messages(instance);
List<Message> messages = accessor.getChildValues(propertyKey, true);
String session = _dataCache.getLiveInstances().get(instance).getEphemeralOwner();
for (Message m : messages) {
if (m.getResourceName().equals(resource)) {
PropertyKey currentStateKey =
new PropertyKey.Builder(_clusterName).currentState(instance, session, resource);
CurrentState currentState = accessor.getProperty(currentStateKey);
if (currentState == null) {
currentState = new CurrentState(resource);
currentState.setSessionId(session);
currentState.setStateModelDefRef(BuiltInStateModelDefinitions.MasterSlave.name());
}
String partition = m.getPartitionName();
String fromState = m.getFromState();
String toState = m.getToState();
String partitionState = currentState.getState(partition);
        if ((partitionState == null && fromState.equals(
            BuiltInStateModelDefinitions.MasterSlave.getStateModelDefinition().getInitialState()))
            || fromState.equals(partitionState)) {
currentState.setPreviousState(partition, fromState);
currentState.setState(partition, toState);
currentState.setStartTime(partition, System.currentTimeMillis());
try {
Thread.sleep(50);
} catch (InterruptedException e) {
e.printStackTrace();
}
currentState.setEndTime(partition, System.currentTimeMillis());
accessor.setProperty(currentStateKey, currentState);
PropertyKey messageKey =
new PropertyKey.Builder(_clusterName).message(instance, m.getMsgId());
accessor.removeProperty(messageKey);
}
}
}
}
  /**
   * Enable all instances, clean all pending messages, and set every CurrentState to the
   * given best-possible state.
   */
private void reset(ResourcesStateMap bestpossibleState) {
for (String ins : _liveInstanceMap.keySet()) {
LiveInstance liveInstance = _liveInstanceMap.get(ins);
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.liveInstance(liveInstance.getId()), liveInstance);
}
for (String ins : _instances) {
      admin.enableInstance(_clusterName, ins, true);
cleanMessages(ins);
}
for (String resource : bestpossibleState.resourceSet()) {
setCurrentState(resource, bestpossibleState.getPartitionStateMap(resource).getStateMap());
}
for (String ins : _instances) {
cleanMessages(ins);
}
_dataCache.requireFullRefresh();
}
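  // Writes the given per-partition states into each instance's CurrentState znode,
  // creating the CurrentState entry for the session if it does not exist yet.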
private void setCurrentState(String resource,
Map<Partition, Map<String, String>> partitionStateMap) {
for (Partition p : partitionStateMap.keySet()) {
Map<String, String> partitionState = partitionStateMap.get(p);
for (String instance : partitionState.keySet()) {
String state = partitionState.get(instance);
String session = _liveInstanceMap.get(instance).getEphemeralOwner();
PropertyKey currentStateKey =
new PropertyKey.Builder(_clusterName).currentState(instance, session, resource);
CurrentState currentState = accessor.getProperty(currentStateKey);
if (currentState == null) {
currentState = new CurrentState(resource);
currentState.setSessionId(session);
currentState.setStateModelDefRef(BuiltInStateModelDefinitions.MasterSlave.name());
}
currentState.setState(p.getPartitionName(), state);
accessor.setProperty(currentStateKey, currentState);
}
}
}
private void cleanMessages(String instance) {
PropertyKey propertyKey = new PropertyKey.Builder(_clusterName).messages(instance);
List<Message> messages = accessor.getChildValues(propertyKey, true);
for (Message m : messages) {
accessor
.removeProperty(new PropertyKey.Builder(_clusterName).message(instance, m.getMsgId()));
}
}
List<Message> getMessages(String instance) {
return accessor.getChildValues(new PropertyKey.Builder(_clusterName).messages(instance), true);
}
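  // Returns an instance currently mapped to the given top state (e.g. MASTER), or null if none.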
private String getTopStateInstance(Map<String, String> instanceStateMap, String topState) {
String masterInstance = null;
for (Map.Entry<String, String> e : instanceStateMap.entrySet()) {
if (topState.equals(e.getValue())) {
masterInstance = e.getKey();
}
}
return masterInstance;
}
}
| 9,897 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging | Create_ds/helix/helix-core/src/test/java/org/apache/helix/messaging/p2pMessage/TestP2PStateTransitionMessages.java | package org.apache.helix.messaging.p2pMessage;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import org.apache.helix.HelixConstants;
import org.apache.helix.controller.common.PartitionStateMap;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.BaseStageTest;
import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
import org.apache.helix.controller.stages.BestPossibleStateOutput;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.controller.stages.IntermediateStateCalcStage;
import org.apache.helix.controller.stages.MessageGenerationPhase;
import org.apache.helix.controller.stages.MessageOutput;
import org.apache.helix.controller.stages.MessageSelectionStage;
import org.apache.helix.controller.stages.MessageThrottleStage;
import org.apache.helix.controller.stages.ReadClusterDataStage;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.MasterSlaveSMD;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.testng.Assert;
import org.testng.annotations.Test;
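/**
 * Stage-level tests for P2P state-transition messages: relay messages are attached only when P2P
 * is enabled in ClusterConfig, and pending relay messages suppress duplicate S->M messages even
 * as the best-possible mapping changes underneath them.
 */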
public class TestP2PStateTransitionMessages extends BaseStageTest {
String db = "testDB";
int numPartition = 1;
int numReplica = 3;
private void preSetup() {
setupIdealState(3, new String[] { db }, numPartition, numReplica,
IdealState.RebalanceMode.SEMI_AUTO, BuiltInStateModelDefinitions.MasterSlave.name());
setupStateModel();
setupInstances(3);
setupLiveInstances(3);
}
@Test
public void testP2PMessageEnabled() throws Exception {
preSetup();
ClusterConfig clusterConfig = new ClusterConfig(_clusterName);
clusterConfig.enableP2PMessage(true);
setClusterConfig(clusterConfig);
testP2PMessage(clusterConfig, true);
}
@Test
public void testP2PMessageDisabled() throws Exception {
preSetup();
testP2PMessage(null, false);
}
@Test
public void testAvoidDuplicatedMessageWithP2PEnabled() throws Exception {
preSetup();
ClusterConfig clusterConfig = new ClusterConfig(_clusterName);
clusterConfig.enableP2PMessage(true);
setClusterConfig(clusterConfig);
Map<String, Resource> resourceMap = getResourceMap(new String[] { db }, numPartition,
BuiltInStateModelDefinitions.MasterSlave.name(), clusterConfig, null);
ResourceControllerDataProvider cache = new ResourceControllerDataProvider();
cache.setAsyncTasksThreadPool(Executors.newSingleThreadExecutor());
event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
event.addAttribute(AttributeName.CURRENT_STATE.name(), new CurrentStateOutput());
event.addAttribute(AttributeName.helixmanager.name(), manager);
Pipeline pipeline = createPipeline();
pipeline.handle(event);
BestPossibleStateOutput bestPossibleStateOutput =
event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
CurrentStateOutput currentStateOutput =
populateCurrentStateFromBestPossible(bestPossibleStateOutput);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
Partition p = new Partition(db + "_0");
String masterInstance = getTopStateInstance(bestPossibleStateOutput.getInstanceStateMap(db, p),
MasterSlaveSMD.States.MASTER.name());
Assert.assertNotNull(masterInstance);
admin.enableInstance(_clusterName, masterInstance, false);
cache = event.getAttribute(AttributeName.ControllerDataProvider.name());
cache.notifyDataChange(HelixConstants.ChangeType.INSTANCE_CONFIG);
pipeline = createPipeline();
pipeline.handle(event);
bestPossibleStateOutput = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
MessageOutput messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
List<Message> messages = messageOutput.getMessages(db, p);
Assert.assertEquals(messages.size(), 1);
Message toSlaveMessage = messages.get(0);
Assert.assertEquals(toSlaveMessage.getTgtName(), masterInstance);
Assert.assertEquals(toSlaveMessage.getFromState(), MasterSlaveSMD.States.MASTER.name());
Assert.assertEquals(toSlaveMessage.getToState(), MasterSlaveSMD.States.SLAVE.name());
    // verify the p2p message attached to the M->S message sent to the old master instance
Assert.assertEquals(toSlaveMessage.getRelayMessages().entrySet().size(), 1);
String newMasterInstance =
getTopStateInstance(bestPossibleStateOutput.getInstanceStateMap(db, p),
MasterSlaveSMD.States.MASTER.name());
Message relayMessage = toSlaveMessage.getRelayMessage(newMasterInstance);
Assert.assertNotNull(relayMessage);
Assert.assertEquals(relayMessage.getMsgSubType(), Message.MessageType.RELAYED_MESSAGE.name());
Assert.assertEquals(relayMessage.getTgtName(), newMasterInstance);
Assert.assertEquals(relayMessage.getRelaySrcHost(), masterInstance);
Assert.assertEquals(relayMessage.getFromState(), MasterSlaveSMD.States.SLAVE.name());
Assert.assertEquals(relayMessage.getToState(), MasterSlaveSMD.States.MASTER.name());
    // Simulate: the old master finished the state transition but has not forwarded the p2p message yet.
currentStateOutput.setCurrentState(db, p, masterInstance, "SLAVE");
currentStateOutput.setPendingMessage(db, p, masterInstance, toSlaveMessage);
currentStateOutput.setPendingRelayMessage(db, p, masterInstance, relayMessage);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
pipeline.handle(event);
messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(db, p);
Assert.assertEquals(messages.size(), 0);
currentStateOutput =
populateCurrentStateFromBestPossible(bestPossibleStateOutput);
currentStateOutput.setCurrentState(db, p, masterInstance, "SLAVE");
currentStateOutput.setPendingMessage(db, p, newMasterInstance, relayMessage);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
pipeline.handle(event);
messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(db, p);
Assert.assertEquals(messages.size(), 1);
Message toOfflineMessage = messages.get(0);
Assert.assertEquals(toOfflineMessage.getTgtName(), masterInstance);
Assert.assertEquals(toOfflineMessage.getFromState(), MasterSlaveSMD.States.SLAVE.name());
Assert.assertEquals(toOfflineMessage.getToState(), MasterSlaveSMD.States.OFFLINE.name());
    // Now the old master has finished the state transition but has not forwarded the p2p message yet.
    // The preference list then changes, so the new master differs from the previously calculated one,
    // but the controller should not send S->M to the newly calculated master.
currentStateOutput.setCurrentState(db, p, masterInstance, "OFFLINE");
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
String slaveInstance =
getTopStateInstance(bestPossibleStateOutput.getInstanceStateMap(db, p),
MasterSlaveSMD.States.SLAVE.name());
Map<String, String> instanceStateMap = bestPossibleStateOutput.getInstanceStateMap(db, p);
instanceStateMap.put(newMasterInstance, "SLAVE");
instanceStateMap.put(slaveInstance, "MASTER");
bestPossibleStateOutput.setState(db, p, instanceStateMap);
event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
pipeline = new Pipeline("test");
pipeline.addStage(new MessageGenerationPhase());
pipeline.addStage(new MessageSelectionStage());
pipeline.addStage(new IntermediateStateCalcStage());
pipeline.addStage(new MessageThrottleStage());
pipeline.handle(event);
messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(db, p);
Assert.assertEquals(messages.size(), 0);
    // Now the old master has forwarded the p2p message to the previously calculated master,
    // so the state transition still happens on the previously calculated master.
    // The controller will not send S->M to the new master.
currentStateOutput.setPendingMessage(db, p, newMasterInstance, relayMessage);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
event.addAttribute(AttributeName.BEST_POSSIBLE_STATE.name(), bestPossibleStateOutput);
event.addAttribute(AttributeName.INTERMEDIATE_STATE.name(), bestPossibleStateOutput);
pipeline = new Pipeline("test");
pipeline.addStage(new MessageGenerationPhase());
pipeline.addStage(new MessageSelectionStage());
pipeline.addStage(new IntermediateStateCalcStage());
pipeline.addStage(new MessageThrottleStage());
pipeline.handle(event);
messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(db, p);
Assert.assertEquals(messages.size(), 0);
    // Now the previously calculated master has completed the state transition and deleted the p2p
    // message. The controller should transition this master down first.
currentStateOutput =
populateCurrentStateFromBestPossible(bestPossibleStateOutput);
currentStateOutput.setCurrentState(db, p, newMasterInstance, "MASTER");
currentStateOutput.setCurrentState(db, p, slaveInstance, "SLAVE");
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
pipeline = new Pipeline("test");
pipeline.addStage(new MessageGenerationPhase());
pipeline.addStage(new MessageSelectionStage());
pipeline.addStage(new MessageThrottleStage());
pipeline.handle(event);
messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
messages = messageOutput.getMessages(db, p);
Assert.assertEquals(messages.size(), 1);
toSlaveMessage = messages.get(0);
Assert.assertEquals(toSlaveMessage.getTgtName(), newMasterInstance);
Assert.assertEquals(toSlaveMessage.getFromState(), MasterSlaveSMD.States.MASTER.name());
Assert.assertEquals(toSlaveMessage.getToState(), MasterSlaveSMD.States.SLAVE.name());
}
private void testP2PMessage(ClusterConfig clusterConfig, Boolean p2pMessageEnabled)
throws Exception {
Map<String, Resource> resourceMap = getResourceMap(new String[] { db }, numPartition,
BuiltInStateModelDefinitions.MasterSlave.name(), clusterConfig, null);
event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMap);
event.addAttribute(AttributeName.CURRENT_STATE.name(), new CurrentStateOutput());
event.addAttribute(AttributeName.helixmanager.name(), manager);
event.addAttribute(AttributeName.ControllerDataProvider.name(),
new ResourceControllerDataProvider());
Pipeline pipeline = createPipeline();
pipeline.handle(event);
BestPossibleStateOutput bestPossibleStateOutput =
event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
CurrentStateOutput currentStateOutput =
populateCurrentStateFromBestPossible(bestPossibleStateOutput);
event.addAttribute(AttributeName.CURRENT_STATE.name(), currentStateOutput);
Partition p = new Partition(db + "_0");
String masterInstance = getTopStateInstance(bestPossibleStateOutput.getInstanceStateMap(db, p),
MasterSlaveSMD.States.MASTER.name());
Assert.assertNotNull(masterInstance);
admin.enableInstance(_clusterName, masterInstance, false);
ResourceControllerDataProvider cache = event.getAttribute(AttributeName.ControllerDataProvider.name());
cache.notifyDataChange(HelixConstants.ChangeType.INSTANCE_CONFIG);
pipeline = createPipeline();
pipeline.handle(event);
bestPossibleStateOutput = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
MessageOutput messageOutput =
event.getAttribute(AttributeName.MESSAGES_SELECTED.name());
List<Message> messages = messageOutput.getMessages(db, p);
Assert.assertEquals(messages.size(), 1);
Message message = messages.get(0);
Assert.assertEquals(message.getTgtName(), masterInstance);
Assert.assertEquals(message.getFromState(), MasterSlaveSMD.States.MASTER.name());
Assert.assertEquals(message.getToState(), MasterSlaveSMD.States.SLAVE.name());
if (p2pMessageEnabled) {
Assert.assertEquals(message.getRelayMessages().entrySet().size(), 1);
String newMasterInstance =
getTopStateInstance(bestPossibleStateOutput.getInstanceStateMap(db, p),
MasterSlaveSMD.States.MASTER.name());
Message relayMessage = message.getRelayMessage(newMasterInstance);
Assert.assertNotNull(relayMessage);
Assert.assertEquals(relayMessage.getMsgSubType(), Message.MessageType.RELAYED_MESSAGE.name());
Assert.assertEquals(relayMessage.getTgtName(), newMasterInstance);
Assert.assertEquals(relayMessage.getRelaySrcHost(), masterInstance);
Assert.assertEquals(relayMessage.getFromState(), MasterSlaveSMD.States.SLAVE.name());
Assert.assertEquals(relayMessage.getToState(), MasterSlaveSMD.States.MASTER.name());
} else {
Assert.assertTrue(message.getRelayMessages().entrySet().isEmpty());
}
}
private String getTopStateInstance(Map<String, String> instanceStateMap, String topState) {
String masterInstance = null;
for (Map.Entry<String, String> e : instanceStateMap.entrySet()) {
if (topState.equals(e.getValue())) {
masterInstance = e.getKey();
}
}
return masterInstance;
}
private CurrentStateOutput populateCurrentStateFromBestPossible(
BestPossibleStateOutput bestPossibleStateOutput) {
CurrentStateOutput currentStateOutput = new CurrentStateOutput();
for (String resource : bestPossibleStateOutput.getResourceStatesMap().keySet()) {
PartitionStateMap partitionStateMap = bestPossibleStateOutput.getPartitionStateMap(resource);
for (Partition p : partitionStateMap.partitionSet()) {
Map<String, String> stateMap = partitionStateMap.getPartitionMap(p);
for (Map.Entry<String, String> e : stateMap.entrySet()) {
currentStateOutput.setCurrentState(resource, p, e.getKey(), e.getValue());
}
}
}
return currentStateOutput;
}
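  // Builds a trimmed-down controller pipeline with just the stages these tests need to compute
  // the best-possible state and to generate, select, and throttle messages.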
private Pipeline createPipeline() {
Pipeline pipeline = new Pipeline("test");
pipeline.addStage(new ReadClusterDataStage());
pipeline.addStage(new BestPossibleStateCalcStage());
pipeline.addStage(new MessageGenerationPhase());
pipeline.addStage(new MessageSelectionStage());
pipeline.addStage(new IntermediateStateCalcStage());
pipeline.addStage(new MessageThrottleStage());
return pipeline;
}
}
| 9,898 |
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/store/TestJsonComparator.java | package org.apache.helix.store;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
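/**
 * Verifies PropertyJsonComparator's null-handling contract: two nulls compare equal,
 * and null orders before any non-null ZNRecord.
 */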
public class TestJsonComparator {
  @Test(groups = { "unitTest" })
public void testJsonComparator() {
System.out.println("START TestJsonComparator at " + new Date(System.currentTimeMillis()));
ZNRecord record = new ZNRecord("id1");
PropertyJsonComparator<ZNRecord> comparator =
new PropertyJsonComparator<ZNRecord>(ZNRecord.class);
AssertJUnit.assertTrue(comparator.compare(null, null) == 0);
AssertJUnit.assertTrue(comparator.compare(null, record) == -1);
AssertJUnit.assertTrue(comparator.compare(record, null) == 1);
System.out.println("END TestJsonComparator at " + new Date(System.currentTimeMillis()));
}
}
| 9,899 |