| index (int64, 0-0) | repo_id (string, 9-205 chars) | file_path (string, 31-246 chars) | content (string, 1-12.2M chars) | __index_level_0__ (int64, 0-10k) |
---|---|---|---|---|
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/store/TestPropertyStat.java | package org.apache.helix.store;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
public class TestPropertyStat {
@Test(groups = {
"unitTest"
})
public void testPropertyStat() {
PropertyStat stat = new PropertyStat(0, 0);
AssertJUnit.assertEquals(0, stat.getLastModifiedTime());
AssertJUnit.assertEquals(0, stat.getVersion());
}
}
| 9,900 |
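The test above only exercises the constructor and the two getters, so a minimal `PropertyStat` needs little more than the two values it is built with. Below is a sketch of such a class; the field names and types are illustrative assumptions, not the actual Helix source.

```java
// Minimal PropertyStat-like holder sufficient for the test above.
// Field names and types are illustrative assumptions.
public class PropertyStat {
  private final long _lastModifiedTime; // epoch millis of the last write
  private final int _version;           // ZooKeeper-style data version

  public PropertyStat(long lastModifiedTime, int version) {
    _lastModifiedTime = lastModifiedTime;
    _version = version;
  }

  public long getLastModifiedTime() {
    return _lastModifiedTime;
  }

  public int getVersion() {
    return _version;
  }
}
```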
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix | Create_ds/helix/helix-core/src/test/java/org/apache/helix/store/TestPropertyStoreException.java | package org.apache.helix.store;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
public class TestPropertyStoreException {
@Test(groups = {
"unitTest"
})
public void testPropertyStoreException() {
PropertyStoreException exception = new PropertyStoreException("msg");
AssertJUnit.assertEquals(exception.getMessage(), "msg");
exception = new PropertyStoreException();
AssertJUnit.assertNull(exception.getMessage());
}
}
| 9,901 |
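The exception test pins down only two behaviors: a message-bearing constructor and a no-arg constructor whose `getMessage()` returns null. A sketch compatible with that contract follows; whether the real Helix class extends `Exception` or `RuntimeException` is an assumption here.

```java
// Minimal PropertyStoreException sketch compatible with the test above.
// The superclass choice is an assumption; the test only needs the two constructors.
public class PropertyStoreException extends Exception {
  public PropertyStoreException(String message) {
    super(message); // getMessage() returns the supplied message
  }

  public PropertyStoreException() {
    super(); // getMessage() returns null, as the test asserts
  }
}
```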
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/store | Create_ds/helix/helix-core/src/test/java/org/apache/helix/store/zk/TestZkHelixPropertyStore.java | package org.apache.helix.store.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.helix.AccessOption;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.monitoring.mbeans.MBeanRegistrar;
import org.apache.helix.monitoring.mbeans.MonitorDomainNames;
import org.apache.helix.monitoring.mbeans.ZkClientMonitor;
import org.apache.helix.monitoring.mbeans.ZkClientPathMonitor;
import org.apache.helix.store.HelixPropertyListener;
import org.apache.helix.zookeeper.zkclient.exception.ZkNoNodeException;
import org.apache.helix.zookeeper.zkclient.serialize.SerializableSerializer;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
public class TestZkHelixPropertyStore extends ZkUnitTestBase {
final String _root = "/" + getShortClassName();
final int bufSize = 128;
final int mapNr = 10;
final int firstLevelNr = 10;
final int secondLevelNr = 10;
// final int totalNodes = firstLevelNr * secondLevelNr;
class TestListener implements HelixPropertyListener {
Map<String, Long> _changeKeys = new HashMap<>();
Map<String, Long> _createKeys = new HashMap<>();
Map<String, Long> _deleteKeys = new HashMap<>();
public void reset() {
_changeKeys.clear();
_createKeys.clear();
_deleteKeys.clear();
}
@Override
public void onDataChange(String path) {
_changeKeys.put(path, System.currentTimeMillis());
}
@Override
public void onDataCreate(String path) {
_createKeys.put(path, System.currentTimeMillis());
}
@Override
public void onDataDelete(String path) {
_deleteKeys.put(path, System.currentTimeMillis());
}
}
@AfterClass
public void afterClass() {
deleteCluster(getShortClassName());
}
@Test
public void testSet() {
// Logger.getRootLogger().setLevel(Level.INFO);
System.out.println("START testSet() at " + new Date(System.currentTimeMillis()));
String subRoot = _root + "/" + "set";
List<String> subscribedPaths = new ArrayList<>();
subscribedPaths.add(subRoot);
ZkHelixPropertyStore<ZNRecord> store =
new ZkHelixPropertyStore<>(new ZkBaseDataAccessor<>(_gZkClient), subRoot, subscribedPaths);
// test set
setNodes(store, 'a', false);
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++) {
String nodeId = getNodeId(i, j);
String key = getSecondLevelKey(i, j);
ZNRecord record = store.get(key, null, 0);
Assert.assertEquals(record.getId(), nodeId);
}
}
// test get from cache
long startT = System.currentTimeMillis();
for (int i = 0; i < 1000; i++) {
ZNRecord record = store.get("/node_0/childNode_0_0", null, 0);
Assert.assertNotNull(record);
}
long endT = System.currentTimeMillis();
System.out.println("1000 Get() time used: " + (endT - startT) + "ms");
long latency = endT - startT;
Assert.assertTrue(latency < 200,
"1000 Gets should be finished within 200ms, but was " + latency + " ms");
store.stop();
System.out.println("END testSet() at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSetInvalidPath() {
String subRoot = _root + "/" + "setInvalidPath";
ZkHelixPropertyStore<ZNRecord> store =
new ZkHelixPropertyStore<>(new ZkBaseDataAccessor<>(_gZkClient), subRoot, null);
try {
store.set("abc/xyz", new ZNRecord("testInvalid"), AccessOption.PERSISTENT);
Assert.fail("Should throw illegal-argument-exception since path doesn't start with /");
} catch (IllegalArgumentException e) {
// e.printStackTrace();
// OK
} catch (Exception e) {
Assert.fail("Should not throw exceptions other than illegal-argument");
}
}
@Test
public void testLocalTriggeredCallback() throws Exception {
System.out
.println("START testLocalTriggeredCallback() at " + new Date(System.currentTimeMillis()));
String subRoot = _root + "/" + "localCallback";
List<String> subscribedPaths = new ArrayList<>();
subscribedPaths.add(subRoot);
ZkHelixPropertyStore<ZNRecord> store =
new ZkHelixPropertyStore<>(new ZkBaseDataAccessor<>(_gZkClient), subRoot, subscribedPaths);
// change nodes via property store interface
// and verify all notifications have been received
TestListener listener = new TestListener();
store.subscribe("/", listener);
// test dataCreate callbacks
listener.reset();
setNodes(store, 'a', true);
// wait until all callbacks have been received
Thread.sleep(500);
int expectCreateNodes = 1 + firstLevelNr + firstLevelNr * secondLevelNr;
System.out.println("createKey#:" + listener._createKeys.size() + ", changeKey#:"
+ listener._changeKeys.size() + ", deleteKey#:" + listener._deleteKeys.size());
Assert.assertEquals(expectCreateNodes, listener._createKeys.size(),
"Should receive " + expectCreateNodes + " create callbacks");
// test dataChange callbacks
listener.reset();
setNodes(store, 'b', true);
// wait until all callbacks have been received
Thread.sleep(500);
int expectChangeNodes = firstLevelNr * secondLevelNr;
System.out.println("createKey#:" + listener._createKeys.size() + ", changeKey#:"
+ listener._changeKeys.size() + ", deleteKey#:" + listener._deleteKeys.size());
Assert.assertTrue(listener._changeKeys.size() >= expectChangeNodes,
"Should receive at least " + expectChangeNodes + " change callbacks");
// test delete callbacks
listener.reset();
int expectDeleteNodes = 1 + firstLevelNr + firstLevelNr * secondLevelNr;
store.remove("/", 0);
// wait until all callbacks have been received
for (int i = 0; i < 10; i++) {
if (listener._deleteKeys.size() == expectDeleteNodes)
break;
Thread.sleep(500);
}
System.out.println("createKey#:" + listener._createKeys.size() + ", changeKey#:"
+ listener._changeKeys.size() + ", deleteKey#:" + listener._deleteKeys.size());
Assert.assertEquals(expectDeleteNodes, listener._deleteKeys.size(),
"Should receive " + expectDeleteNodes + " delete callbacks");
store.stop();
System.out
.println("END testLocalTriggeredCallback() at " + new Date(System.currentTimeMillis()));
}
@Test
public void testZkTriggeredCallback() throws Exception {
System.out
.println("START testZkTriggeredCallback() at " + new Date(System.currentTimeMillis()));
String subRoot = _root + "/" + "zkCallback";
List<String> subscribedPaths = Collections.singletonList(subRoot);
ZkHelixPropertyStore<ZNRecord> store =
new ZkHelixPropertyStore<>(new ZkBaseDataAccessor<>(_gZkClient), subRoot, subscribedPaths);
// change nodes via property store interface
// and verify all notifications have been received
TestListener listener = new TestListener();
store.subscribe("/", listener);
// test create callbacks
listener.reset();
setNodes(_gZkClient, subRoot, 'a', true);
int expectCreateNodes = 1 + firstLevelNr + firstLevelNr * secondLevelNr;
Thread.sleep(500);
System.out.println("createKey#:" + listener._createKeys.size() + ", changeKey#:"
+ listener._changeKeys.size() + ", deleteKey#:" + listener._deleteKeys.size());
Assert.assertEquals(expectCreateNodes, listener._createKeys.size(),
"Should receive " + expectCreateNodes + " create callbacks");
// test change callbacks
listener.reset();
setNodes(_gZkClient, subRoot, 'b', true);
int expectChangeNodes = firstLevelNr * secondLevelNr;
for (int i = 0; i < 10; i++) {
if (listener._changeKeys.size() >= expectChangeNodes)
break;
Thread.sleep(500);
}
System.out.println("createKey#:" + listener._createKeys.size() + ", changeKey#:"
+ listener._changeKeys.size() + ", deleteKey#:" + listener._deleteKeys.size());
Assert.assertTrue(listener._changeKeys.size() >= expectChangeNodes,
"Should receive at least " + expectChangeNodes + " change callbacks");
// test delete callbacks
listener.reset();
int expectDeleteNodes = 1 + firstLevelNr + firstLevelNr * secondLevelNr;
_gZkClient.deleteRecursively(subRoot);
Thread.sleep(1000);
System.out.println("createKey#:" + listener._createKeys.size() + ", changeKey#:"
+ listener._changeKeys.size() + ", deleteKey#:" + listener._deleteKeys.size());
Assert.assertEquals(expectDeleteNodes, listener._deleteKeys.size(),
"Should receive " + expectDeleteNodes + " delete callbacks");
store.stop();
System.out.println("END testZkTriggeredCallback() at " + new Date(System.currentTimeMillis()));
}
@Test
public void testBackToBackRemoveAndSet() throws Exception {
System.out
.println("START testBackToBackRemoveAndSet() at " + new Date(System.currentTimeMillis()));
String subRoot = _root + "/" + "backToBackRemoveAndSet";
List<String> subscribedPaths = new ArrayList<>();
subscribedPaths.add(subRoot);
ZkHelixPropertyStore<ZNRecord> store =
new ZkHelixPropertyStore<>(new ZkBaseDataAccessor<>(_gZkClient), subRoot, subscribedPaths);
store.set("/child0", new ZNRecord("child0"), AccessOption.PERSISTENT);
ZNRecord record = store.get("/child0", null, 0); // will put the record in cache
Assert.assertEquals(record.getId(), "child0");
// System.out.println("1:get:" + record);
String child0Path = subRoot + "/child0";
for (int i = 0; i < 2; i++) {
_gZkClient.delete(child0Path);
_gZkClient.createPersistent(child0Path, new ZNRecord("child0-new-" + i));
}
Thread.sleep(500); // should wait for zk callback to add "/child0" into cache
record = store.get("/child0", null, 0);
Assert.assertEquals(record.getId(), "child0-new-1",
"Cache shoulde be updated to latest create");
// System.out.println("2:get:" + record);
_gZkClient.delete(child0Path);
Thread.sleep(500); // should wait for zk callback to remove "/child0" from cache
try {
record = store.get("/child0", null, AccessOption.THROW_EXCEPTION_IFNOTEXIST);
Assert.fail("/child0 should have been removed");
} catch (ZkNoNodeException e) {
// OK.
}
// System.out.println("3:get:" + record);
store.stop();
System.out
.println("END testBackToBackRemoveAndSet() at " + new Date(System.currentTimeMillis()));
}
private String getNodeId(int i, int j) {
return "childNode_" + i + "_" + j;
}
private String getSecondLevelKey(int i, int j) {
return "/node_" + i + "/" + getNodeId(i, j);
}
private String getFirstLevelKey(int i) {
return "/node_" + i;
}
private void setNodes(ZkHelixPropertyStore<ZNRecord> store, char c, boolean needTimestamp) {
char[] data = new char[bufSize];
for (int i = 0; i < bufSize; i++) {
data[i] = c;
}
Map<String, String> map = new TreeMap<>();
for (int i = 0; i < mapNr; i++) {
map.put("key_" + i, new String(data));
}
for (int i = 0; i < firstLevelNr; i++) {
for (int j = 0; j < secondLevelNr; j++) {
String nodeId = getNodeId(i, j);
ZNRecord record = new ZNRecord(nodeId);
record.setSimpleFields(map);
if (needTimestamp) {
long now = System.currentTimeMillis();
record.setSimpleField("SetTimestamp", Long.toString(now));
}
String key = getSecondLevelKey(i, j);
store.set(key, record, AccessOption.PERSISTENT);
}
}
}
private void setNodes(HelixZkClient zkClient, String root, char c, boolean needTimestamp) {
char[] data = new char[bufSize];
for (int i = 0; i < bufSize; i++) {
data[i] = c;
}
Map<String, String> map = new TreeMap<>();
for (int i = 0; i < mapNr; i++) {
map.put("key_" + i, new String(data));
}
for (int i = 0; i < firstLevelNr; i++) {
String firstLevelKey = getFirstLevelKey(i);
for (int j = 0; j < secondLevelNr; j++) {
String nodeId = getNodeId(i, j);
ZNRecord record = new ZNRecord(nodeId);
record.setSimpleFields(map);
if (needTimestamp) {
long now = System.currentTimeMillis();
record.setSimpleField("SetTimestamp", Long.toString(now));
}
String key = getSecondLevelKey(i, j);
try {
zkClient.writeData(root + key, record);
} catch (ZkNoNodeException e) {
zkClient.createPersistent(root + firstLevelKey, true);
zkClient.createPersistent(root + key, record);
}
}
}
}
@Test
public void testZkClientMonitor() throws JMException {
final String TEST_ROOT = "/test_root";
ZkHelixPropertyStore<ZNRecord> store =
new ZkHelixPropertyStore<>(ZK_ADDR, new SerializableSerializer(), TEST_ROOT);
ObjectName name = MBeanRegistrar.buildObjectName(MonitorDomainNames.HelixZkClient.name(),
ZkClientMonitor.MONITOR_TYPE, ZkHelixPropertyStore.MONITOR_TYPE,
ZkClientMonitor.MONITOR_KEY, TEST_ROOT, ZkClientPathMonitor.MONITOR_PATH, "Root");
MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer();
Assert.assertTrue(beanServer.isRegistered(name));
store.getStat("/", AccessOption.PERSISTENT);
Assert.assertEquals((long) beanServer.getAttribute(name, "ReadCounter"), 1);
}
}
| 9,902 |
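Condensing the store lifecycle exercised above: construct against a `ZkBaseDataAccessor`, subscribe a `HelixPropertyListener`, write and read relative paths, then stop. The sketch below uses only calls that appear in the test; the client handle and root path are placeholders.

```java
// Usage sketch of ZkHelixPropertyStore, restricted to calls used in the test above.
import java.util.Collections;
import org.apache.helix.AccessOption;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.store.HelixPropertyListener;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;

public class PropertyStoreUsage {
  public static void demo(HelixZkClient zkClient) {
    String root = "/demoRoot"; // placeholder root path
    ZkHelixPropertyStore<ZNRecord> store = new ZkHelixPropertyStore<>(
        new ZkBaseDataAccessor<>(zkClient), root, Collections.singletonList(root));

    // Callbacks fire for creates, changes, and deletes under the subscribed path.
    store.subscribe("/", new HelixPropertyListener() {
      @Override
      public void onDataChange(String path) { System.out.println("changed: " + path); }
      @Override
      public void onDataCreate(String path) { System.out.println("created: " + path); }
      @Override
      public void onDataDelete(String path) { System.out.println("deleted: " + path); }
    });

    // Paths are relative to the store's root; reads may be served from its cache.
    store.set("/child0", new ZNRecord("child0"), AccessOption.PERSISTENT);
    ZNRecord record = store.get("/child0", null, 0);
    System.out.println("read back: " + record.getId());

    store.stop(); // releases watches and the local cache
  }
}
```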
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/store | Create_ds/helix/helix-core/src/test/java/org/apache/helix/store/zk/TestAutoFallbackPropertyStore.java | package org.apache.helix.store.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.apache.helix.AccessOption;
import org.apache.helix.PropertyType;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.apache.zookeeper.data.Stat;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestAutoFallbackPropertyStore extends ZkUnitTestBase {
class MyDataUpdater implements DataUpdater<ZNRecord> {
final String _id;
MyDataUpdater(String id) {
_id = id;
}
@Override
public ZNRecord update(ZNRecord currentData) {
if (currentData == null) {
currentData = new ZNRecord(_id);
} else {
currentData.setSimpleField("key", "value");
}
return currentData;
}
}
@Test
public void testSingleUpdateUsingFallbackPath() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
String root = String.format("/%s/%s", clusterName, PropertyType.PROPERTYSTORE.name());
String fallbackRoot = String.format("/%s/%s", clusterName, "HELIX_PROPERTYSTORE");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
// create 0 under fallbackRoot
for (int i = 0; i < 1; i++) {
String path = String.format("%s/%d", fallbackRoot, i);
baseAccessor.create(path, new ZNRecord(Integer.toString(i)), AccessOption.PERSISTENT);
}
AutoFallbackPropertyStore<ZNRecord> store =
new AutoFallbackPropertyStore<>(baseAccessor, root, fallbackRoot);
String path = String.format("/%d", 0);
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location");
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should exist under fallback location");
boolean succeed = store.update(path, new MyDataUpdater("new0"), AccessOption.PERSISTENT);
Assert.assertTrue(succeed);
// fallback path should remain unchanged
ZNRecord record = baseAccessor.get(String.format("%s%s", fallbackRoot, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "0");
Assert.assertNull(record.getSimpleField("key"));
// new path should have simple field set
record = baseAccessor.get(String.format("%s%s", root, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "0");
Assert.assertNotNull(record.getSimpleField("key"));
Assert.assertEquals(record.getSimpleField("key"), "value");
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSingleUpdateUsingNewPath() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
String root = String.format("/%s/%s", clusterName, PropertyType.PROPERTYSTORE.name());
String fallbackRoot = String.format("/%s/%s", clusterName, "HELIX_PROPERTYSTORE");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
// create 0 under both fallbackRoot and root
for (int i = 0; i < 1; i++) {
String path = String.format("%s/%d", root, i);
baseAccessor.create(path, new ZNRecord("new" + i), AccessOption.PERSISTENT);
path = String.format("%s/%d", fallbackRoot, i);
baseAccessor.create(path, new ZNRecord(Integer.toString(i)), AccessOption.PERSISTENT);
}
AutoFallbackPropertyStore<ZNRecord> store =
new AutoFallbackPropertyStore<>(baseAccessor, root, fallbackRoot);
String path = String.format("/%d", 0);
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should exist under new location");
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should exist under fallback location");
boolean succeed = store.update(path, new MyDataUpdater("0"), AccessOption.PERSISTENT);
Assert.assertTrue(succeed);
ZNRecord record = baseAccessor.get(String.format("%s%s", fallbackRoot, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "0");
Assert.assertNull(record.getSimpleField("key"));
record = baseAccessor.get(String.format("%s%s", root, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "new0");
Assert.assertNotNull(record.getSimpleField("key"));
Assert.assertEquals(record.getSimpleField("key"), "value");
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testMultiUpdateUsingFallbackPath() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
String root = String.format("/%s/%s", clusterName, PropertyType.PROPERTYSTORE.name());
String fallbackRoot = String.format("/%s/%s", clusterName, "HELIX_PROPERTYSTORE");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
// create 0-9 under fallbackRoot
for (int i = 0; i < 10; i++) {
String path = String.format("%s/%d", fallbackRoot, i);
baseAccessor.create(path, new ZNRecord(Integer.toString(i)), AccessOption.PERSISTENT);
}
AutoFallbackPropertyStore<ZNRecord> store =
new AutoFallbackPropertyStore<>(baseAccessor, root, fallbackRoot);
List<String> paths = new ArrayList<>();
List<DataUpdater<ZNRecord>> updaters = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String path = String.format("/%d", i);
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location");
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should exist under fallback location");
paths.add(path);
updaters.add(new MyDataUpdater("new" + i));
}
boolean[] succeed = store.updateChildren(paths, updaters, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(succeed[i]);
String path = paths.get(i);
// fallback path should remain unchanged
ZNRecord record = baseAccessor.get(String.format("%s%s", fallbackRoot, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "" + i);
Assert.assertNull(record.getSimpleField("key"));
// new path should have simple field set
record = baseAccessor.get(String.format("%s%s", root, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "" + i);
Assert.assertNotNull(record.getSimpleField("key"));
Assert.assertEquals(record.getSimpleField("key"), "value");
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testMultiUpdateUsingNewPath() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
String root = String.format("/%s/%s", clusterName, PropertyType.PROPERTYSTORE.name());
String fallbackRoot = String.format("/%s/%s", clusterName, "HELIX_PROPERTYSTORE");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
// create 0-9 under both fallbackRoot and new root
for (int i = 0; i < 10; i++) {
String path = String.format("%s/%d", fallbackRoot, i);
baseAccessor.create(path, new ZNRecord(Integer.toString(i)), AccessOption.PERSISTENT);
path = String.format("%s/%d", root, i);
baseAccessor.create(path, new ZNRecord("new" + i), AccessOption.PERSISTENT);
}
AutoFallbackPropertyStore<ZNRecord> store =
new AutoFallbackPropertyStore<>(baseAccessor, root, fallbackRoot);
List<String> paths = new ArrayList<>();
List<DataUpdater<ZNRecord>> updaters = new ArrayList<>();
for (int i = 0; i < 20; i++) {
String path = String.format("/%d", i);
if (i < 10) {
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should exist under new location");
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should exist under fallback location");
} else {
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location");
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should not exist under fallback location");
}
paths.add(path);
updaters.add(new MyDataUpdater("new" + i));
}
boolean[] succeed = store.updateChildren(paths, updaters, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(succeed[i]);
String path = paths.get(i);
// fallback path should remain unchanged
{
ZNRecord record = baseAccessor.get(String.format("%s%s", fallbackRoot, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "" + i);
Assert.assertNull(record.getSimpleField("key"));
}
// new path should have simple field set
ZNRecord record = baseAccessor.get(String.format("%s%s", root, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "new" + i);
Assert.assertNotNull(record.getSimpleField("key"));
Assert.assertEquals(record.getSimpleField("key"), "value");
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSingleSet() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
String root = String.format("/%s/%s", clusterName, PropertyType.PROPERTYSTORE.name());
String fallbackRoot = String.format("/%s/%s", clusterName, "HELIX_PROPERTYSTORE");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
// create 0 under fallbackRoot
for (int i = 0; i < 1; i++) {
String path = String.format("%s/%d", fallbackRoot, i);
baseAccessor.create(path, new ZNRecord(Integer.toString(i)), AccessOption.PERSISTENT);
}
AutoFallbackPropertyStore<ZNRecord> store =
new AutoFallbackPropertyStore<>(baseAccessor, root, fallbackRoot);
String path = String.format("/%d", 0);
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location");
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should exist under fallback location");
ZNRecord record = new ZNRecord("new0");
boolean succeed = store.set(path, record, AccessOption.PERSISTENT);
Assert.assertTrue(succeed);
record = baseAccessor.get(String.format("%s%s", fallbackRoot, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "0");
record = baseAccessor.get(String.format("%s%s", root, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "new0");
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testMultiSet() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
String root = String.format("/%s/%s", clusterName, PropertyType.PROPERTYSTORE.name());
String fallbackRoot = String.format("/%s/%s", clusterName, "HELIX_PROPERTYSTORE");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
// create 0-9 under fallbackRoot
for (int i = 0; i < 10; i++) {
String path = String.format("%s/%d", fallbackRoot, i);
baseAccessor.create(path, new ZNRecord(Integer.toString(i)), AccessOption.PERSISTENT);
}
AutoFallbackPropertyStore<ZNRecord> store =
new AutoFallbackPropertyStore<>(baseAccessor, root, fallbackRoot);
List<String> paths = new ArrayList<>();
List<ZNRecord> records = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String path = String.format("/%d", i);
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location");
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should exist under fallback location");
paths.add(path);
ZNRecord record = new ZNRecord("new" + i);
records.add(record);
}
boolean[] succeed = store.setChildren(paths, records, AccessOption.PERSISTENT);
for (int i = 0; i < 10; i++) {
Assert.assertTrue(succeed[i]);
String path = String.format("/%d", i);
ZNRecord record = baseAccessor.get(String.format("%s%s", fallbackRoot, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), Integer.toString(i));
record = baseAccessor.get(String.format("%s%s", root, path), null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "new" + i);
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testSingleGetOnFallbackPath() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
String root = String.format("/%s/%s", clusterName, PropertyType.PROPERTYSTORE.name());
String fallbackRoot = String.format("/%s/%s", clusterName, "HELIX_PROPERTYSTORE");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
// create 0 under fallbackRoot
for (int i = 0; i < 1; i++) {
String path = String.format("%s/%d", fallbackRoot, i);
baseAccessor.create(path, new ZNRecord(Integer.toString(i)), AccessOption.PERSISTENT);
}
AutoFallbackPropertyStore<ZNRecord> store =
new AutoFallbackPropertyStore<>(baseAccessor, root, fallbackRoot);
String path = String.format("/%d", 0);
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location");
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should exist under fallback location");
// test single exist
boolean exist = store.exists(path, 0);
Assert.assertTrue(exist);
// test single getStat
Stat stat = store.getStat(path, 0);
Assert.assertNotNull(stat);
// test single get
ZNRecord record = store.get(path, null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "0");
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location after get");
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
void testMultiGetOnFallbackPath() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
String root = String.format("/%s/%s", clusterName, PropertyType.PROPERTYSTORE.name());
String fallbackRoot = String.format("/%s/%s", clusterName, "HELIX_PROPERTYSTORE");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
// create 0-9 under fallbackRoot
for (int i = 0; i < 10; i++) {
String path = String.format("%s/%d", fallbackRoot, i);
baseAccessor.create(path, new ZNRecord(Integer.toString(i)), AccessOption.PERSISTENT);
}
AutoFallbackPropertyStore<ZNRecord> store =
new AutoFallbackPropertyStore<>(baseAccessor, root, fallbackRoot);
List<String> paths = new ArrayList<>();
for (int i = 0; i < 10; i++) {
String path = String.format("/%d", i);
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location");
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should exist under fallback location");
paths.add(path);
}
// test multi-exist
boolean[] exists = store.exists(paths, 0);
for (int i = 0; i < paths.size(); i++) {
Assert.assertTrue(exists[i]);
}
// test multi-getStat
Stat[] stats = store.getStats(paths, 0);
for (int i = 0; i < paths.size(); i++) {
Assert.assertNotNull(stats[i]);
}
// test multi-get
List<ZNRecord> records = store.get(paths, null, 0, true);
Assert.assertNotNull(records);
Assert.assertEquals(records.size(), 10);
for (int i = 0; i < 10; i++) {
ZNRecord record = records.get(i);
String path = paths.get(i);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), Integer.toString(i));
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location after get");
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testFailOnSingleGet() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
String root = String.format("/%s/%s", clusterName, PropertyType.PROPERTYSTORE.name());
String fallbackRoot = String.format("/%s/%s", clusterName, "HELIX_PROPERTYSTORE");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
AutoFallbackPropertyStore<ZNRecord> store =
new AutoFallbackPropertyStore<>(baseAccessor, root, fallbackRoot);
String path = String.format("/%d", 0);
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location");
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should not exist under fallback location");
// test single exist
boolean exist = store.exists(path, 0);
Assert.assertFalse(exist);
// test single getStat
Stat stat = store.getStat(path, 0);
Assert.assertNull(stat);
// test single get
ZNRecord record = store.get(path, null, 0);
Assert.assertNull(record);
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testFailOnMultiGet() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
String root = String.format("/%s/%s", clusterName, PropertyType.PROPERTYSTORE.name());
String fallbackRoot = String.format("/%s/%s", clusterName, "HELIX_PROPERTYSTORE");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
// create 0-9 under fallbackRoot
for (int i = 0; i < 10; i++) {
String path = String.format("%s/%d", fallbackRoot, i);
baseAccessor.create(path, new ZNRecord(Integer.toString(i)), AccessOption.PERSISTENT);
}
AutoFallbackPropertyStore<ZNRecord> store =
new AutoFallbackPropertyStore<>(baseAccessor, root, fallbackRoot);
List<String> paths = new ArrayList<>();
for (int i = 0; i < 20; i++) {
String path = String.format("/%d", i);
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location");
if (i < 10) {
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should exist under fallback location");
} else {
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should not exist under fallback location");
}
paths.add(path);
}
// test multi-exist
boolean[] exists = store.exists(paths, 0);
for (int i = 0; i < paths.size(); i++) {
if (i < 10) {
Assert.assertTrue(exists[i]);
} else {
Assert.assertFalse(exists[i]);
}
}
// test multi-getStat
Stat[] stats = store.getStats(paths, 0);
for (int i = 0; i < paths.size(); i++) {
if (i < 10) {
Assert.assertNotNull(stats[i]);
} else {
Assert.assertNull(stats[i]);
}
}
// test multi-get
List<ZNRecord> records = store.get(paths, null, 0, true);
Assert.assertNotNull(records);
Assert.assertEquals(records.size(), 20);
for (int i = 0; i < 20; i++) {
ZNRecord record = records.get(i);
String path = paths.get(i);
if (i < 10) {
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), Integer.toString(i));
} else {
Assert.assertNull(record);
}
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location after get");
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
@Test
public void testGetChildren() {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
String root = String.format("/%s/%s", clusterName, PropertyType.PROPERTYSTORE.name());
String fallbackRoot = String.format("/%s/%s", clusterName, "HELIX_PROPERTYSTORE");
ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(_gZkClient);
// create 0-9 under fallbackRoot and 10-19 under root
for (int i = 0; i < 20; i++) {
if (i < 10) {
String path = String.format("%s/%d", fallbackRoot, i);
baseAccessor.create(path, new ZNRecord(Integer.toString(i)), AccessOption.PERSISTENT);
} else {
String path = String.format("%s/%d", root, i);
baseAccessor.create(path, new ZNRecord(Integer.toString(i)), AccessOption.PERSISTENT);
}
}
AutoFallbackPropertyStore<ZNRecord> store =
new AutoFallbackPropertyStore<>(baseAccessor, root, fallbackRoot);
List<String> paths = new ArrayList<>();
for (int i = 0; i < 20; i++) {
String path = String.format("/%d", i);
if (i < 10) {
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should exist under fallback location");
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should not exist under new location");
} else {
Assert.assertFalse(baseAccessor.exists(String.format("%s%s", fallbackRoot, path), 0),
"Should not exist under fallback location");
Assert.assertTrue(baseAccessor.exists(String.format("%s%s", root, path), 0),
"Should exist under new location");
}
paths.add(path);
}
List<String> childs = store.getChildNames("/", 0);
Assert.assertNotNull(childs);
Assert.assertEquals(childs.size(), 20);
for (int i = 0; i < 20; i++) {
String child = childs.get(i);
Assert.assertTrue(childs.contains(child));
}
List<ZNRecord> records = store.getChildren("/", null, 0, 0, 0);
Assert.assertNotNull(records);
Assert.assertEquals(records.size(), 20);
for (int i = 0; i < 20; i++) {
ZNRecord record = records.get(i);
Assert.assertNotNull(record);
String id = record.getId();
Assert.assertTrue(childs.contains(id));
}
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
}
| 9,903 |
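Taken together, the tests above fix the `AutoFallbackPropertyStore` contract: reads and existence checks consult the new root first and fall back to the old one, while sets and updates land only under the new root, leaving fallback data untouched. A simplified sketch of the read-side pattern follows; it is illustrative only, since the real store also covers multi-gets, stats, and child listings, as the tests show.

```java
// Simplified read-with-fallback pattern verified by the tests above.
// The two roots and the accessor wiring mirror the test setup; this is a
// sketch of the semantics, not the actual AutoFallbackPropertyStore code.
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.zookeeper.datamodel.ZNRecord;

class FallbackReader {
  private final ZkBaseDataAccessor<ZNRecord> _accessor;
  private final String _root;         // new location, e.g. .../PROPERTYSTORE
  private final String _fallbackRoot; // old location, e.g. .../HELIX_PROPERTYSTORE

  FallbackReader(ZkBaseDataAccessor<ZNRecord> accessor, String root, String fallbackRoot) {
    _accessor = accessor;
    _root = root;
    _fallbackRoot = fallbackRoot;
  }

  ZNRecord get(String path) {
    // Prefer the new root; only when the node is absent there, try the fallback.
    ZNRecord record = _accessor.get(_root + path, null, 0);
    if (record == null) {
      record = _accessor.get(_fallbackRoot + path, null, 0);
    }
    return record; // null when the node exists in neither location
  }
}
```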
0 | Create_ds/helix/helix-core/src/test/java/org/apache/helix/store | Create_ds/helix/helix-core/src/test/java/org/apache/helix/store/zk/TestZkManagerWithAutoFallbackStore.java | package org.apache.helix.store.zk;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Date;
import org.apache.helix.AccessOption;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.TestHelper;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.ZkUnitTestBase;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.testng.Assert;
import org.testng.annotations.Test;
public class TestZkManagerWithAutoFallbackStore extends ZkUnitTestBase {
@Test
public void testBasic() throws Exception {
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
int n = 2;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // resources
32, // partitions per resource
n, // number of nodes
2, // replicas
"MasterSlave", false); // do rebalance
// start participants
MockParticipantManager[] participants = new MockParticipantManager[n];
for (int i = 0; i < 1; i++) {
String instanceName = "localhost_" + (12918 + i);
participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participants[i].syncStart();
}
// add some data to fallback path: HELIX_PROPERTYSTORE
BaseDataAccessor<ZNRecord> accessor =
participants[0].getHelixDataAccessor().getBaseDataAccessor();
for (int i = 0; i < 10; i++) {
String path = String.format("/%s/HELIX_PROPERTYSTORE/%d", clusterName, i);
ZNRecord record = new ZNRecord("" + i);
record.setSimpleField("key1", "value1");
accessor.set(path, record, AccessOption.PERSISTENT);
}
ZkHelixPropertyStore<ZNRecord> store = participants[0].getHelixPropertyStore();
// read shall use fallback paths
for (int i = 0; i < 10; i++) {
String path = String.format("/%d", i);
ZNRecord record = store.get(path, null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "" + i);
Assert.assertNotNull(record.getSimpleField("key1"));
Assert.assertEquals(record.getSimpleField("key1"), "value1");
}
// update shall update new paths
for (int i = 0; i < 10; i++) {
String path = String.format("/%d", i);
store.update(path, currentData -> {
if (currentData != null) {
currentData.setSimpleField("key2", "value2");
}
return currentData;
}, AccessOption.PERSISTENT);
}
for (int i = 0; i < 10; i++) {
String path = String.format("/%d", i);
ZNRecord record = store.get(path, null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "" + i);
Assert.assertNotNull(record.getSimpleField("key1"));
Assert.assertEquals(record.getSimpleField("key1"), "value1");
Assert.assertNotNull(record.getSimpleField("key2"));
Assert.assertEquals(record.getSimpleField("key2"), "value2");
}
// set shall use new path
for (int i = 10; i < 20; i++) {
String path = String.format("/%d", i);
ZNRecord record = new ZNRecord("" + i);
record.setSimpleField("key3", "value3");
store.set(path, record, AccessOption.PERSISTENT);
}
for (int i = 10; i < 20; i++) {
String path = String.format("/%d", i);
ZNRecord record = store.get(path, null, 0);
Assert.assertNotNull(record);
Assert.assertEquals(record.getId(), "" + i);
Assert.assertNotNull(record.getSimpleField("key3"));
Assert.assertEquals(record.getSimpleField("key3"), "value3");
}
participants[0].syncStop();
deleteCluster(clusterName);
System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
}
| 9,904 |
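The read-modify-write calls above pass a `DataUpdater` lambda, which the store applies to the current record before writing it back; this is why the earlier fallback data gains `key2` without losing `key1`. A standalone updater sketch follows; the `counter` field name is illustrative, not part of any Helix schema.

```java
// DataUpdater sketch that increments a counter simple field.
// "counter" is an illustrative field name, not a Helix convention.
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.zkclient.DataUpdater;

class CounterUpdater implements DataUpdater<ZNRecord> {
  @Override
  public ZNRecord update(ZNRecord currentData) {
    if (currentData == null) {
      currentData = new ZNRecord("counter"); // first write creates the record
    }
    String old = currentData.getSimpleField("counter");
    long next = (old == null) ? 1 : Long.parseLong(old) + 1;
    currentData.setSimpleField("counter", Long.toString(next));
    return currentData; // the returned record is what gets written back
  }
}
```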
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/BucketDataAccessor.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
public interface BucketDataAccessor {
/**
* Write a HelixProperty in buckets, compressed.
* @param path path to which the metadata will be written to
* @param value HelixProperty to write
* @param <T>
* @throws IOException
*/
<T extends HelixProperty> boolean compressedBucketWrite(String path, T value) throws IOException;
/**
* Read a HelixProperty that was written in buckets, compressed.
* @param path
* @param helixPropertySubType the subtype of HelixProperty the data was written in
* @param <T>
*/
<T extends HelixProperty> HelixProperty compressedBucketRead(String path,
Class<T> helixPropertySubType);
/**
* Delete the HelixProperty in the given path.
* @param path
*/
void compressedBucketDelete(String path);
/**
* Close the connection to the metadata store.
*/
void disconnect();
}
| 9,905 |
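A short usage sketch against the `BucketDataAccessor` interface above; the concrete accessor instance and the path are placeholders, and error handling is reduced to the `IOException` the write method declares.

```java
// Usage sketch for the BucketDataAccessor interface above.
// The accessor instance and path are placeholders for illustration.
import java.io.IOException;
import org.apache.helix.BucketDataAccessor;
import org.apache.helix.HelixProperty;

class BucketUsage {
  static void roundTrip(BucketDataAccessor accessor, HelixProperty value) throws IOException {
    String path = "/LARGE_RECORDS/assignment"; // placeholder metadata path

    // Write: the accessor splits the record into buckets and compresses them.
    if (!accessor.compressedBucketWrite(path, value)) {
      throw new IOException("bucketed write failed for " + path);
    }

    // Read back, naming the HelixProperty subtype that was written.
    HelixProperty readBack = accessor.compressedBucketRead(path, HelixProperty.class);
    System.out.println("read back id: " + readBack.getId());

    // Delete the bucketed node and release the metadata-store connection.
    accessor.compressedBucketDelete(path);
    accessor.disconnect();
  }
}
```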
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/HelixManager.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Optional;
import java.util.Set;
import org.apache.helix.api.listeners.ClusterConfigChangeListener;
import org.apache.helix.api.listeners.ConfigChangeListener;
import org.apache.helix.api.listeners.ControllerChangeListener;
import org.apache.helix.api.listeners.CurrentStateChangeListener;
import org.apache.helix.api.listeners.CustomizedStateChangeListener;
import org.apache.helix.api.listeners.CustomizedStateConfigChangeListener;
import org.apache.helix.api.listeners.CustomizedStateRootChangeListener;
import org.apache.helix.api.listeners.CustomizedViewChangeListener;
import org.apache.helix.api.listeners.CustomizedViewRootChangeListener;
import org.apache.helix.api.listeners.ExternalViewChangeListener;
import org.apache.helix.api.listeners.IdealStateChangeListener;
import org.apache.helix.api.listeners.InstanceConfigChangeListener;
import org.apache.helix.api.listeners.LiveInstanceChangeListener;
import org.apache.helix.api.listeners.MessageListener;
import org.apache.helix.api.listeners.ResourceConfigChangeListener;
import org.apache.helix.api.listeners.ScopedConfigChangeListener;
import org.apache.helix.controller.GenericHelixController;
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.healthcheck.ParticipantHealthReportCollector;
import org.apache.helix.manager.zk.ZKHelixManager;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CustomizedStateConfig;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.participant.HelixStateMachineEngine;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.spectator.RoutingTableProvider;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
/**
* Class that represents the Helix Agent.
* First class Object any process will interact with<br/>
* General flow <blockquote>
*
* <pre>
* manager = HelixManagerFactory.getZKHelixManager(
* clusterName, instanceName, ROLE, zkAddr);
* // ROLE can be participant, spectator or a controller<br/>
* manager.addPreConnectCallback(cb);
* // cb is invoked after a connection is established, but before the node of type ROLE has
* // joined the cluster. This is where one can add additional listeners (e.g manager.addSOMEListener(listener);)
* manager.connect();
* After connect is invoked the subsequent interactions will be via listener onChange callbacks
* There will be 3 scenarios for onChange callback, which can be determined using NotificationContext.type
* INIT -> will be invoked the first time the listener is added
* CALLBACK -> will be invoked due to datachange in the property value
* FINALIZE -> will be invoked when listener is removed or session expires
* manager.disconnect()
* </pre>
*
* </blockquote> Default implementations available
* @see HelixStateMachineEngine HelixStateMachineEngine for participant
* @see RoutingTableProvider RoutingTableProvider for spectator
* @see GenericHelixController GenericHelixController for controller
*/
public interface HelixManager {
@Deprecated
String ALLOW_PARTICIPANT_AUTO_JOIN =
ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN;
/**
* Start participating in the cluster operations. All listeners will be
* initialized and will be notified for every cluster state change. This method
* is not re-entrant. One cannot call this method twice.
* @throws Exception
*/
void connect() throws Exception;
/**
* Check if the connection is alive. Code depending on the cluster manager must
* always guard its logic with if (manager.isConnected()) { // custom code }. This
* prevents the client from doing anything while it is disconnected from the
* cluster. There is no need to invoke connect again if isConnected returns false.
* @return true if connected, false otherwise
*/
boolean isConnected();
/**
* Disconnect from the cluster. All the listeners will be removed and
* disconnected from the server. It is important for the client to ensure that a
* new manager instance is used when it wants to connect again.
*/
void disconnect();
/**
* @see IdealStateChangeListener#onIdealStateChange(List, NotificationContext)
* @param listener
* @throws Exception
*/
void addIdealStateChangeListener(IdealStateChangeListener listener) throws Exception;
/**
* @see IdealStateChangeListener#onIdealStateChange(List, NotificationContext)
* @param listener
* @throws Exception
*/
@Deprecated
void addIdealStateChangeListener(org.apache.helix.IdealStateChangeListener listener) throws Exception;
/**
* @see LiveInstanceChangeListener#onLiveInstanceChange(List, NotificationContext)
* @param listener
*/
void addLiveInstanceChangeListener(LiveInstanceChangeListener listener) throws Exception;
/**
* @see LiveInstanceChangeListener#onLiveInstanceChange(List, NotificationContext)
* @param listener
*/
@Deprecated
void addLiveInstanceChangeListener(org.apache.helix.LiveInstanceChangeListener listener) throws Exception;
/**
* @see ConfigChangeListener#onConfigChange(List, NotificationContext)
* @param listener
* @deprecated replaced by addInstanceConfigChangeListener()
*/
@Deprecated
void addConfigChangeListener(ConfigChangeListener listener) throws Exception;
/**
* @see InstanceConfigChangeListener#onInstanceConfigChange(List, NotificationContext)
* @param listener
*/
void addInstanceConfigChangeListener(InstanceConfigChangeListener listener) throws Exception;
/**
* @see InstanceConfigChangeListener#onInstanceConfigChange(List, NotificationContext)
* @param listener
*/
@Deprecated
void addInstanceConfigChangeListener(org.apache.helix.InstanceConfigChangeListener listener) throws Exception;
/**
* @see ResourceConfigChangeListener#onResourceConfigChange(List, NotificationContext)
* @param listener
*/
void addResourceConfigChangeListener(ResourceConfigChangeListener listener) throws Exception;
/**
* @see CustomizedStateConfigChangeListener#onCustomizedStateConfigChange(CustomizedStateConfig,
* NotificationContext)
* @param listener
*/
void addCustomizedStateConfigChangeListener(
CustomizedStateConfigChangeListener listener) throws Exception;
/**
* @see ClusterConfigChangeListener#onClusterConfigChange(ClusterConfig, NotificationContext)
* @param listener
*/
void addClusterfigChangeListener(ClusterConfigChangeListener listener) throws Exception;
/**
* @see ScopedConfigChangeListener#onConfigChange(List, NotificationContext)
* @param listener
* @param scope
*/
void addConfigChangeListener(ScopedConfigChangeListener listener, ConfigScopeProperty scope)
throws Exception;
/**
* @see ScopedConfigChangeListener#onConfigChange(List, NotificationContext)
* @param listener
* @param scope
*/
@Deprecated
void addConfigChangeListener(org.apache.helix.ScopedConfigChangeListener listener, ConfigScopeProperty scope)
throws Exception;
/**
* @see MessageListener#onMessage(String, List, NotificationContext)
* @param listener
* @param instanceName
*/
void addMessageListener(MessageListener listener, String instanceName) throws Exception;
/**
* @see MessageListener#onMessage(String, List, NotificationContext)
* @param listener
* @param instanceName
*/
@Deprecated
void addMessageListener(org.apache.helix.MessageListener listener, String instanceName)
throws Exception;
/**
* @see CurrentStateChangeListener#onStateChange(String, List, NotificationContext)
* @param listener
* @param instanceName
*/
void addCurrentStateChangeListener(CurrentStateChangeListener listener, String instanceName,
String sessionId) throws Exception;
/**
* @see CurrentStateChangeListener#onStateChange(String, List, NotificationContext)
* @param listener
* @param instanceName
*/
@Deprecated
void addCurrentStateChangeListener(org.apache.helix.CurrentStateChangeListener listener, String instanceName,
String sessionId) throws Exception;
/**
* Uses CurrentStateChangeListener since TaskCurrentState shares the same CurrentState model
* @see CurrentStateChangeListener#onStateChange(String, List, NotificationContext)
* @param listener
* @param instanceName
*/
default void addTaskCurrentStateChangeListener(CurrentStateChangeListener listener,
String instanceName, String sessionId) throws Exception {
throw new UnsupportedOperationException("Not implemented");
}
/**
* @see CustomizedStateRootChangeListener#onCustomizedStateRootChange(String, NotificationContext)
* @param listener
* @param instanceName
*/
void addCustomizedStateRootChangeListener(CustomizedStateRootChangeListener listener,
String instanceName) throws Exception;
/**
* @see CustomizedStateChangeListener#onCustomizedStateChange(String, List, NotificationContext)
* @param listener
* @param instanceName
*/
void addCustomizedStateChangeListener(CustomizedStateChangeListener listener, String instanceName,
String stateName) throws Exception;
/**
* @see ExternalViewChangeListener#onExternalViewChange(List, NotificationContext)
* @param listener
*/
void addExternalViewChangeListener(ExternalViewChangeListener listener) throws Exception;
/**
* @see CustomizedViewChangeListener#onCustomizedViewChange(List, NotificationContext)
* @param listener
*/
void addCustomizedViewChangeListener(CustomizedViewChangeListener listener, String customizedStateType) throws Exception;
/**
* @see CustomizedViewRootChangeListener#onCustomizedViewRootChange(List, NotificationContext)
* @param listener
*/
void addCustomizedViewRootChangeListener(CustomizedViewRootChangeListener listener) throws Exception;
/**
* @see ExternalViewChangeListener#onExternalViewChange(List, NotificationContext)
* @param listener
*/
void addTargetExternalViewChangeListener(ExternalViewChangeListener listener) throws Exception;
/**
* @see ExternalViewChangeListener#onExternalViewChange(List, NotificationContext)
* @param listener
*/
@Deprecated
void addExternalViewChangeListener(org.apache.helix.ExternalViewChangeListener listener)
throws Exception;
/**
* Add listener for controller change
* Used in distributed cluster controller
*/
void addControllerListener(ControllerChangeListener listener);
/**
* Add listener for controller change
* Used in distributed cluster controller
*/
@Deprecated
void addControllerListener(org.apache.helix.ControllerChangeListener listener);
/**
* Add message listener for controller
* @param listener
*/
void addControllerMessageListener(MessageListener listener);
/**
* Add message listener for controller
* @param listener
*/
@Deprecated
void addControllerMessageListener(org.apache.helix.MessageListener listener);
/**
   * Selectively enable controller pipelines of the given types. This takes effect only
   * when called before connect() and only when the instance type is CONTROLLER.
* @param types pipeline types to enable
*/
void setEnabledControlPipelineTypes(Set<Pipeline.Type> types);
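  // Illustrative sketch (not part of the interface): enabling only the default controller
  // pipeline before connecting. Pipeline.Type.DEFAULT is an assumed example value; consult
  // the Pipeline class for the actual set of pipeline types.
  //
  //   HelixManager controller = ...; // instance type CONTROLLER, not yet connected
  //   controller.setEnabledControlPipelineTypes(EnumSet.of(Pipeline.Type.DEFAULT));
  //   controller.connect();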
/**
* Removes the listener. If the same listener was used for multiple changes,
* all change notifications will be removed.<br/>
* This will invoke onChange method on the listener with
* NotificationContext.type set to FINALIZE. Listener can clean up its state.<br/>
* The data provided in this callback may not be reliable.<br/>
* When a session expires all listeners will be removed and re-added
* automatically. <br/>
* This provides the ability for listeners to either reset their state or do
* any cleanup tasks.<br/>
   * @param key the PropertyKey the listener was registered on
   * @param listener the listener to remove
* @return true if removed successfully, false otherwise
*/
boolean removeListener(PropertyKey key, Object listener);
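  // Illustrative sketch (not part of the interface): pairing listener registration with
  // removal. The key shown, keyBuilder().externalViews(), is an assumed example; use the
  // PropertyKey matching the change type the listener was registered for.
  //
  //   manager.addExternalViewChangeListener(listener);
  //   ...
  //   PropertyKey key = manager.getHelixDataAccessor().keyBuilder().externalViews();
  //   manager.removeListener(key, listener);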
/**
* Return the client to perform read/write operations on the cluster data
* store
* @return ClusterDataAccessor
*/
HelixDataAccessor getHelixDataAccessor();
/**
* Get config accessor
* @return ConfigAccessor
*/
ConfigAccessor getConfigAccessor();
/**
* Returns the cluster name associated with this cluster manager
* @return the associated cluster name
*/
String getClusterName();
/**
* Returns a string that can be used to connect to metadata store for this HelixManager instance
* i.e. for ZkHelixManager, this will have format "{zookeeper-address}:{port}"
* @return a string used to connect to metadata store
*/
String getMetadataStoreConnectionString();
/**
* Returns the instanceName used to connect to the cluster
* @return the associated instance name
*/
String getInstanceName();
/**
* Get the sessionId associated with the connection to cluster data store.
* @return the session identifier
*/
String getSessionId();
/**
   * The timestamp is updated whenever a notification is received. This can
   * be used to check whether any new notification arrived while a previous
   * notification was being processed. It is updated for notifications from
   * all registered listeners.
* @return UNIX timestamp
*/
long getLastNotificationTime();
/**
* Provides admin interface to setup and modify cluster.
* @return instantiated HelixAdmin
*/
HelixAdmin getClusterManagmentTool();
/**
* Get property store
* @return the property store that works with ZNRecord objects
*/
ZkHelixPropertyStore<ZNRecord> getHelixPropertyStore();
/**
* Messaging service which can be used to send cluster wide messages.
* @return messaging service
*/
ClusterMessagingService getMessagingService();
/**
* Get cluster manager instance type
* @return instance type (e.g. PARTICIPANT, CONTROLLER, SPECTATOR)
*/
InstanceType getInstanceType();
/**
* Get cluster manager version
* @return the cluster manager version
*/
String getVersion();
/**
* Get helix manager properties read from
* helix-core/src/main/resources/cluster-manager.properties
* @return deserialized properties
*/
HelixManagerProperties getProperties();
/**
* @return the state machine engine
*/
StateMachineEngine getStateMachineEngine();
/**
* @return the session start time
*/
Long getSessionStartTime();
/**
* Checks whether the cluster manager is leader and returns the session ID associated to the
* connection of cluster data store, if and only if it is leader.
*
* @return {@code Optional<String>} session ID is present inside the {@code Optional} object
* if the cluster manager is leader. Otherwise, returns an empty {@code Optional} object.
*/
default Optional<String> getSessionIdIfLead() {
throw new UnsupportedOperationException("Not implemented");
}
/**
* Check if the cluster manager is the leader
* @return true if this is a controller and a leader of the cluster
*/
boolean isLeader();
/**
   * Start timer tasks when this controller becomes the leader
*/
void startTimerTasks();
/**
   * Stop timer tasks when this controller becomes standby
*/
void stopTimerTasks();
/**
   * Add a callback that is invoked before a participant joins the cluster.<br/>
   * The zookeeper connection is established at this time, so existing cluster
   * data can be read.<br/>
   * The purpose of this method is to allow applications to run additional logic to validate
   * their existing state and check for any errors.
   * If the validation fails, the application can throw an exception or disable the instance.
* @see PreConnectCallback#onPreConnect()
* @param callback
*/
void addPreConnectCallback(PreConnectCallback callback);
/**
   * Add a LiveInstanceInfoProvider that is invoked before creating the liveInstance node.<br/>
   * This allows applications to provide additional metadata that will be published to ZooKeeper
   * and made available for discovery.<br/>
* @see LiveInstanceInfoProvider#getAdditionalLiveInstanceInfo()
* @param liveInstanceInfoProvider
*/
void setLiveInstanceInfoProvider(LiveInstanceInfoProvider liveInstanceInfoProvider);
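  // Illustrative sketch (not part of the interface): publishing extra metadata with the
  // liveInstance node. The field name "BUILD_VERSION" is an arbitrary example.
  //
  //   manager.setLiveInstanceInfoProvider(() -> {
  //     ZNRecord record = new ZNRecord("additionalInfo");
  //     record.setSimpleField("BUILD_VERSION", "1.2.3");
  //     return record;
  //   });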
/**
   * Participant-only component that periodically updates the participant health
   * report to the cluster manager server.
* @return ParticipantHealthReportCollector
*/
ParticipantHealthReportCollector getHealthReportCollector();
}
// File: helix-core/src/main/java/org/apache/helix/HelixDataAccessor.java
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.MaintenanceSignal;
import org.apache.helix.model.Message;
import org.apache.helix.model.PauseSignal;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
/**
 * Interface used to interact with Helix data types like IdealState, Config,
 * LiveInstance, Message, and ExternalView. A PropertyKey identifies the Helix data
 * type. See {@link PropertyKey.Builder} for more information on building a PropertyKey.
*/
public interface HelixDataAccessor {
boolean createStateModelDef(StateModelDefinition stateModelDef);
boolean createControllerMessage(Message message);
boolean createControllerLeader(LiveInstance leader);
boolean createPause(PauseSignal pauseSignal);
boolean createMaintenance(MaintenanceSignal maintenanceSignal);
/**
   * Set a property: overwrite it if it exists, or create it if it does not. This API
   * assumes the node exists and first tries to update it; only if that call fails
   * does it create the node. So there is a performance cost if the call always ends
   * up creating the node.
   * @param key
   * @param value
   * @return true if the operation was successful
*/
<T extends HelixProperty> boolean setProperty(PropertyKey key, T value);
/**
* Updates a property using newvalue.merge(oldvalue)
* @param key
* @param value
* @return true if the update was successful
*/
<T extends HelixProperty> boolean updateProperty(PropertyKey key, T value);
/**
* Updates a property using specified updater
* @param key
* @param updater an update routine for the data to merge in
* @param value
* @return true if the update was successful
*/
<T extends HelixProperty> boolean updateProperty(PropertyKey key, DataUpdater<ZNRecord> updater,
T value);
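  // Illustrative sketch (not part of the interface): an updater that merges one simple
  // field into the current record. The field name "FOO" is an arbitrary example.
  //
  //   DataUpdater<ZNRecord> updater = currentData -> {
  //     ZNRecord record = currentData == null ? value.getRecord() : currentData;
  //     record.setSimpleField("FOO", "bar");
  //     return record;
  //   };
  //   accessor.updateProperty(key, updater, value);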
/**
   * Return the property value. The key must refer to a single Helix property, i.e.
   * PropertyKey.isLeaf() must return true.
* @param key
* @return value, Null if absent or on error
*/
<T extends HelixProperty> T getProperty(PropertyKey key);
/**
   * Return a list of property values, each of which must refer to a single Helix
   * property. Properties may be bucketized.
* @param keys
* @return
*/
@Deprecated
<T extends HelixProperty> List<T> getProperty(List<PropertyKey> keys);
/**
   * Return a list of property values, each of which must refer to a single Helix
   * property. Properties may be bucketized.
   *
   * A value will be null if the node does not exist. If a node fails to be read and
   * throwException is set to true, an exception is thrown.
*
* @param keys
* @param throwException
* @return
*/
<T extends HelixProperty> List<T> getProperty(List<PropertyKey> keys, boolean throwException);
/**
* Removes the property
* @param key
* @return true if removal was successful or node does not exist. false if the
* node existed and failed to remove it
*/
boolean removeProperty(PropertyKey key);
/**
* Return the metadata (HelixProperty.Stat) of the given property
* @param key
* @return
*/
HelixProperty.Stat getPropertyStat(PropertyKey key);
/**
* Return a list of property stats, each of which must refer to a single Helix property.
* @param keys
* @return
*/
List<HelixProperty.Stat> getPropertyStats(List<PropertyKey> keys);
/**
* Return the child names for a property. PropertyKey needs to refer to a
* collection like instances, resources. PropertyKey.isLeaf must be false
* @param key
* @return SubPropertyNames
*/
List<String> getChildNames(PropertyKey key);
/**
   * Get the child values for a property. The PropertyKey needs to refer to the level
   * just above the leaf nodes, i.e. PropertyKey.isCollection must be true.
* @param key
* @return subPropertyValues
*/
@Deprecated
<T extends HelixProperty> List<T> getChildValues(PropertyKey key);
/**
   * Get the child values for a property. The PropertyKey needs to refer to the level
   * just above the leaf nodes, i.e. PropertyKey.isCollection must be true.
   *
   * A value will be null if the node does not exist. If a node fails to be read and
   * throwException is set to true, an exception is thrown.
* @param key
* @param throwException
* @return subPropertyValues
*/
<T extends HelixProperty> List<T> getChildValues(PropertyKey key, boolean throwException);
/**
* Same as getChildValues except that it converts list into a map using the id
* of the HelixProperty
* @param key
* @return a map of property identifiers to typed properties
*/
@Deprecated
<T extends HelixProperty> Map<String, T> getChildValuesMap(PropertyKey key);
/**
* Same as getChildValues except that it converts list into a map using the id
* of the HelixProperty
*
   * A value will be null if the node does not exist. If a node fails to be read and
   * throwException is set to true, an exception is thrown.
* @param key
* @param throwException
* @return a map of property identifiers to typed properties
*/
<T extends HelixProperty> Map<String, T> getChildValuesMap(PropertyKey key,
boolean throwException);
/**
* Adds multiple children to a parent.
* @param keys
* @param children
* @return array where true means the child was added and false means it was not
*/
<T extends HelixProperty> boolean[] createChildren(List<PropertyKey> keys, List<T> children);
/**
* Sets multiple children under one parent
* @param keys
* @param children
* @return array where true means the child was set and false means it was not
*/
<T extends HelixProperty> boolean[] setChildren(List<PropertyKey> keys, List<T> children);
/**
* Updates multiple children under one parent
* TODO: change to use property-keys instead of paths
* @param paths
* @param updaters
* @return array where true means the child was updated and false means it was not
*/
<T extends HelixProperty> boolean[] updateChildren(List<String> paths,
List<DataUpdater<ZNRecord>> updaters, int options);
/**
* Get key builder for the accessor
* @return instantiated PropertyKey.Builder
*/
PropertyKey.Builder keyBuilder();
/**
* Get underlying base data accessor
* @return a data accessor that can process ZNRecord objects
*/
BaseDataAccessor<ZNRecord> getBaseDataAccessor();
}
// File: helix-core/src/main/java/org/apache/helix/HelixConstants.java
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Identifying constants of the components in a Helix-managed cluster
*/
public interface HelixConstants {
// TODO: ChangeType and PropertyType are duplicated, consider unifying
enum ChangeType {
// @formatter:off
IDEAL_STATE (PropertyType.IDEALSTATES),
CONFIG (PropertyType.CONFIGS),
INSTANCE_CONFIG (PropertyType.CONFIGS),
RESOURCE_CONFIG (PropertyType.CONFIGS),
CUSTOMIZED_STATE_CONFIG (PropertyType.CONFIGS),
CLUSTER_CONFIG (PropertyType.CONFIGS),
LIVE_INSTANCE (PropertyType.LIVEINSTANCES),
CURRENT_STATE (PropertyType.CURRENTSTATES),
TASK_CURRENT_STATE (PropertyType.TASKCURRENTSTATES),
CUSTOMIZED_STATE_ROOT (PropertyType.CUSTOMIZEDSTATES),
CUSTOMIZED_STATE (PropertyType.CUSTOMIZEDSTATES),
MESSAGE (PropertyType.MESSAGES),
EXTERNAL_VIEW (PropertyType.EXTERNALVIEW),
CUSTOMIZED_VIEW (PropertyType.CUSTOMIZEDVIEW),
CUSTOMIZED_VIEW_ROOT (PropertyType.CUSTOMIZEDVIEW),
TARGET_EXTERNAL_VIEW (PropertyType.TARGETEXTERNALVIEW),
CONTROLLER (PropertyType.CONTROLLER),
MESSAGES_CONTROLLER (PropertyType.MESSAGES_CONTROLLER),
HEALTH (PropertyType.HEALTHREPORT);
// @formatter:on
private final PropertyType _propertyType;
ChangeType(PropertyType propertyType) {
_propertyType = propertyType;
}
public PropertyType getPropertyType() {
return _propertyType;
}
}
/**
* Use IdealState.PreferentListToken instead.
*/
@Deprecated
enum StateModelToken {
ANY_LIVEINSTANCE
}
/**
* Please use ClusterConfig instead
*/
@Deprecated
enum ClusterConfigType {
HELIX_DISABLE_PIPELINE_TRIGGERS,
PERSIST_BEST_POSSIBLE_ASSIGNMENT
}
String DEFAULT_STATE_MODEL_FACTORY = "DEFAULT";
}
// File: helix-core/src/main/java/org/apache/helix/InstanceConfigChangeListener.java
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Interface to implement to listen for changes to instance configurations.
*
* @deprecated
* NOTE: This interface definition is moved to {@link org.apache.helix.api.listeners.InstanceConfigChangeListener}
*/
@Deprecated
public interface InstanceConfigChangeListener extends
org.apache.helix.api.listeners.InstanceConfigChangeListener {
}
// File: helix-core/src/main/java/org/apache/helix/HelixAdmin.java
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.helix.api.status.ClusterManagementMode;
import org.apache.helix.api.status.ClusterManagementModeRequest;
import org.apache.helix.api.topology.ClusterTopology;
import org.apache.helix.constants.InstanceConstants;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.model.ClusterConstraints;
import org.apache.helix.model.ClusterConstraints.ConstraintType;
import org.apache.helix.model.ConstraintItem;
import org.apache.helix.model.CustomizedStateConfig;
import org.apache.helix.model.CustomizedView;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.MaintenanceSignal;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.StateModelDefinition;
/*
* Helix cluster management
*/
public interface HelixAdmin {
/**
* Get a list of clusters under "/"
* @return a list of cluster names
*/
List<String> getClusters();
/**
* Get a list of instances under a cluster
* @param clusterName
* @return a list of instance names
*/
List<String> getInstancesInCluster(String clusterName);
/**
* Get an instance config
* @param clusterName
* @param instanceName
* @return InstanceConfig corresponding to the specified instance
*/
InstanceConfig getInstanceConfig(String clusterName, String instanceName);
/**
* Set the instance config of an existing instance under the given cluster.
* @param clusterName the name of the cluster to which this instance belongs.
* @param instanceName the name of this instance.
* @param instanceConfig the new {@link InstanceConfig} that will replace the current one
* associated with this instance.
* @return true if the operation was successful; false otherwise.
*/
boolean setInstanceConfig(String clusterName, String instanceName, InstanceConfig instanceConfig);
/**
* Get a list of resources in a cluster
* @param clusterName
* @return a list of resource names in the cluster
*/
List<String> getResourcesInCluster(String clusterName);
/**
* Get a list of resources in a cluster with a tag
* @param clusterName
* @param tag
*/
List<String> getResourcesInClusterWithTag(String clusterName, String tag);
/**
* Add a cluster
* @param clusterName
* @return true if successfully created, or if cluster already exists
*/
boolean addCluster(String clusterName);
/**
* Add a cluster
* @param clusterName
* @param recreateIfExists If the cluster already exists, it will delete it and recreate
* @return true if successfully created, or if cluster already exists
*/
boolean addCluster(String clusterName, boolean recreateIfExists);
/**
* Add a cluster and also add this cluster as a resource group in the super cluster
* @param clusterName
* @param grandCluster
*/
void addClusterToGrandCluster(String clusterName, String grandCluster);
  /**
   * Add a CustomizedStateConfig to a cluster
* @param clusterName
* @param customizedStateConfig
*/
void addCustomizedStateConfig(String clusterName,
CustomizedStateConfig customizedStateConfig);
/**
* Remove CustomizedStateConfig from specific cluster
* @param clusterName
*/
void removeCustomizedStateConfig(String clusterName);
/**
* Add a type to CustomizedStateConfig of specific cluster
* @param clusterName
*/
void addTypeToCustomizedStateConfig(String clusterName, String type);
/**
* Remove a type from CustomizedStateConfig of specific cluster
* @param clusterName
*/
void removeTypeFromCustomizedStateConfig(String clusterName, String type);
/**
* Add a resource to a cluster, using the default ideal state mode AUTO
* @param clusterName
* @param resourceName
* @param numPartitions
* @param stateModelRef
*/
void addResource(String clusterName, String resourceName, int numPartitions,
String stateModelRef);
/**
* @param clusterName
* @param resourceName
* @param idealstate
*/
void addResource(String clusterName, String resourceName, IdealState idealstate);
/**
* Add a resource to a cluster
* @param clusterName
* @param resourceName
* @param numPartitions
* @param stateModelRef
* @param rebalancerMode
*/
void addResource(String clusterName, String resourceName, int numPartitions, String stateModelRef,
String rebalancerMode);
/**
* Add a resource to a cluster
* @param clusterName
* @param resourceName
* @param numPartitions
* @param stateModelRef
* @param rebalancerMode
* @param rebalanceStrategy
*/
void addResource(String clusterName, String resourceName, int numPartitions, String stateModelRef,
String rebalancerMode, String rebalanceStrategy);
/**
* Add a resource to a cluster, using a bucket size > 1
* @param clusterName
* @param resourceName
* @param numPartitions
* @param stateModelRef
* @param rebalancerMode
* @param bucketSize
*/
void addResource(String clusterName, String resourceName, int numPartitions, String stateModelRef,
String rebalancerMode, int bucketSize);
/**
* Add a resource to a cluster, using a bucket size > 1
* @param clusterName
* @param resourceName
* @param numPartitions
* @param stateModelRef
* @param rebalancerMode
* @param bucketSize
* @param maxPartitionsPerInstance
*/
void addResource(String clusterName, String resourceName, int numPartitions, String stateModelRef,
String rebalancerMode, int bucketSize, int maxPartitionsPerInstance);
/**
* Add a resource to a cluster, using a bucket size > 1
* @param clusterName
* @param resourceName
* @param numPartitions
* @param stateModelRef
* @param rebalancerMode
* @param rebalanceStrategy
* @param bucketSize
* @param maxPartitionsPerInstance
*/
void addResource(String clusterName, String resourceName, int numPartitions, String stateModelRef,
String rebalancerMode, String rebalanceStrategy, int bucketSize,
int maxPartitionsPerInstance);
/**
* Add an instance to a cluster
* @param clusterName
* @param instanceConfig
*/
void addInstance(String clusterName, InstanceConfig instanceConfig);
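  // Illustrative sketch (not part of the interface): registering a new participant. The
  // instance name, host, port, and tag values are arbitrary examples.
  //
  //   InstanceConfig config = new InstanceConfig("localhost_12913");
  //   config.setHostName("localhost");
  //   config.setPort("12913");
  //   config.addTag("serving");
  //   admin.addInstance("myCluster", config);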
/**
* Drop an instance from a cluster
* @param clusterName
* @param instanceConfig
*/
void dropInstance(String clusterName, InstanceConfig instanceConfig);
/**
* Purge offline instances that have been offline for longer than the offline duration time
* from a cluster
* @param clusterName
* @param offlineDuration if an offline instance has been offline for longer than the set
* offlineDuration, the offline instance becomes eligible for being
* purged/deleted
*/
void purgeOfflineInstances(String clusterName, long offlineDuration);
/**
* Get ideal state for a resource
* @param clusterName
* @param resourceName
* @return
*/
IdealState getResourceIdealState(String clusterName, String resourceName);
/**
* Set ideal state for a resource
* @param clusterName
* @param resourceName
* @param idealState
*/
void setResourceIdealState(String clusterName, String resourceName, IdealState idealState);
/**
* Selectively updates fields for an existing resource's IdealState ZNode.
* @param clusterName
* @param resourceName
* @param idealState
*/
void updateIdealState(String clusterName, String resourceName, IdealState idealState);
/**
* Selectively removes fields for an existing resource's IdealState ZNode.
* @param clusterName
* @param resourceName
* @param idealState
*/
void removeFromIdealState(String clusterName, String resourceName, IdealState idealState);
/**
* Disable or enable an instance
* @param clusterName
* @param instanceName
* @param enabled
*/
void enableInstance(String clusterName, String instanceName, boolean enabled);
/**
* @param clusterName
* @param instanceName
* @param enabled
   * @param disabledType disabledType for the disable operation. It is ignored when enabled is
   *                     true. The existing disabledType will be overwritten if the instance is
   *                     already in a disabled state.
   * @param reason an additional string description of why the instance is disabled when
   *               <code>enabled</code> is false. The existing disabled reason will be overwritten
   *               if the instance is already in a disabled state.
*/
void enableInstance(String clusterName, String instanceName, boolean enabled,
InstanceConstants.InstanceDisabledType disabledType, String reason);
/**
* Batch enable/disable instances in a cluster
* By default, all the instances are enabled
* @param clusterName
* @param instances
* @param enabled
*/
void enableInstance(String clusterName, List<String> instances, boolean enabled);
void setInstanceOperation(String clusterName, String instance,
InstanceConstants.InstanceOperation instanceOperation);
/**
* Disable or enable a resource
* @param clusterName
* @param resourceName
*/
void enableResource(String clusterName, String resourceName, boolean enabled);
/**
* Disable or enable a list of partitions on an instance
* @param enabled
* @param clusterName
* @param instanceName
* @param resourceName
* @param partitionNames
*/
void enablePartition(boolean enabled, String clusterName, String instanceName,
String resourceName, List<String> partitionNames);
/**
* Disable or enable a cluster
* @param clusterName
* @param enabled
*/
void enableCluster(String clusterName, boolean enabled);
/**
* @param clusterName
* @param enabled
* @param reason set additional string description on why the cluster is disabled when
* <code>enabled</code> is false.
*/
void enableCluster(String clusterName, boolean enabled, String reason);
/**
* **Deprecated: use autoEnableMaintenanceMode or manuallyEnableMaintenanceMode instead**
* Enable or disable maintenance mode for a cluster
* @param clusterName
* @param enabled
*/
@Deprecated
void enableMaintenanceMode(String clusterName, boolean enabled);
/**
* **Deprecated: use autoEnableMaintenanceMode or manuallyEnableMaintenanceMode instead**
* Enable or disable maintenance mode for a cluster
* @param clusterName
* @param enabled
* @param reason
*/
@Deprecated
void enableMaintenanceMode(String clusterName, boolean enabled, String reason);
/**
* Automatically enable maintenance mode. To be called by the Controller pipeline.
* @param clusterName
* @param enabled
* @param reason
* @param internalReason
*/
void autoEnableMaintenanceMode(String clusterName, boolean enabled, String reason,
MaintenanceSignal.AutoTriggerReason internalReason);
/**
* Manually enable maintenance mode. To be called by the REST client that accepts KV mappings as
* the payload.
* @param clusterName
* @param enabled
* @param reason
* @param customFields user-specified KV mappings to be stored in the ZNode
*/
void manuallyEnableMaintenanceMode(String clusterName, boolean enabled, String reason,
Map<String, String> customFields);
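  // Illustrative sketch (not part of the interface): entering maintenance mode with
  // user-specified metadata. The cluster name, reason, and map entries are arbitrary examples.
  //
  //   Map<String, String> fields = new HashMap<>();
  //   fields.put("ticket", "OP-1234");
  //   fields.put("operator", "alice");
  //   admin.manuallyEnableMaintenanceMode("myCluster", true, "planned network upgrade", fields);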
/**
* Check specific cluster is in maintenance mode or not
* @param clusterName the cluster name
* @return true if in maintenance mode, false otherwise
*/
boolean isInMaintenanceMode(String clusterName);
/**
* Requests to put a cluster into a management mode
* {@link ClusterManagementMode.Type}. When this method returns,
* it means the signal has been successfully sent, but it does not mean the cluster has
* fully entered the mode. Because the cluster can take some time to complete the request.
* <p>
* To check the cluster management mode status, call {@link #getClusterManagementMode(String)}.
*
* @param request request to set the cluster management mode. {@link ClusterManagementModeRequest}
*/
void setClusterManagementMode(ClusterManagementModeRequest request);
/**
* Gets cluster management status {@link ClusterManagementMode}: what mode the cluster is and
* whether the cluster has fully reached to that mode.
*
* @param clusterName cluster name
* @return {@link ClusterManagementMode}
*/
ClusterManagementMode getClusterManagementMode(String clusterName);
/**
   * Reset a list of partitions in error state for an instance.
   * The partitions are assumed to be in error state, and reset will bring them from error
   * to initial state. An error-to-initial state transition is required for reset.
* @param clusterName
* @param instanceName
* @param resourceName
* @param partitionNames
*/
void resetPartition(String clusterName, String instanceName, String resourceName,
List<String> partitionNames);
/**
* Reset all the partitions in error state for a list of instances
* @param clusterName
* @param instanceNames
*/
void resetInstance(String clusterName, List<String> instanceNames);
/**
* Reset all partitions in error state for a list of resources
* @param clusterName
* @param resourceNames
*/
void resetResource(String clusterName, List<String> resourceNames);
/**
* Add a state model definition
* @param clusterName
* @param stateModelDef
* @param record
*/
void addStateModelDef(String clusterName, String stateModelDef, StateModelDefinition record);
/**
* Add a state model definition
* @param clusterName
* @param stateModelDef
* @param record
* @param recreateIfExists If the state definition already exists, it will delete it and recreate
*/
void addStateModelDef(String clusterName, String stateModelDef, StateModelDefinition record,
boolean recreateIfExists);
/**
* Drop a resource from a cluster
* @param clusterName
* @param resourceName
*/
void dropResource(String clusterName, String resourceName);
/**
* Add cloud config to the cluster.
* @param clusterName
* @param cloudConfig
*/
void addCloudConfig(String clusterName, CloudConfig cloudConfig);
/**
* Remove the Cloud Config for specific cluster
* @param clusterName
*/
void removeCloudConfig(String clusterName);
/**
* Get the topology of a specific cluster
* @param clusterName
*/
ClusterTopology getClusterTopology(String clusterName);
/**
* Get a list of state model definitions in a cluster
* @param clusterName
* @return
*/
List<String> getStateModelDefs(String clusterName);
/**
* Get a state model definition in a cluster
* @param clusterName
* @param stateModelName
* @return StateModelDefinition identified by stateModelName
*/
StateModelDefinition getStateModelDef(String clusterName, String stateModelName);
/**
* Get external view for a resource
* @param clusterName
* @param resourceName
* @return ExternalView for the resource
*/
ExternalView getResourceExternalView(String clusterName, String resourceName);
/**
* Get customized view for a resource
* @param clusterName
* @param resourceName
   * @return CustomizedView for the resource
*/
CustomizedView getResourceCustomizedView(String clusterName, String resourceName,
String customizedStateType);
/**
* Drop a cluster
* @param clusterName
*/
void dropCluster(String clusterName);
/**
* Set configuration values
* @param scope
* @param properties
*/
void setConfig(HelixConfigScope scope, Map<String, String> properties);
/**
* Remove configuration values
* @param scope
* @param keys
*/
void removeConfig(HelixConfigScope scope, List<String> keys);
/**
* Get configuration values
* @param scope
* @param keys
* @return configuration values ordered by the provided keys
*/
Map<String, String> getConfig(HelixConfigScope scope, List<String> keys);
/**
* Get configuration keys
* @param scope
* @return keys mapping to valid configuration values
*/
List<String> getConfigKeys(HelixConfigScope scope);
/**
* Rebalance a resource in cluster
* @param clusterName
* @param resourceName
* @param replica
*/
void rebalance(String clusterName, String resourceName, int replica);
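  // Illustrative sketch (not part of the interface): creating a resource and rebalancing it
  // with 3 replicas. "MasterSlave" refers to the built-in state model; the cluster and
  // resource names are arbitrary examples.
  //
  //   admin.addResource("myCluster", "myDB", 8, "MasterSlave");
  //   admin.rebalance("myCluster", "myDB", 3);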
/**
* Rebalance a cluster without respecting the delay
* @param clusterName
*/
void onDemandRebalance(String clusterName);
/**
* Add ideal state using a json format file
* @param clusterName
* @param resourceName
* @param idealStateFile
* @throws IOException
*/
void addIdealState(String clusterName, String resourceName, String idealStateFile)
throws IOException;
/**
* Add state model definition using a json format file
* @param clusterName
* @param stateModelDefName
* @param stateModelDefFile
* @throws IOException error reading the state model definition file
*/
void addStateModelDef(String clusterName, String stateModelDefName, String stateModelDefFile)
throws IOException;
/**
* Add a constraint item; create if not exist
* @param clusterName
* @param constraintType
* @param constraintId
* @param constraintItem
*/
void setConstraint(String clusterName, ConstraintType constraintType, String constraintId,
ConstraintItem constraintItem);
/**
* Remove a constraint item
* @param clusterName
* @param constraintType
* @param constraintId
*/
void removeConstraint(String clusterName, ConstraintType constraintType, String constraintId);
/**
* Get all constraints for a type
* @param clusterName
* @param constraintType
* @return constraints of constraintType
*/
ClusterConstraints getConstraints(String clusterName, ConstraintType constraintType);
/**
* @param clusterName
* @param currentIdealState
* @param instanceNames
*/
void rebalance(String clusterName, IdealState currentIdealState, List<String> instanceNames);
/**
* @param clusterName
* @param resourceName
* @param replica
* @param instances
*/
void rebalance(String clusterName, String resourceName, int replica, List<String> instances);
/**
* @param clusterName
* @param resourceName
* @param replica
* @param keyPrefix
* @param group the group identifier of instances to rebalance
*/
void rebalance(String clusterName, String resourceName, int replica, String keyPrefix,
String group);
/**
* @param clusterName
* @param tag
*/
List<String> getInstancesInClusterWithTag(String clusterName, String tag);
/**
* @param clusterName
* @param instanceName
* @param tag
*/
void addInstanceTag(String clusterName, String instanceName, String tag);
/**
* @param clusterName
* @param instanceName
* @param tag
*/
void removeInstanceTag(String clusterName, String instanceName, String tag);
void setInstanceZoneId(String clusterName, String instanceName, String zoneId);
/**
* Enable/disable batch message mode for specified cluster.
* By default batch message mode is disabled.
* @param clusterName
* @param enabled
*/
void enableBatchMessageMode(String clusterName, boolean enabled);
/**
* Enable/disable batch message mode for specified resource in a cluster
* By default batch message mode is disabled.
* @param clusterName
* @param resourceName
* @param enabled
*/
void enableBatchMessageMode(String clusterName, String resourceName, boolean enabled);
/**
* Get batch disabled instance map (disabled instance -> disabled time) in a cluster. It will
* include disabled instances and instances in disabled zones
* @param clusterName
* @return
*/
Map<String, String> getBatchDisabledInstances(String clusterName);
/**
   * Get the list of instances matching a domain for a cluster.
   * Example: the domain could be "helixZoneId=1,rackId=3". All instances whose domain contains
   * both of these key-value pairs will be selected.
* @param clusterName
* @return
*/
List<String> getInstancesByDomain(String clusterName, String domain);
/**
* Release resources used in HelixAdmin.
*/
default void close() {
System.out.println("Default close() was invoked! No operation was executed.");
}
/**
* Adds a resource with IdealState and ResourceConfig to be rebalanced by WAGED rebalancer with validation.
* Validation includes the following:
* 1. Check ResourceConfig has the WEIGHT field
* 2. Check that all capacity keys from ClusterConfig are set up in the WEIGHT field
* 3. Check that all ResourceConfig's weightMap fields have all of the capacity keys
* @param clusterName
* @param idealState
* @param resourceConfig
* @return true if the resource has been added successfully. False otherwise
*/
boolean addResourceWithWeight(String clusterName, IdealState idealState,
ResourceConfig resourceConfig);
/**
* Batch-enables Waged rebalance for the names of resources given.
* @param clusterName
* @param resourceNames
* @return
*/
boolean enableWagedRebalance(String clusterName, List<String> resourceNames);
/**
* Validates the resources to see if their weight configs have been set properly.
* Validation includes the following:
* 1. Check ResourceConfig has the WEIGHT field
* 2. Check that all capacity keys from ClusterConfig are set up in the WEIGHT field
* 3. Check that all ResourceConfig's weightMap fields have all of the capacity keys
* @param resourceNames
* @return for each resource, true if the weight configs have been set properly, false otherwise
*/
Map<String, Boolean> validateResourcesForWagedRebalance(String clusterName,
List<String> resourceNames);
/**
* Validates the instances to ensure their weights in InstanceConfigs have been set up properly.
* Validation includes the following:
* 1. If default instance capacity is not set, check that the InstanceConfigs have the CAPACITY field
* 2. Check that all capacity keys defined in ClusterConfig are present in the CAPACITY field
* @param clusterName
* @param instancesNames
* @return
*/
Map<String, Boolean> validateInstancesForWagedRebalance(String clusterName,
List<String> instancesNames);
/**
* Return if instance operation 'Evacuate' is finished.
* @param clusterName
* @param instancesNames
   * @return true if there is no current state or pending message on the instance.
*/
boolean isEvacuateFinished(String clusterName, String instancesNames);
/**
   * Return if the instance is ready to prepare for joining the cluster. The instance should have
   * no current state, no pending message, and be tagged with an operation that excludes the
   * instance from Helix assignment.
* @param clusterName
* @param instancesNames
* @return true if the instance is ready for preparing joining cluster.
*/
boolean isReadyForPreparingJoiningCluster(String clusterName, String instancesNames);
}
// File: helix-core/src/main/java/org/apache/helix/HelixManagerProperty.java
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Properties;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* HelixManagerProperty is a general property/config object used for HelixManager creation.
*/
public class HelixManagerProperty {
private static final Logger LOG = LoggerFactory.getLogger(HelixManagerProperty.class.getName());
private String _version;
private long _healthReportLatency;
private HelixCloudProperty _helixCloudProperty;
private InstanceConfig.Builder _defaultInstanceConfigBuilder;
private RealmAwareZkClient.RealmAwareZkConnectionConfig _zkConnectionConfig;
private RealmAwareZkClient.RealmAwareZkClientConfig _zkClientConfig;
/**
* ** Deprecated - HelixManagerProperty should be a general property/config object used for
* HelixManager creation, not tied only to Properties or CloudConfig **
*
* Initialize Helix manager property with default value
* @param helixManagerProperties helix manager related properties input as a map
* @param cloudConfig cloudConfig read from Zookeeper
*/
@Deprecated
public HelixManagerProperty(Properties helixManagerProperties, CloudConfig cloudConfig) {
_helixCloudProperty = new HelixCloudProperty(cloudConfig);
_version = helixManagerProperties.getProperty(SystemPropertyKeys.HELIX_MANAGER_VERSION);
_healthReportLatency = Long.parseLong(
helixManagerProperties.getProperty(SystemPropertyKeys.PARTICIPANT_HEALTH_REPORT_LATENCY));
}
private HelixManagerProperty(String version, long healthReportLatency,
HelixCloudProperty helixCloudProperty, InstanceConfig.Builder defaultInstanceConfig,
RealmAwareZkClient.RealmAwareZkConnectionConfig zkConnectionConfig,
RealmAwareZkClient.RealmAwareZkClientConfig zkClientConfig) {
_version = version;
_healthReportLatency = healthReportLatency;
_helixCloudProperty = helixCloudProperty;
_defaultInstanceConfigBuilder = defaultInstanceConfig;
_zkConnectionConfig = zkConnectionConfig;
_zkClientConfig = zkClientConfig;
}
public HelixCloudProperty getHelixCloudProperty() {
if (_helixCloudProperty == null) {
_helixCloudProperty = new HelixCloudProperty(new CloudConfig());
}
return _helixCloudProperty;
}
public InstanceConfig.Builder getDefaultInstanceConfigBuilder() {
if (_defaultInstanceConfigBuilder == null) {
_defaultInstanceConfigBuilder = new InstanceConfig.Builder();
}
return _defaultInstanceConfigBuilder;
}
public String getVersion() {
return _version;
}
public long getHealthReportLatency() {
return _healthReportLatency;
}
public RealmAwareZkClient.RealmAwareZkConnectionConfig getZkConnectionConfig() {
return _zkConnectionConfig;
}
public RealmAwareZkClient.RealmAwareZkClientConfig getZkClientConfig() {
return _zkClientConfig;
}
public static class Builder {
private String _version;
private long _healthReportLatency;
private HelixCloudProperty _helixCloudProperty;
private InstanceConfig.Builder _defaultInstanceConfigBuilder;
private RealmAwareZkClient.RealmAwareZkConnectionConfig _zkConnectionConfig;
private RealmAwareZkClient.RealmAwareZkClientConfig _zkClientConfig;
public Builder() {
}
public HelixManagerProperty build() {
return new HelixManagerProperty(_version, _healthReportLatency, _helixCloudProperty,
_defaultInstanceConfigBuilder, _zkConnectionConfig, _zkClientConfig);
}
public Builder setVersion(String version) {
_version = version;
return this;
}
public Builder setHealthReportLatency(long healthReportLatency) {
_healthReportLatency = healthReportLatency;
return this;
}
public Builder setHelixCloudProperty(HelixCloudProperty helixCloudProperty) {
_helixCloudProperty = helixCloudProperty;
return this;
}
public Builder setDefaultInstanceConfigBuilder(
InstanceConfig.Builder defaultInstanceConfigBuilder) {
_defaultInstanceConfigBuilder = defaultInstanceConfigBuilder;
return this;
}
public Builder setRealmAWareZkConnectionConfig(
RealmAwareZkClient.RealmAwareZkConnectionConfig zkConnectionConfig) {
_zkConnectionConfig = zkConnectionConfig;
return this;
}
public Builder setRealmAwareZkClientConfig(
RealmAwareZkClient.RealmAwareZkClientConfig zkClientConfig) {
_zkClientConfig = zkClientConfig;
return this;
}
}
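  // Illustrative sketch (not part of the original class): assembling a HelixManagerProperty
  // with the builder above. The version string and latency value are arbitrary examples.
  //
  //   HelixManagerProperty property = new HelixManagerProperty.Builder()
  //       .setVersion("1.0.0")
  //       .setHealthReportLatency(60_000L)
  //       .build();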
}
// File: helix-core/src/main/java/org/apache/helix/HelixCloudProperty.java
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.helix.cloud.constants.CloudProvider;
import org.apache.helix.cloud.event.CloudEventHandler;
import org.apache.helix.cloud.event.helix.CloudEventCallbackProperty;
import org.apache.helix.model.CloudConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Holds helix cloud properties read from CloudConfig and user-defined files. Clients may
 * override these fields from their application.
*/
public class HelixCloudProperty {
private static final Logger LOG = LoggerFactory.getLogger(HelixCloudProperty.class.getName());
private static final String AZURE_CLOUD_PROPERTY_FILE = SystemPropertyKeys.AZURE_CLOUD_PROPERTIES;
private static final String DEFAULT_CLOUD_PROCESSOR_PACKAGE_PREFIX = "org.apache.helix.cloud.";
private static final String CLOUD_INFO_SOURCE = "cloud_info_source";
private static final String CLOUD_INFO_PROCESSOR_NAME = "cloud_info_processor_name";
private static final String CLOUD_MAX_RETRY = "cloud_max_retry";
private static final String CONNECTION_TIMEOUT_MS = "connection_timeout_ms";
private static final String REQUEST_TIMEOUT_MS = "request_timeout_ms";
// Denote whether the instance is considered as in a cloud environment.
private boolean _isCloudEnabled;
// Unique id of the cloud environment where the instance is in.
private String _cloudId;
// Cloud environment provider, e.g. Azure, AWS, GCP, etc.
private String _cloudProvider;
// The sources where the cloud instance information can be retrieved from.
private List<String> _cloudInfoSources;
// The name of the function that will fetch and parse cloud instance information.
private String _cloudInfoProcessorName;
// The package for the class which contains implementation to fetch
// and parse cloud instance information.
private String _cloudInfoProcessorPackage;
// Http max retry times when querying the cloud instance information from cloud environment.
private int _cloudMaxRetry;
// Http connection time when querying the cloud instance information from cloud environment.
private long _cloudConnectionTimeout;
// Http request timeout when querying the cloud instance information from cloud environment.
private long _cloudRequestTimeout;
// Other customized properties that may be used.
private final Properties _customizedCloudProperties = new Properties();
private boolean _isCloudEventCallbackEnabled;
private CloudEventCallbackProperty _cloudEventCallbackProperty;
/**
   * Initialize Helix cloud properties based on the provider in the given CloudConfig
   * @param cloudConfig CloudConfig read from ZooKeeper
*/
public HelixCloudProperty(CloudConfig cloudConfig) {
populateFieldsWithCloudConfig(cloudConfig);
}
public void populateFieldsWithCloudConfig(CloudConfig cloudConfig) {
if (cloudConfig == null) {
cloudConfig = new CloudConfig();
}
setCloudEnabled(cloudConfig.isCloudEnabled());
setCloudId(cloudConfig.getCloudID());
String cloudProviderStr = cloudConfig.getCloudProvider();
setCloudProvider(cloudProviderStr);
    if (cloudProviderStr != null) {
switch (CloudProvider.valueOf(cloudProviderStr)) {
case AZURE:
Properties azureProperties = new Properties();
try {
InputStream stream = Thread.currentThread().getContextClassLoader()
.getResourceAsStream(AZURE_CLOUD_PROPERTY_FILE);
azureProperties.load(stream);
} catch (IOException e) {
String errMsg =
"failed to open Helix Azure cloud properties file: " + AZURE_CLOUD_PROPERTY_FILE;
throw new IllegalArgumentException(errMsg, e);
}
LOG.info("Successfully loaded Helix Azure cloud properties: {}", azureProperties);
setCloudInfoSources(
Collections.singletonList(azureProperties.getProperty(CLOUD_INFO_SOURCE)));
setCloudInfoProcessorPackage(
DEFAULT_CLOUD_PROCESSOR_PACKAGE_PREFIX + cloudProviderStr.toLowerCase());
setCloudInfoProcessorName(azureProperties.getProperty(CLOUD_INFO_PROCESSOR_NAME));
setCloudMaxRetry(Integer.valueOf(azureProperties.getProperty(CLOUD_MAX_RETRY)));
setCloudConnectionTimeout(
Long.valueOf(azureProperties.getProperty(CONNECTION_TIMEOUT_MS)));
setCloudRequestTimeout(Long.valueOf(azureProperties.getProperty(REQUEST_TIMEOUT_MS)));
break;
case CUSTOMIZED:
setCloudInfoSources(cloudConfig.getCloudInfoSources());
          // Although it is unlikely that cloudInfoProcessorPackage is null when using the
          // CUSTOMIZED cloud provider, we fall back to the default helix cloud package to
          // preserve backwards compatibility.
setCloudInfoProcessorPackage(cloudConfig.getCloudInfoProcessorPackage() != null
? cloudConfig.getCloudInfoProcessorPackage()
: DEFAULT_CLOUD_PROCESSOR_PACKAGE_PREFIX + cloudProviderStr.toLowerCase());
setCloudInfoProcessorName(cloudConfig.getCloudInfoProcessorName());
break;
default:
throw new HelixException(
String.format("Unsupported cloud provider: %s", cloudConfig.getCloudProvider()));
}
}
}
public boolean getCloudEnabled() {
return _isCloudEnabled;
}
public String getCloudId() {
return _cloudId;
}
public String getCloudProvider() {
return _cloudProvider;
}
public List<String> getCloudInfoSources() {
return _cloudInfoSources;
}
/**
* Get the package containing the CloudInfoProcessor class.
* @return A package.
*/
public String getCloudInfoProcessorPackage() {
return _cloudInfoProcessorPackage;
}
public String getCloudInfoProcessorName() {
return _cloudInfoProcessorName;
}
/**
* Get the fully qualified class name for the class which contains implementation to fetch
* and parse cloud instance information.
* @return A fully qualified class name.
*/
public String getCloudInfoProcessorFullyQualifiedClassName() {
return _cloudInfoProcessorPackage + "." + _cloudInfoProcessorName;
}
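  // Illustrative sketch (assumed usage, not the exact Helix bootstrap code): the fully
  // qualified name returned above is typically loaded reflectively.
  //
  //   String className = cloudProperty.getCloudInfoProcessorFullyQualifiedClassName();
  //   Class<?> processorClass = Class.forName(className);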
public int getCloudMaxRetry() {
return _cloudMaxRetry;
}
public long getCloudConnectionTimeout() {
return _cloudConnectionTimeout;
}
public long getCloudRequestTimeout() {
return _cloudRequestTimeout;
}
public Properties getCustomizedCloudProperties() {
return _customizedCloudProperties;
}
public String getCloudEventHandlerClassName() {
String defaultHandler = CloudEventHandler.class.getName();
return getCloudEventCallbackProperty() == null ? defaultHandler
: getCloudEventCallbackProperty().getUserArgs().getOrDefault(
CloudEventCallbackProperty.UserArgsInputKey.CLOUD_EVENT_HANDLER_CLASS_NAME,
defaultHandler);
}
public void setCloudEnabled(boolean isCloudEnabled) {
_isCloudEnabled = isCloudEnabled;
}
public void setCloudId(String cloudId) {
_cloudId = cloudId;
}
public void setCloudProvider(String cloudProvider) {
_cloudProvider = cloudProvider;
}
public void setCloudInfoSources(List<String> sources) {
_cloudInfoSources = sources;
}
/**
* Set the package containing the class name of the cloud info processor.
* @param cloudInfoProcessorPackage
*/
private void setCloudInfoProcessorPackage(String cloudInfoProcessorPackage) {
_cloudInfoProcessorPackage = cloudInfoProcessorPackage;
}
public void setCloudInfoProcessorName(String cloudInfoProcessorName) {
_cloudInfoProcessorName = cloudInfoProcessorName;
}
public void setCloudMaxRetry(int cloudMaxRetry) {
_cloudMaxRetry = cloudMaxRetry;
}
public void setCloudConnectionTimeout(long cloudConnectionTimeout) {
_cloudConnectionTimeout = cloudConnectionTimeout;
}
public void setCloudRequestTimeout(long cloudRequestTimeout) {
_cloudRequestTimeout = cloudRequestTimeout;
}
public void setCustomizedCloudProperties(Properties customizedCloudProperties) {
_customizedCloudProperties.putAll(customizedCloudProperties);
}
public boolean isCloudEventCallbackEnabled() {
return _isCloudEventCallbackEnabled;
}
public void setCloudEventCallbackEnabled(boolean enabled) {
_isCloudEventCallbackEnabled = enabled;
}
public CloudEventCallbackProperty getCloudEventCallbackProperty() {
return _cloudEventCallbackProperty;
}
public void setCloudEventCallbackProperty(CloudEventCallbackProperty cloudEventCallbackProperty) {
_cloudEventCallbackProperty = cloudEventCallbackProperty;
}
}
// File: helix-core/src/main/java/org/apache/helix/ExternalCommand.java
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.BufferedInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeoutException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Wrapper for running commands outside of the JVM
* @see {@link Process}
*/
public class ExternalCommand {
public static final String MODULE = ExternalCommand.class.getName();
public static final Logger LOG = LoggerFactory.getLogger(MODULE);
private final ProcessBuilder _processBuilder;
private Process _process;
private InputReader _out;
private InputReader _err;
/**
* Stream redirector
*/
private static class InputReader extends Thread {
private static final int BUFFER_SIZE = 2048;
private final InputStream _in;
private final ByteArrayOutputStream _out;
    // Volatile: written by the reader thread, read by callers of getOutput().
    private volatile boolean _running = false;
InputReader(InputStream in) {
_in = in;
_out = new ByteArrayOutputStream();
}
@Override
public void run() {
_running = true;
byte[] buf = new byte[BUFFER_SIZE];
int n = 0;
try {
while ((n = _in.read(buf)) != -1)
_out.write(buf, 0, n);
} catch (IOException e) {
LOG.error("error while reading external command", e);
}
_running = false;
}
public byte[] getOutput() {
if (_running)
throw new IllegalStateException("wait for process to be completed");
return _out.toByteArray();
}
}
/**
* Initialize with a {@link ProcessBuilder}
* @param processBuilder initialized {@link ProcessBuilder} object
*/
public ExternalCommand(ProcessBuilder processBuilder) {
_processBuilder = processBuilder;
}
/**
* After creating the command, you have to start it...
* @throws IOException
*/
public void start() throws IOException {
_process = _processBuilder.start();
_out = new InputReader(new BufferedInputStream(_process.getInputStream()));
_err = new InputReader(new BufferedInputStream(_process.getErrorStream()));
_out.start();
_err.start();
}
/**
* @see {@link ProcessBuilder#environment()}
*/
public Map<String, String> getEnvironment() {
return _processBuilder.environment();
}
/**
* @see {@link ProcessBuilder#directory()}
*/
public File getWorkingDirectory() {
return _processBuilder.directory();
}
/**
* @see {@link ProcessBuilder#directory(File)}
*/
public void setWorkingDirectory(File directory) {
_processBuilder.directory(directory);
}
/**
* @see {@link ProcessBuilder#redirectErrorStream()}
*/
public boolean getRedirectErrorStream() {
return _processBuilder.redirectErrorStream();
}
/**
* @see {@link ProcessBuilder#redirectErrorStream(boolean)}
*/
public void setRedirectErrorStream(boolean redirectErrorStream) {
_processBuilder.redirectErrorStream(redirectErrorStream);
}
/**
* Get the contents of the output stream after completion
* @return bytes from the output stream
* @throws InterruptedException the process was interrupted before completion
*/
public byte[] getOutput() throws InterruptedException {
waitFor();
return _out.getOutput();
}
/**
* Get the contents of the error stream after completion
* @return bytes from the error stream
* @throws InterruptedException the process was interrupted before completion
*/
public byte[] getError() throws InterruptedException {
waitFor();
return _err.getOutput();
}
/**
* Returns the output as a string.
* @param encoding string encoding scheme, e.g. "UTF-8"
* @return encoded string
* @throws InterruptedException the process was interrupted before completion
* @throws UnsupportedEncodingException the encoding scheme is invalid
*/
public String getStringOutput(String encoding) throws InterruptedException,
UnsupportedEncodingException {
return new String(getOutput(), encoding);
}
/**
* Returns the output as a string. Uses encoding "UTF-8".
* @return utf8 encoded string
* @throws InterruptedException the process was interrupted before completion
*/
public String getStringOutput() throws InterruptedException {
try {
return getStringOutput("UTF-8");
} catch (UnsupportedEncodingException e) {
// should not happen
throw new RuntimeException(e);
}
}
/**
* Returns the error as a string.
* @param encoding the encoding scheme, e.g. "UTF-8"
* @return error as string
* @throws InterruptedException the process was interrupted before completion
* @throws UnsupportedEncodingException the encoding scheme is invalid
*/
public String getStringError(String encoding) throws InterruptedException,
UnsupportedEncodingException {
return new String(getError(), encoding);
}
/**
* Returns the error as a string. Uses encoding "UTF-8".
* @return error as string
* @throws InterruptedException the process was interrupted before completion
*/
public String getStringError() throws InterruptedException {
try {
return getStringError("UTF-8");
} catch (UnsupportedEncodingException e) {
// should not happen
throw new RuntimeException(e);
}
}
/**
* Properly waits until everything is complete: joins on the thread that
* reads the output, joins on the thread that reads the error and finally
   * waits for the process to finish.
* @return the status code of the process.
* @throws InterruptedException the process was interrupted before completion
*/
public int waitFor() throws InterruptedException {
if (_process == null)
throw new IllegalStateException("you must call start first");
_out.join();
_err.join();
return _process.waitFor();
}
/**
* Properly waits until everything is complete: joins on the thread that
* reads the output, joins on the thread that reads the error and finally
   * waits for the process to finish.
* If the process has not completed before the timeout, throws a {@link TimeoutException}
* @return the status code of the process.
* @throws TimeoutException the process timed out
* @throws InterruptedException the process was interrupted before completion
*/
  public int waitFor(long timeout) throws InterruptedException, TimeoutException {
    if (_process == null)
      throw new IllegalStateException("you must call start first");
    // track a deadline so the single timeout budget covers both reader joins
    long deadline = System.currentTimeMillis() + timeout;
    _out.join(timeout);
    if (_out.isAlive())
      throw new TimeoutException("Wait timed out");
    long remaining = deadline - System.currentTimeMillis();
    _err.join(Math.max(remaining, 1));
    if (_err.isAlive())
      throw new TimeoutException("Wait timed out");
    // Process.waitFor() has no timeout in this API, not much we can do here;
    // waiting on the two reader threads above should give us some safety
    return _process.waitFor();
  }
/**
* @see {@link Process#exitValue()}
* @return the return code of the process
*/
public int exitValue() {
if (_process == null)
throw new IllegalStateException("you must call start first");
return _process.exitValue();
}
/**
* see {@link Process#destroy()}
*/
public void destroy() {
if (_process == null)
throw new IllegalStateException("you must call start first");
_process.destroy();
}
/**
* Creates an external process from the command. It is not started and you have to call
* start on it!
* @param commands the command to execute
* @return the process
*/
public static ExternalCommand create(String... commands) {
ExternalCommand ec = new ExternalCommand(new ProcessBuilder(commands));
return ec;
}
/**
* Creates an external process from the command. It is not started and you have to call
* start on it!
* @param commands the command to execute
* @return the process
*/
public static ExternalCommand create(List<String> commands) {
ExternalCommand ec = new ExternalCommand(new ProcessBuilder(commands));
return ec;
}
/**
* Creates an external process from the command. The command is executed.
* @param commands the commands to execute
* @return the process
* @throws IOException if there is an error
*/
public static ExternalCommand start(String... commands) throws IOException {
ExternalCommand ec = new ExternalCommand(new ProcessBuilder(commands));
ec.start();
return ec;
}
/**
* Executes the external command in the given working directory and waits for it to be
* finished.
* @param workingDirectory the root directory from where to run the command
   * @param command the command to execute (should be relative to the working directory)
* @param args the arguments to the command
* @return the process
*/
public static ExternalCommand execute(File workingDirectory, String command, String... args)
throws IOException, InterruptedException {
try {
return executeWithTimeout(workingDirectory, command, 0, args);
} catch (TimeoutException e) {
// Can't happen!
throw new IllegalStateException(MODULE + ".execute: Unexpected timeout occurred!");
}
}
/**
* Executes the external command in the given working directory and waits (until timeout
* is elapsed) for it to be finished.
* @param workingDirectory
* the root directory from where to run the command
* @param command
   *          the command to execute (should be relative to the working directory)
* @param timeout
* the maximum amount of time to wait for this external command (in ms). If
* this value is less than or equal to 0, timeout is ignored
* @param args
* the arguments to the command
* @return the process
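   * <p>
   * For example (the working directory and script name are illustrative), this runs a script
   * with a five-second timeout:
   * <pre>{@code
   * ExternalCommand cmd = ExternalCommand.executeWithTimeout(
   *     new File("/tmp"), "myscript.sh", 5000L, "arg1");
   * int exitCode = cmd.exitValue();
   * }</pre>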
*/
public static ExternalCommand executeWithTimeout(File workingDirectory, String command,
long timeout, String... args) throws IOException, InterruptedException, TimeoutException {
List<String> arguments = new ArrayList<String>(args.length + 1);
arguments.add(new File(workingDirectory, command).getAbsolutePath());
arguments.addAll(Arrays.asList(args));
ExternalCommand cmd = ExternalCommand.create(arguments);
cmd.setWorkingDirectory(workingDirectory);
cmd.setRedirectErrorStream(true);
cmd.start();
/* Use timeout if it is a valid value! */
if (timeout <= 0)
cmd.waitFor();
else
cmd.waitFor(timeout);
if (LOG.isDebugEnabled())
LOG.debug(cmd.getStringOutput());
return cmd;
}
}
| 9,913 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/PreConnectCallback.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Called to allow definition of tasks prior to connecting to Zookeeper
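 * <p>
 * A sketch of registering one via {@code HelixManager#addPreConnectCallback} ({@code manager}
 * and the check body are illustrative):
 * <pre>{@code
 * manager.addPreConnectCallback(() -> {
 *   // validate local state here; throwing aborts the connect
 * });
 * }</pre>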
*/
public interface PreConnectCallback {
/**
   * Callback function that is called by HelixManager before connecting to Zookeeper. If an
   * exception is thrown, HelixManager will not connect and no live instance is created.
* @see ZkHelixManager#handleNewSessionAsParticipant()
*/
public void onPreConnect();
}
| 9,914 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/MessageListener.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Interface to implement when there is a change to messages.
*
* @deprecated
* NOTE: This interface definition is moved to {@link org.apache.helix.api.listeners.MessageListener}
*/
@Deprecated
public interface MessageListener extends org.apache.helix.api.listeners.MessageListener {
}
| 9,915 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/ConfigAccessor.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.helix.manager.zk.GenericZkHelixApiBuilder;
import org.apache.helix.manager.zk.ZKUtil;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.ConfigScope;
import org.apache.helix.model.CustomizedStateConfig;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.RESTConfig;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.util.HelixUtil;
import org.apache.helix.util.StringTemplate;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.client.FederatedZkClient;
import org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Provides access to the persistent configuration of the cluster, the instances that live on it,
* and the logical resources assigned to it.
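 * <p>
 * A minimal sketch of typical usage (the ZK address, cluster name, and config key are
 * illustrative):
 * <pre>{@code
 * ConfigAccessor accessor =
 *     new ConfigAccessor.Builder().setZkAddress("localhost:2181").build();
 * HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER)
 *     .forCluster("MyCluster").build();
 * accessor.set(scope, "allowParticipantAutoJoin", "true");
 * String value = accessor.get(scope, "allowParticipantAutoJoin");
 * accessor.close();
 * }</pre>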
*/
public class ConfigAccessor {
private static Logger LOG = LoggerFactory.getLogger(ConfigAccessor.class);
private static final StringTemplate template = new StringTemplate();
static {
// @formatter:off
template.addEntry(ConfigScopeProperty.CLUSTER, 1, "/{clusterName}/CONFIGS/CLUSTER");
template.addEntry(ConfigScopeProperty.CLUSTER, 2,
"/{clusterName}/CONFIGS/CLUSTER/{clusterName}|SIMPLEKEYS");
template.addEntry(ConfigScopeProperty.PARTICIPANT, 1, "/{clusterName}/CONFIGS/PARTICIPANT");
template.addEntry(ConfigScopeProperty.PARTICIPANT, 2,
"/{clusterName}/CONFIGS/PARTICIPANT/{participantName}|SIMPLEKEYS");
template.addEntry(ConfigScopeProperty.RESOURCE, 1, "/{clusterName}/CONFIGS/RESOURCE");
template.addEntry(ConfigScopeProperty.RESOURCE, 2,
"/{clusterName}/CONFIGS/RESOURCE/{resourceName}|SIMPLEKEYS");
template.addEntry(ConfigScopeProperty.PARTITION, 2,
"/{clusterName}/CONFIGS/RESOURCE/{resourceName}|MAPKEYS");
template.addEntry(ConfigScopeProperty.PARTITION, 3,
"/{clusterName}/CONFIGS/RESOURCE/{resourceName}|MAPMAPKEYS|{partitionName}");
// @formatter:on
}
private final RealmAwareZkClient _zkClient;
// true if ConfigAccessor was instantiated with a HelixZkClient, false otherwise
// This is used for close() to determine how ConfigAccessor should close the underlying ZkClient
private final boolean _usesExternalZkClient;
private ConfigAccessor(RealmAwareZkClient zkClient, boolean usesExternalZkClient) {
_zkClient = zkClient;
_usesExternalZkClient = usesExternalZkClient;
}
/**
* Initialize an accessor with a Zookeeper client
* Note: it is recommended to use the other constructor instead to avoid having to create a
* RealmAwareZkClient.
* @param zkClient
*/
@Deprecated
public ConfigAccessor(RealmAwareZkClient zkClient) {
_zkClient = zkClient;
_usesExternalZkClient = true;
}
/**
* Initialize a ConfigAccessor with a ZooKeeper connect string. It will use a SharedZkClient with
* default settings. Note that ZNRecordSerializer will be used for the internal ZkClient since
* ConfigAccessor only deals with Helix's data models like ResourceConfig.
* @param zkAddress
*/
@Deprecated
public ConfigAccessor(String zkAddress) {
_usesExternalZkClient = false;
// If the multi ZK config is enabled, use FederatedZkClient on multi-realm mode
if (Boolean.getBoolean(SystemPropertyKeys.MULTI_ZK_ENABLED) || zkAddress == null) {
try {
_zkClient = new FederatedZkClient(
new RealmAwareZkClient.RealmAwareZkConnectionConfig.Builder().build(),
new RealmAwareZkClient.RealmAwareZkClientConfig()
.setZkSerializer(new ZNRecordSerializer()));
return;
} catch (InvalidRoutingDataException | IllegalStateException e) {
throw new HelixException("Failed to create ConfigAccessor!", e);
}
}
_zkClient = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(zkAddress),
new HelixZkClient.ZkClientConfig().setZkSerializer(new ZNRecordSerializer()));
}
/**
* get config
* @deprecated replaced by {@link #get(HelixConfigScope, String)}
* @param scope
* @param key
* @return value or null if doesn't exist
*/
@Deprecated
public String get(ConfigScope scope, String key) {
Map<String, String> map = get(scope, Arrays.asList(key));
return map.get(key);
}
/**
* get configs
   * @deprecated replaced by {@link #get(HelixConfigScope, List)}
* @param scope
* @param keys
* @return
*/
@Deprecated
public Map<String, String> get(ConfigScope scope, List<String> keys) {
if (scope == null || scope.getScope() == null) {
LOG.error("Scope can't be null");
return null;
}
// String value = null;
Map<String, String> map = new HashMap<String, String>();
String clusterName = scope.getClusterName();
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("cluster " + clusterName + " is not setup yet");
}
String scopeStr = scope.getScopeStr();
String[] splits = scopeStr.split("\\|");
ZNRecord record = _zkClient.readData(splits[0], true);
if (record != null) {
if (splits.length == 1) {
for (String key : keys) {
if (record.getSimpleFields().containsKey(key)) {
map.put(key, record.getSimpleField(key));
}
}
} else if (splits.length == 2) {
if (record.getMapField(splits[1]) != null) {
for (String key : keys) {
if (record.getMapField(splits[1]).containsKey(key)) {
map.put(key, record.getMapField(splits[1]).get(key));
}
}
}
}
}
return map;
}
/**
* get a single config entry
* @param scope specification of the entity set to query
* (e.g. cluster, resource, participant, etc.)
* @param key the identifier of the configuration entry
* @return the configuration entry
*/
public String get(HelixConfigScope scope, String key) {
Map<String, String> map = get(scope, Arrays.asList(key));
if (map != null) {
return map.get(key);
}
return null;
}
/**
* get many config entries
* @param scope scope specification of the entity set to query
* (e.g. cluster, resource, participant, etc.)
* @param keys the identifiers of the configuration entries
* @return the configuration entries, organized by key
*/
public Map<String, String> get(HelixConfigScope scope, List<String> keys) {
if (scope == null || scope.getType() == null || !scope.isFullKey()) {
LOG.error("fail to get configs. invalid config scope. scope: {}, keys: {}.", scope, keys);
return null;
}
ZNRecord record = getConfigZnRecord(scope);
if (record == null) {
LOG.warn("No config found at {}.", scope.getZkPath());
return null;
}
Map<String, String> map = new HashMap<String, String>();
String mapKey = scope.getMapKey();
if (mapKey == null) {
for (String key : keys) {
if (record.getSimpleFields().containsKey(key)) {
map.put(key, record.getSimpleField(key));
}
}
} else {
Map<String, String> configMap = record.getMapField(mapKey);
if (configMap == null) {
LOG.warn("No map-field found in {} using mapKey: {}.", record, mapKey);
return null;
}
for (String key : keys) {
if (record.getMapField(mapKey).containsKey(key)) {
map.put(key, record.getMapField(mapKey).get(key));
}
}
}
return map;
}
private ZNRecord getConfigZnRecord(HelixConfigScope scope) {
String clusterName = scope.getClusterName();
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("fail to get configs. cluster " + clusterName + " is not setup yet");
}
return _zkClient.readData(scope.getZkPath(), true);
}
/**
* Set config, create if not exist
* @deprecated replaced by {@link #set(HelixConfigScope, String, String)}
* @param scope
* @param key
* @param value
*/
@Deprecated
public void set(ConfigScope scope, String key, String value) {
Map<String, String> map = new HashMap<String, String>();
map.put(key, value);
set(scope, map);
}
/**
* Set configs, create if not exist
   * @deprecated replaced by {@link #set(HelixConfigScope, Map)}
* @param scope
* @param keyValueMap
*/
@Deprecated
public void set(ConfigScope scope, Map<String, String> keyValueMap) {
if (scope == null || scope.getScope() == null) {
LOG.error("Scope can't be null.");
return;
}
String clusterName = scope.getClusterName();
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("cluster: " + clusterName + " is NOT setup.");
}
if (scope.getScope() == ConfigScopeProperty.PARTICIPANT) {
String scopeStr = scope.getScopeStr();
String instanceName = scopeStr.substring(scopeStr.lastIndexOf('/') + 1);
if (!ZKUtil.isInstanceSetup(_zkClient, scope.getClusterName(), instanceName,
InstanceType.PARTICIPANT)) {
throw new HelixException(
"instance: " + instanceName + " is NOT setup in cluster: " + clusterName);
}
}
// use "|" to delimit resource and partition. e.g. /MyCluster/CONFIGS/PARTICIPANT/MyDB|MyDB_0
String scopeStr = scope.getScopeStr();
String[] splits = scopeStr.split("\\|");
String id = splits[0].substring(splits[0].lastIndexOf('/') + 1);
ZNRecord update = new ZNRecord(id);
if (splits.length == 1) {
for (String key : keyValueMap.keySet()) {
String value = keyValueMap.get(key);
update.setSimpleField(key, value);
}
} else if (splits.length == 2) {
if (update.getMapField(splits[1]) == null) {
update.setMapField(splits[1], new TreeMap<String, String>());
}
for (String key : keyValueMap.keySet()) {
String value = keyValueMap.get(key);
update.getMapField(splits[1]).put(key, value);
}
}
ZKUtil.createOrMerge(_zkClient, splits[0], update, true, true);
}
/**
* Set config, creating it if it doesn't exist
* @param scope scope specification of the entity set to query
* (e.g. cluster, resource, participant, etc.)
* @param key the identifier of the configuration entry
* @param value the configuration
*/
public void set(HelixConfigScope scope, String key, String value) {
Map<String, String> map = new TreeMap<String, String>();
map.put(key, value);
set(scope, map);
}
/**
* Set multiple configs, creating them if they don't exist
* @param scope scope specification of the entity set to query
* (e.g. cluster, resource, participant, etc.)
* @param keyValueMap configurations organized by their identifiers
*/
public void set(HelixConfigScope scope, Map<String, String> keyValueMap) {
if (scope == null || scope.getType() == null || !scope.isFullKey()) {
LOG.error("fail to set config. invalid config scope. Scope: {}.", scope);
return;
}
String clusterName = scope.getClusterName();
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("fail to set config. cluster: " + clusterName + " is NOT setup.");
}
if (scope.getType() == ConfigScopeProperty.PARTICIPANT) {
if (!ZKUtil.isInstanceSetup(_zkClient, scope.getClusterName(), scope.getParticipantName(),
InstanceType.PARTICIPANT)) {
throw new HelixException("fail to set config. instance: " + scope.getParticipantName()
+ " is NOT setup in cluster: " + clusterName);
}
}
String mapKey = scope.getMapKey();
String zkPath = scope.getZkPath();
String id = zkPath.substring(zkPath.lastIndexOf('/') + 1);
ZNRecord update = new ZNRecord(id);
if (mapKey == null) {
update.getSimpleFields().putAll(keyValueMap);
} else {
update.setMapField(mapKey, keyValueMap);
}
ZKUtil.createOrMerge(_zkClient, zkPath, update, true, true);
}
/**
* Remove config
* @deprecated replaced by {@link #remove(HelixConfigScope, String)}
* @param scope
* @param key
*/
@Deprecated
public void remove(ConfigScope scope, String key) {
remove(scope, Arrays.asList(key));
}
/**
* remove configs
   * @deprecated replaced by {@link #remove(HelixConfigScope, List)}
* @param scope
* @param keys
*/
@Deprecated
public void remove(ConfigScope scope, List<String> keys) {
if (scope == null || scope.getScope() == null) {
LOG.error("Scope can't be null.");
return;
}
String clusterName = scope.getClusterName();
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("cluster " + clusterName + " is not setup yet");
}
String scopeStr = scope.getScopeStr();
String[] splits = scopeStr.split("\\|");
String id = splits[0].substring(splits[0].lastIndexOf('/') + 1);
ZNRecord update = new ZNRecord(id);
if (splits.length == 1) {
// subtract doesn't care about value, use empty string
for (String key : keys) {
update.setSimpleField(key, "");
}
} else if (splits.length == 2) {
if (update.getMapField(splits[1]) == null) {
update.setMapField(splits[1], new TreeMap<String, String>());
}
// subtract doesn't care about value, use empty string
for (String key : keys) {
update.getMapField(splits[1]).put(key, "");
}
}
ZKUtil.subtract(_zkClient, splits[0], update);
}
/**
* Remove a single config
* @param scope scope specification of the entity set to query
* (e.g. cluster, resource, participant, etc.)
* @param key the identifier of the configuration entry
*/
public void remove(HelixConfigScope scope, String key) {
remove(scope, Arrays.asList(key));
}
/**
* Remove multiple configs
* @param scope scope specification of the entity set to query
* (e.g. cluster, resource, participant, etc.)
* @param keys the identifiers of the configuration entries
*/
public void remove(HelixConfigScope scope, List<String> keys) {
if (scope == null || scope.getType() == null || !scope.isFullKey()) {
LOG.error("fail to remove. invalid scope: {}, keys: {}", scope, keys);
return;
}
String clusterName = scope.getClusterName();
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("fail to remove. cluster " + clusterName + " is not setup yet");
}
String zkPath = scope.getZkPath();
String mapKey = scope.getMapKey();
String id = zkPath.substring(zkPath.lastIndexOf('/') + 1);
ZNRecord update = new ZNRecord(id);
if (mapKey == null) {
// subtract doesn't care about value, use empty string
for (String key : keys) {
update.setSimpleField(key, "");
}
} else {
update.setMapField(mapKey, new TreeMap<String, String>());
// subtract doesn't care about value, use empty string
for (String key : keys) {
update.getMapField(mapKey).put(key, "");
}
}
ZKUtil.subtract(_zkClient, zkPath, update);
}
/**
* Remove multiple configs
*
* @param scope scope specification of the entity set to query (e.g. cluster, resource,
* participant, etc.)
* @param recordToRemove the ZNRecord that holds the entries that needs to be removed
*/
public void remove(HelixConfigScope scope, ZNRecord recordToRemove) {
if (scope == null || scope.getType() == null || !scope.isFullKey()) {
LOG.error("fail to remove. invalid scope: {}.", scope);
return;
}
String clusterName = scope.getClusterName();
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("fail to remove. cluster " + clusterName + " is not setup yet");
}
String zkPath = scope.getZkPath();
ZKUtil.subtract(_zkClient, zkPath, recordToRemove);
}
/**
* get config keys
* @deprecated replaced by {@link #getKeys(HelixConfigScope)}
* @param type
* @param clusterName
* @param keys
* @return
*/
@Deprecated
public List<String> getKeys(ConfigScopeProperty type, String clusterName, String... keys) {
if (type == null || clusterName == null) {
LOG.error("ClusterName|scope can't be null.");
return Collections.emptyList();
}
try {
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
LOG.error("cluster {} is not setup yet.", clusterName);
return Collections.emptyList();
}
String[] args = new String[1 + keys.length];
args[0] = clusterName;
System.arraycopy(keys, 0, args, 1, keys.length);
String scopeStr = template.instantiate(type, args);
String[] splits = scopeStr.split("\\|");
List<String> retKeys = null;
if (splits.length == 1) {
retKeys = _zkClient.getChildren(splits[0]);
} else {
ZNRecord record = _zkClient.readData(splits[0]);
if (splits[1].startsWith("SIMPLEKEYS")) {
retKeys = new ArrayList<String>(record.getSimpleFields().keySet());
} else if (splits[1].startsWith("MAPKEYS")) {
retKeys = new ArrayList<String>(record.getMapFields().keySet());
} else if (splits[1].startsWith("MAPMAPKEYS")) {
retKeys = new ArrayList<String>(record.getMapField(splits[2]).keySet());
}
}
if (retKeys == null) {
LOG.error("Invalid scope: {} or keys: {}.", type, Arrays.toString(args));
return Collections.emptyList();
}
Collections.sort(retKeys);
return retKeys;
} catch (Exception e) {
return Collections.emptyList();
}
}
/**
* Get list of config keys for a scope
* @param scope
* @return a list of configuration keys
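   * <p>
   * For example, a participant-level scope that is not a full key lists the participant names
   * under the cluster's config (the cluster name is illustrative):
   * <pre>{@code
   * HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT)
   *     .forCluster("MyCluster").build();
   * List<String> participants = accessor.getKeys(scope);
   * }</pre>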
*/
public List<String> getKeys(HelixConfigScope scope) {
if (scope == null || scope.getType() == null) {
LOG.error("Fail to getKeys. Invalid config scope: {}.", scope);
return null;
}
if (!ZKUtil.isClusterSetup(scope.getClusterName(), _zkClient)) {
LOG.error("Fail to getKeys. Cluster {} is not setup yet.", scope.getClusterName());
return Collections.emptyList();
}
String zkPath = scope.getZkPath();
String mapKey = scope.getMapKey();
List<String> retKeys = null;
if (scope.isFullKey()) {
ZNRecord record = _zkClient.readData(zkPath);
if (mapKey == null) {
retKeys = new ArrayList<String>(record.getSimpleFields().keySet());
} else {
retKeys = new ArrayList<String>(record.getMapField(mapKey).keySet());
}
} else {
if (scope.getType() == ConfigScopeProperty.PARTITION) {
ZNRecord record = _zkClient.readData(zkPath);
retKeys = new ArrayList<String>(record.getMapFields().keySet());
} else {
retKeys = _zkClient.getChildren(zkPath);
}
}
if (retKeys != null) {
Collections.sort(retKeys);
}
return retKeys;
}
/**
* Get CustomizedStateConfig of the given cluster.
* @param clusterName
* @return The instance of {@link CustomizedStateConfig}
*/
public CustomizedStateConfig getCustomizedStateConfig(String clusterName) {
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException(String.format("Failed to get config. cluster: %s is not setup.", clusterName));
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CUSTOMIZED_STATE).forCluster(clusterName).build();
ZNRecord record = getConfigZnRecord(scope);
if (record == null) {
LOG.warn("No customized state aggregation config found at {}.", scope.getZkPath());
return null;
}
return new CustomizedStateConfig.Builder(record).build();
}
/**
* Get ClusterConfig of the given cluster.
*
* @param clusterName
*
* @return
*/
public ClusterConfig getClusterConfig(String clusterName) {
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("fail to get config. cluster: " + clusterName + " is NOT setup.");
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(clusterName).build();
ZNRecord record = getConfigZnRecord(scope);
if (record == null) {
LOG.warn("No config found at {}.", scope.getZkPath());
return null;
}
return new ClusterConfig(record);
}
/**
* Get CloudConfig of the given cluster.
* @param clusterName
* @return The instance of {@link CloudConfig}
*/
public CloudConfig getCloudConfig(String clusterName) {
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException(
String.format("Failed to get config. cluster: %s is not setup.", clusterName));
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CLOUD).forCluster(clusterName).build();
ZNRecord record = getConfigZnRecord(scope);
if (record == null) {
LOG.warn("No cloud config found at {}.", scope.getZkPath());
return null;
}
return new CloudConfig(record);
}
/**
* Delete cloud config fields (not the whole config)
* @param clusterName
* @param cloudConfig
*/
public void deleteCloudConfigFields(String clusterName, CloudConfig cloudConfig) {
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("fail to delete cloud config. cluster: " + clusterName + " is NOT setup.");
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CLOUD).forCluster(clusterName).build();
remove(scope, cloudConfig.getRecord());
}
/**
* Update cloud config
* @param clusterName
* @param cloudConfig
*/
public void updateCloudConfig(String clusterName, CloudConfig cloudConfig) {
updateCloudConfig(clusterName, cloudConfig, false);
}
private void updateCloudConfig(String clusterName, CloudConfig cloudConfig, boolean overwrite) {
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("Fail to update cloud config. cluster: " + clusterName + " is NOT setup.");
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CLOUD).forCluster(clusterName).build();
String zkPath = scope.getZkPath();
if (overwrite) {
ZKUtil.createOrReplace(_zkClient, zkPath, cloudConfig.getRecord(), true);
} else {
ZKUtil.createOrUpdate(_zkClient, zkPath, cloudConfig.getRecord(), true, true);
}
}
/**
* Get RestConfig of the given cluster.
* @param clusterName The cluster
* @return The instance of {@link RESTConfig}
*/
public RESTConfig getRESTConfig(String clusterName) {
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.REST).forCluster(clusterName).build();
ZNRecord record = getConfigZnRecord(scope);
if (record == null) {
LOG.warn("No rest config found at {}.", scope.getZkPath());
return null;
}
return new RESTConfig(record);
}
/**
* Set RestConfig of a given cluster
* @param clusterName the cluster id
* @param restConfig the RestConfig to be set to the cluster
*/
public void setRESTConfig(String clusterName, RESTConfig restConfig) {
updateRESTConfig(clusterName, restConfig, true);
}
/**
* Update RestConfig of a given cluster
* @param clusterName the cluster id
* @param restConfig the new RestConfig to be set to the cluster
*/
public void updateRESTConfig(String clusterName, RESTConfig restConfig) {
updateRESTConfig(clusterName, restConfig, false);
}
private void updateRESTConfig(String clusterName, RESTConfig restConfig, boolean overwrite) {
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("Fail to update REST config. cluster: " + clusterName + " is NOT setup.");
}
HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.REST).forCluster(clusterName).build();
String zkPath = scope.getZkPath();
// Create "/{clusterId}/CONFIGS/REST" if it does not exist
String parentPath = HelixUtil.getZkParentPath(zkPath);
if (!_zkClient.exists(parentPath)) {
ZKUtil.createOrMerge(_zkClient, parentPath, new ZNRecord(parentPath), true, true);
}
if (overwrite) {
ZKUtil.createOrReplace(_zkClient, zkPath, restConfig.getRecord(), true);
} else {
ZKUtil.createOrUpdate(_zkClient, zkPath, restConfig.getRecord(), true, true);
}
}
public void deleteRESTConfig(String clusterName) {
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("Fail to delete REST config. cluster: " + clusterName + " is NOT setup.");
}
HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.REST).forCluster(clusterName).build();
String zkPath = scope.getZkPath();
// Check if "/{clusterId}/CONFIGS/REST" exists
String parentPath = HelixUtil.getZkParentPath(zkPath);
if (!_zkClient.exists(parentPath)) {
throw new HelixException("Fail to delete REST config. cluster: " + clusterName + " does not have a rest config."); }
ZKUtil.dropChildren(_zkClient, parentPath, new ZNRecord(clusterName));
}
/**
* Set ClusterConfig of the given cluster.
* The current Cluster config will be replaced with the given clusterConfig.
   * WARNING: This is not thread-safe or safe under concurrent updates.
*
* @param clusterName
* @param clusterConfig
*
* @return
*/
public void setClusterConfig(String clusterName, ClusterConfig clusterConfig) {
updateClusterConfig(clusterName, clusterConfig, true);
}
/**
* Update ClusterConfig of the given cluster.
   * For each field present in the given config, its value replaces the value of the same field
   * in the current config. Fields that exist only in the given config are added to the current
   * config. List fields and map fields are replaced as single entries.
   *
   * WARNING: This is not thread-safe or safe under concurrent updates.
*
* @param clusterName
* @param clusterConfig
*
* @return
*/
public void updateClusterConfig(String clusterName, ClusterConfig clusterConfig) {
updateClusterConfig(clusterName, clusterConfig, false);
}
private void updateClusterConfig(String clusterName, ClusterConfig clusterConfig,
boolean overwrite) {
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("fail to update config. cluster: " + clusterName + " is NOT setup.");
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(clusterName).build();
String zkPath = scope.getZkPath();
if (overwrite) {
ZKUtil.createOrReplace(_zkClient, zkPath, clusterConfig.getRecord(), true);
} else {
ZKUtil.createOrUpdate(_zkClient, zkPath, clusterConfig.getRecord(), true, true);
}
}
/**
* Get resource config for given resource in given cluster.
*
* @param clusterName
* @param resourceName
*
* @return
*/
public ResourceConfig getResourceConfig(String clusterName, String resourceName) {
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.RESOURCE).forCluster(clusterName)
.forResource(resourceName).build();
ZNRecord record = getConfigZnRecord(scope);
if (record == null) {
LOG.warn("No config found at {}.", scope.getZkPath());
return null;
}
return new ResourceConfig(record);
}
/**
* Set config of the given resource.
   * The current resource config will be replaced with the given resourceConfig.
   *
   * WARNING: This is not thread-safe or safe under concurrent updates.
*
* @param clusterName
* @param resourceName
* @param resourceConfig
*
* @return
*/
public void setResourceConfig(String clusterName, String resourceName,
ResourceConfig resourceConfig) {
updateResourceConfig(clusterName, resourceName, resourceConfig, true);
}
/**
* Update ResourceConfig of the given resource.
   * For each field present in the given config, its value replaces the value of the same field
   * in the current config. Fields that exist only in the given config are added to the current
   * config. List fields and map fields are replaced as single entries.
   *
   * WARNING: This is not thread-safe or safe under concurrent updates.
*
* @param clusterName
* @param resourceName
* @param resourceConfig
*
* @return
*/
public void updateResourceConfig(String clusterName, String resourceName,
ResourceConfig resourceConfig) {
updateResourceConfig(clusterName, resourceName, resourceConfig, false);
}
private void updateResourceConfig(String clusterName, String resourceName,
ResourceConfig resourceConfig, boolean overwrite) {
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("fail to setup config. cluster: " + clusterName + " is NOT setup.");
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.RESOURCE).forCluster(clusterName)
.forResource(resourceName).build();
String zkPath = scope.getZkPath();
if (overwrite) {
ZKUtil.createOrReplace(_zkClient, zkPath, resourceConfig.getRecord(), true);
} else {
ZKUtil.createOrUpdate(_zkClient, zkPath, resourceConfig.getRecord(), true, true);
}
}
/**
* Get instance config for given resource in given cluster.
*
* @param clusterName
* @param instanceName
*
* @return
*/
public InstanceConfig getInstanceConfig(String clusterName, String instanceName) {
if (!ZKUtil.isInstanceSetup(_zkClient, clusterName, instanceName, InstanceType.PARTICIPANT)) {
throw new HelixException(
"fail to get config. instance: " + instanceName + " is NOT setup in cluster: "
+ clusterName);
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT).forCluster(clusterName)
.forParticipant(instanceName).build();
ZNRecord record = getConfigZnRecord(scope);
if (record == null) {
LOG.warn("No config found at {}.", scope.getZkPath());
return null;
}
return new InstanceConfig(record);
}
/**
   * Set the config of the given instance.
   * The current instance config will be replaced with the given instanceConfig.
   * WARNING: This is not thread-safe or safe under concurrent updates.
*
* @param clusterName
* @param instanceName
* @param instanceConfig
*
* @return
*/
public void setInstanceConfig(String clusterName, String instanceName,
InstanceConfig instanceConfig) {
updateInstanceConfig(clusterName, instanceName, instanceConfig, true);
}
/**
   * Update InstanceConfig of the given instance. For each field present in the given config, its
   * value replaces the value of the same field in the current config. Fields that exist only in
   * the given config are added to the current config. List fields and map fields are replaced as
   * single entries.
   *
   * WARNING: This is not thread-safe or safe under concurrent updates.
*
* @param clusterName
* @param instanceName
* @param instanceConfig
*
* @return
*/
public void updateInstanceConfig(String clusterName, String instanceName,
InstanceConfig instanceConfig) {
updateInstanceConfig(clusterName, instanceName, instanceConfig, false);
}
private void updateInstanceConfig(String clusterName, String instanceName,
InstanceConfig instanceConfig, boolean overwrite) {
if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
throw new HelixException("fail to setup config. cluster: " + clusterName + " is NOT setup.");
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT).forCluster(clusterName)
.forParticipant(instanceName).build();
String zkPath = scope.getZkPath();
if (!_zkClient.exists(zkPath)) {
throw new HelixException(
"updateInstanceConfig failed. Given InstanceConfig does not already exist. instance: "
+ instanceName);
}
if (overwrite) {
ZKUtil.createOrReplace(_zkClient, zkPath, instanceConfig.getRecord(), true);
} else {
ZKUtil.createOrUpdate(_zkClient, zkPath, instanceConfig.getRecord(), true, true);
}
}
/**
* Closes ConfigAccessor: closes the stateful resources including the ZkClient.
*/
public void close() {
if (_zkClient != null && !_usesExternalZkClient) {
_zkClient.close();
}
}
@Override
public void finalize() {
close();
}
public static class Builder extends GenericZkHelixApiBuilder<Builder> {
public Builder() {
}
public ConfigAccessor build() {
validate();
return new ConfigAccessor(
createZkClient(_realmMode, _realmAwareZkConnectionConfig, _realmAwareZkClientConfig,
_zkAddress), false);
}
}
}
| 9,916 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/BaseDataAccessor.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.apache.helix.zookeeper.zkclient.IZkChildListener;
import org.apache.helix.zookeeper.zkclient.IZkDataListener;
import org.apache.zookeeper.data.Stat;
/**
* Generic interface for accessing and manipulating data on a backing store like Zookeeper.
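 * <p>
 * A sketch of creating and reading a ZNode, assuming a {@code ZNRecord}-typed accessor (the path
 * and record id are illustrative):
 * <pre>{@code
 * BaseDataAccessor<ZNRecord> accessor = ...; // e.g. a ZkBaseDataAccessor
 * accessor.create("/myCluster/myNode", new ZNRecord("myId"), AccessOption.PERSISTENT);
 * Stat stat = new Stat();
 * ZNRecord record = accessor.get("/myCluster/myNode", stat, AccessOption.PERSISTENT);
 * }</pre>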
* @param <T> The type of record to use
*/
public interface BaseDataAccessor<T> {
/**
   * This will always attempt to create the ZNode; if it already exists, it returns false. Parent
   * ZNodes are created if they do not exist. For performance reasons, it may try to create the
   * child first and only fall back to creating the parents if that fails.
* @param path path to the ZNode to create
* @param record the data to write to the ZNode
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return true if creation succeeded, false otherwise (e.g. if the ZNode exists)
*/
boolean create(String path, T record, int options);
/**
   * This will always attempt to create the ZNode; if it already exists, it returns false. Parent
   * ZNodes are created if they do not exist. For performance reasons, it may try to create the
   * child first and only fall back to creating the parents if that fails.
* @param path path to the ZNode to create
* @param record the data to write to the ZNode
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @param ttl TTL of the node in milliseconds, if options supports it
* @return true if creation succeeded, false otherwise (e.g. if the ZNode exists)
*/
default boolean create(String path, T record, int options, long ttl) {
throw new UnsupportedOperationException("create with TTL support is not implemented.");
}
/**
* This will always attempt to set the data on existing node. If the ZNode does not
   * exist it will create it and all its parent ZNodes if necessary
* @param path path to the ZNode to set
* @param record the data to write to the ZNode
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return true if data was successfully set, false otherwise
*/
boolean set(String path, T record, int options);
/**
* This will attempt to set the data on existing node only if version matches.
* If the ZNode does not exist it will create it and all its parent ZNodes only if expected
* version is -1
* @param path path to the ZNode to set
* @param record the data to write to the ZNode
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @param expectVersion the expected version of the data to be overwritten, -1 means match any
* version
* @return true if data was successfully set, false otherwise (e.g. if the version mismatches)
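   * <p>
   * A sketch of a version-guarded write, assuming a {@code ZNRecord}-typed accessor (the path is
   * illustrative):
   * <pre>{@code
   * Stat stat = new Stat();
   * ZNRecord current = accessor.get("/myCluster/myNode", stat, AccessOption.PERSISTENT);
   * boolean ok = accessor.set("/myCluster/myNode", current, stat.getVersion(),
   *     AccessOption.PERSISTENT);
   * }</pre>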
*/
boolean set(String path, T record, int expectVersion, int options);
/**
* This will attempt to update the data using the updater. If the ZNode
* does not exist it will create it and all its parent ZNodes.
* Updater will be invoked with null value if node does not exist.
* @param path path to the ZNode to update
* @param updater an update routine for the data to merge in
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return true if data update succeeded, false otherwise
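   * <p>
   * A sketch of an updater that merges one simple field, assuming a {@code ZNRecord}-typed
   * accessor (the path, id, and field name are illustrative):
   * <pre>{@code
   * accessor.update("/myCluster/myNode", currentData -> {
   *   ZNRecord record = currentData == null ? new ZNRecord("myId") : currentData;
   *   record.setSimpleField("lastUpdated", String.valueOf(System.currentTimeMillis()));
   *   return record;
   * }, AccessOption.PERSISTENT);
   * }</pre>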
*/
boolean update(String path, DataUpdater<T> updater, int options);
/**
* This will remove the ZNode and all its descendants if any
* @param path path to the root ZNode to remove
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return true if the removal succeeded, false otherwise
*/
boolean remove(String path, int options);
/**
* Use it when creating children under a parent node. This will use async api for better
* performance. If the child already exists it will return false.
* @param paths the paths to the children ZNodes
* @param records List of data to write to each of the path
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return For each child: true if creation succeeded, false otherwise (e.g. if the child exists)
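   * <p>
   * For example, assuming a {@code ZNRecord}-typed accessor (the paths and ids are illustrative):
   * <pre>{@code
   * List<String> paths = Arrays.asList("/parent/child1", "/parent/child2");
   * List<ZNRecord> records = Arrays.asList(new ZNRecord("child1"), new ZNRecord("child2"));
   * boolean[] created = accessor.createChildren(paths, records, AccessOption.PERSISTENT);
   * }</pre>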
*/
boolean[] createChildren(List<String> paths, List<T> records, int options);
/**
* Use it when creating children under a parent node. This will use async api for better
* performance. If the child already exists it will return false.
* @param paths the paths to the children ZNodes
* @param records List of data to write to each of the path
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @param ttl TTL of the node in milliseconds, if options supports it
* @return For each child: true if creation succeeded, false otherwise (e.g. if the child exists)
*/
default boolean[] createChildren(List<String> paths, List<T> records, int options, long ttl) {
throw new UnsupportedOperationException("createChildren with TTL support is not implemented.");
}
/**
   * Can set multiple children under a parent node. This will use async api for better
* performance. If this child does not exist it will create it.
* @param paths the paths to the children ZNodes
* @param records List of data with which to overwrite the corresponding ZNodes
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return For each child: true if the data was set, false otherwise
*/
boolean[] setChildren(List<String> paths, List<T> records, int options);
/**
* Can update multiple nodes using async api for better performance. If a child does not
* exist it will create it.
* @param paths paths to the children ZNodes
* @param updaters List of update routines for records to update
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return For each child, true if the data is updated successfully, false otherwise
*/
boolean[] updateChildren(List<String> paths, List<DataUpdater<T>> updaters, int options);
/**
   * Remove multiple paths using async api. Any child nodes will be removed as well.
* @param paths paths to the ZNodes to remove
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return For each ZNode, true if successfully removed, false otherwise
*/
boolean[] remove(List<String> paths, int options);
/**
* Get the {@link T} corresponding to the path
* @param path path to the ZNode
* @param stat retrieve the stat of the ZNode
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return the record data stored at the ZNode
*/
T get(String path, Stat stat, int options);
/**
* Get List of {@link T} corresponding to the paths using async api
* @param paths paths to the ZNodes
* @param stats retrieve a list of stats for the ZNodes
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return List of record data stored at each ZNode
*/
@Deprecated
List<T> get(List<String> paths, List<Stat> stats, int options);
/**
* Get List of {@link T} corresponding to the paths using async api
* @param paths paths to the ZNodes
* @param stats retrieve a list of stats for the ZNodes
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @param throwException Will throw an exception when it set to be true and one of the path
* is failed to read.
* @return List of record data stored at each ZNode, node value will return null if the node does
* not exist
*/
List<T> get(List<String> paths, List<Stat> stats, int options, boolean throwException)
throws HelixException;
/**
* Get the children under a parent path using async api
*
   * For this API, if some child nodes fail to be read, Helix returns the data of the nodes that
   * were read successfully, so the caller may get partial data. No exception is thrown even if
   * not all of the data can be read.
*
* @param parentPath path to the immediate parent ZNode
* @param stats Zookeeper Stat objects corresponding to each child
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return A list of children of the parent ZNode
*/
@Deprecated
List<T> getChildren(String parentPath, List<Stat> stats, int options);
/**
* Get the children under a parent path using async api
*
   * If some child nodes fail to be read, Helix retries up to the given retry count. If the
   * result still cannot be retrieved completely, Helix throws a HelixException.
*
* @param parentPath path to the immediate parent ZNode
* @param stats Zookeeper Stat objects corresponding to each child
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
   * @param retryCount the number of retries if the data cannot be completely read
* @param retryInterval The interval between two retries
* @return A list of children of the parent ZNode
*/
List<T> getChildren(String parentPath, List<Stat> stats, int options, int retryCount,
int retryInterval)
throws HelixException;
/**
* Returns the child names given a parent path
* @param parentPath path to the immediate parent ZNode
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return a list of the names of all of the parent ZNode's children
*/
List<String> getChildNames(String parentPath, int options);
/**
* checks if the path exists in zk
* @param path path to the ZNode to test
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return true if the ZNode exists, false otherwise
*/
boolean exists(String path, int options);
/**
   * checks if all the paths exist in zk
* @param paths paths to the ZNodes to test
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return for each path, true if a valid ZNode exists, false otherwise
*/
boolean[] exists(List<String> paths, int options);
/**
* Get the stats of all the paths
* @param paths paths of the ZNodes to query
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return Zookeeper Stat object for each path
*/
Stat[] getStats(List<String> paths, int options);
/**
* Get the stats of a single path
* @param path path of the ZNode to query
* @param options Set the type of ZNode see the valid values in {@link AccessOption}
* @return Zookeeper Stat object corresponding to the ZNode
*/
Stat getStat(String path, int options);
/**
* Subscribe data listener to path
* @param path path to the ZNode to listen to
* @param listener the listener to register for changes
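   * <p>
   * A sketch of a listener (the path is illustrative):
   * <pre>{@code
   * accessor.subscribeDataChanges("/myCluster/myNode", new IZkDataListener() {
   *   public void handleDataChange(String dataPath, Object data) {
   *     // react to updated data
   *   }
   *   public void handleDataDeleted(String dataPath) {
   *     // react to deletion
   *   }
   * });
   * }</pre>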
*/
void subscribeDataChanges(String path, IZkDataListener listener);
/**
* Unsubscribe data listener to path
* @param path path to the ZNode to stop listening to
* @param listener the listener currently subscribed to the ZNode
*/
void unsubscribeDataChanges(String path, IZkDataListener listener);
/**
* Subscribe child listener to path
* @param path path to the immediate parent ZNode
* @param listener the listener to register for changes
* @return
*/
List<String> subscribeChildChanges(String path, IZkChildListener listener);
/**
* Unsubscribe child listener to path
* @param path path to the immediate parent ZNode
* @param listener the listener currently subscribed to the children
*/
void unsubscribeChildChanges(String path, IZkChildListener listener);
/**
* TODO refactor this. reset() should not be in data accessor
* reset the cache if any, when session expiry happens
*/
void reset();
/**
* Close the connection to the metadata store
*/
default void close() {
System.out.println("Default close() was invoked! No operation was executed.");
}
}
| 9,917 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/PropertyPathConfig.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Use PropertyPathBuilder instead.
* Keep this class here for API backcompatible, will be removed in next major release.
*/
@Deprecated
public class PropertyPathConfig extends PropertyPathBuilder {
}
| 9,918 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/LiveInstanceChangeListener.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Interface to implement to listen for live instance changes.
*
* @deprecated
* NOTE: This interface definition is moved to {@link org.apache.helix.api.listeners.LiveInstanceChangeListener}
*/
@Deprecated
public interface LiveInstanceChangeListener extends
org.apache.helix.api.listeners.LiveInstanceChangeListener {
}
| 9,919 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/ZNRecordAssembler.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Deprecated - use ZNRecordAssembler in zookeeper-api instead.
* Constructs ZNRecords from collections of ZNRecords
*/
@Deprecated
public class ZNRecordAssembler extends org.apache.helix.zookeeper.datamodel.ZNRecordAssembler {
}
| 9,920 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/ScopedConfigChangeListener.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Interface to implement to listen for changes to any specified scope.
*
* @deprecated
* NOTE: This interface definition is moved to {@link org.apache.helix.api.listeners.ScopedConfigChangeListener}
*/
@Deprecated
public interface ScopedConfigChangeListener
extends org.apache.helix.api.listeners.ScopedConfigChangeListener {
}
| 9,921 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/NotificationContext.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.Map;
/**
* Metadata associated with a notification event and the current state of the cluster
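 * <p>
 * For example, a callback handler might inspect the context it receives (a sketch only;
 * the handler wiring is omitted):
 * <pre>{@code
 * if (context.getType() == NotificationContext.Type.CALLBACK) {
 *   HelixConstants.ChangeType change = context.getChangeType();
 *   // react to the change ...
 * }
 * }</pre>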
*/
public class NotificationContext {
/**
* keys used for object map
*/
public enum MapKey {
TASK_EXECUTOR,
CURRENT_STATE_UPDATE,
HELIX_TASK_RESULT
}
private Map<String, Object> _map;
private HelixManager _manager;
private Type _type;
private HelixConstants.ChangeType _changeType;
private String _pathChanged;
private String _eventName;
private long _creationTime;
private boolean _isChildChange;
/**
* Get the name associated with the event
*
* @return event name
*/
public String getEventName() {
return _eventName;
}
/**
* Set the name associated with the event
*
* @param eventName the event name
*/
public void setEventName(String eventName) {
_eventName = eventName;
}
/**
* Instantiate with a HelixManager
*
* @param manager {@link HelixManager} object
*/
public NotificationContext(HelixManager manager) {
_manager = manager;
_map = new HashMap<>();
_creationTime = System.currentTimeMillis();
}
/**
* Clone a new Notification context from existing one. Map contents are
* not recursively deep copied.
*
* @return new copy of NotificationContext
*/
  @Override
  public NotificationContext clone() {
NotificationContext copy = new NotificationContext(_manager);
copy.setType(_type);
copy.setChangeType(_changeType);
copy.setPathChanged(_pathChanged);
copy.setEventName(_eventName);
copy.setCreationTime(_creationTime);
copy._map.putAll(_map);
return copy;
}
/**
* Get the HelixManager associated with this notification
*
* @return {@link HelixManager} object
*/
public HelixManager getManager() {
return _manager;
}
/**
* Get a map describing the update (keyed on {@link MapKey})
*
* @return the object map describing the update
*/
public Map<String, Object> getMap() {
return _map;
}
/**
* Get the type of the notification
*
* @return the notification type
*/
public Type getType() {
return _type;
}
/**
* Set the HelixManager associated with this notification
*
* @param manager {@link HelixManager} object
*/
public void setManager(HelixManager manager) {
this._manager = manager;
}
/**
* Gets creation time.
*
* @return the creation time
*/
public long getCreationTime() {
return _creationTime;
}
/**
* Sets creation time.
*
* @param creationTime the creation time
*/
public void setCreationTime(long creationTime) {
_creationTime = creationTime;
}
/**
* Add notification metadata
*
* @param key String value of a {@link MapKey}
 * @param value the value to associate with the key
*/
public void add(String key, Object value) {
_map.put(key, value);
}
/**
* Set the notification map
*
 * @param map the notification attribute map
*/
public void setMap(Map<String, Object> map) {
this._map = map;
}
/**
* Set the notification type
*
 * @param type the notification {@link Type}
*/
public void setType(Type type) {
this._type = type;
}
/**
* Get a notification attribute
*
 * @param key String from a {@link MapKey}
 * @return the value associated with the key, or null if none is set
*/
public Object get(String key) {
return _map.get(key);
}
/**
* Valid types of notifications
*/
public enum Type {
INIT,
CALLBACK,
PERIODIC_REFRESH,
FINALIZE
}
/**
* Get the path changed status
*
* @return String corresponding to the path change
*/
public String getPathChanged() {
return _pathChanged;
}
/**
* Set the path changed status
*
 * @param pathChanged the path that changed
*/
public void setPathChanged(String pathChanged) {
this._pathChanged = pathChanged;
}
/**
* Gets the change type.
*
 * @return the change type
*/
public HelixConstants.ChangeType getChangeType() {
return _changeType;
}
/**
* Sets the change type.
*
* @param changeType the change type
*/
public void setChangeType(HelixConstants.ChangeType changeType) {
this._changeType = changeType;
}
  /**
   * Check whether this event was triggered by a child change
   * @return true if the callback was for a child change, false otherwise
   */
  public boolean getIsChildChange() {
return _isChildChange;
}
  /**
   * Set whether this event was triggered by a child change
   * @param isChildChange true if the callback was for a child change
   */
  public void setIsChildChange(boolean isChildChange) {
this._isChildChange = isChildChange;
}
}
| 9,922 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/HelixTimerTask.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Timer;
/**
 * Abstract base class for defining a generic task that runs periodically.
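 * <p>
 * A minimal sketch of a subclass (the task name and period are placeholders; imports of
 * java.util.Timer and java.util.TimerTask are assumed):
 * <pre>{@code
 * public class MyRefreshTask extends HelixTimerTask {
 *   public void start() {
 *     _timer = new Timer("my-refresh-task", true);
 *     _timer.scheduleAtFixedRate(new TimerTask() {
 *       public void run() {
 *         // periodic work goes here
 *       }
 *     }, 0L, 60_000L);
 *   }
 *   public void stop() {
 *     if (_timer != null) {
 *       _timer.cancel();
 *     }
 *   }
 * }
 * }</pre>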
*/
public abstract class HelixTimerTask {
protected Timer _timer = null;
/**
* Start a timer task
*/
public abstract void start();
/**
* Stop a timer task
*/
public abstract void stop();
}
| 9,923 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/ZNRecordUpdater.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.zookeeper.datamodel.ZNRecord;
/**
 * Deprecated - Use ZNRecordUpdater in zookeeper-api instead.
* Class that specifies how a ZNRecord should be updated with another ZNRecord
*/
@Deprecated
public class ZNRecordUpdater extends org.apache.helix.zookeeper.datamodel.ZNRecordUpdater {
/**
* Initialize with the record that will be updated
* @param record
*/
public ZNRecordUpdater(ZNRecord record) {
super(record);
}
}
| 9,924 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/HelixManagerFactory.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.manager.zk.HelixManagerStateListener;
import org.apache.helix.manager.zk.ZKHelixManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Factory for obtaining Helix cluster managers, organized by the backing system.
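 * <p>
 * Example usage (illustrative only; the cluster name, instance name, and ZooKeeper address
 * are placeholders):
 * <pre>{@code
 * HelixManager manager = HelixManagerFactory.getZKHelixManager(
 *     "myCluster", "localhost_12918", InstanceType.PARTICIPANT, "localhost:2181");
 * manager.connect();
 * // ... use the manager ...
 * manager.disconnect();
 * }</pre>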
*/
public final class HelixManagerFactory {
private static final Logger LOG = LoggerFactory.getLogger(HelixManagerFactory.class);
/**
* Construct a zk-based cluster manager that enforces all types (PARTICIPANT, CONTROLLER, and
* SPECTATOR) to have a name
* @param clusterName
* @param instanceName
* @param type
* @param zkAddr
* @return a HelixManager backed by Zookeeper
*/
public static HelixManager getZKHelixManager(String clusterName, String instanceName,
InstanceType type, String zkAddr) {
return new ZKHelixManager(clusterName, instanceName, type, zkAddr);
}
/**
* Construct a zk-based cluster manager that enforces all types (PARTICIPANT, CONTROLLER, and
* SPECTATOR) to have a name
* @param clusterName
* @param instanceName
* @param type
* @param zkAddr
* @param stateListener
* @return a HelixManager backed by Zookeeper
*/
public static HelixManager getZKHelixManager(String clusterName, String instanceName,
InstanceType type, String zkAddr, HelixManagerStateListener stateListener) {
return new ZKHelixManager(clusterName, instanceName, type, zkAddr, stateListener);
}
/**
* Construct a ZkHelixManager using the HelixManagerProperty instance given.
* HelixManagerProperty given must contain a valid ZkConnectionConfig.
* @param clusterName
* @param instanceName
* @param type
* @param stateListener
* @param helixManagerProperty must contain a valid ZkConnectionConfig
 * @return a HelixManager backed by Zookeeper
*/
public static HelixManager getZKHelixManager(String clusterName, String instanceName,
InstanceType type, HelixManagerStateListener stateListener,
HelixManagerProperty helixManagerProperty) {
return new ZKHelixManager(clusterName, instanceName, type, null, stateListener,
helixManagerProperty);
}
}
| 9,925 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/GroupCommit.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Iterator;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.zkclient.exception.ZkNoNodeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// TODO: move to manager.zk
/**
* Support committing updates to data such that they are ordered for each key
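 * <p>
 * Sketch of typical usage (the accessor, path, and delta record are placeholders):
 * <pre>{@code
 * GroupCommit committer = new GroupCommit();
 * boolean ok = committer.commit(accessor, AccessOption.PERSISTENT, path, delta);
 * }</pre>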
*/
public class GroupCommit {
private static Logger LOG = LoggerFactory.getLogger(GroupCommit.class);
private static int MAX_RETRY = 3;
private static class Queue {
final AtomicReference<Thread> _running = new AtomicReference<Thread>();
final ConcurrentLinkedQueue<Entry> _pending = new ConcurrentLinkedQueue<Entry>();
}
private static class Entry {
final String _key;
final ZNRecord _record;
AtomicBoolean _sent = new AtomicBoolean(false);
Entry(String key, ZNRecord record) {
_key = key;
_record = record;
}
}
private final Queue[] _queues = new Queue[100];
/**
* Set up a group committer and its associated queues
*/
public GroupCommit() {
// Don't use Arrays.fill();
for (int i = 0; i < _queues.length; ++i) {
_queues[i] = new Queue();
}
}
private Queue getQueue(String key) {
return _queues[(key.hashCode() & Integer.MAX_VALUE) % _queues.length];
}
/**
* Do a group update for data associated with a given key
* @param accessor accessor with the ability to pull from the current data
* @param options see {@link AccessOption}
* @param key the data identifier
* @param record the data to be merged in
* @return true if successful, false otherwise
*/
public boolean commit(BaseDataAccessor<ZNRecord> accessor, int options, String key,
ZNRecord record) {
return commit(accessor, options, key, record, false);
}
public boolean commit(BaseDataAccessor<ZNRecord> accessor, int options, String key,
ZNRecord record, boolean removeIfEmpty) {
Queue queue = getQueue(key);
Entry entry = new Entry(key, record);
boolean success = true;
queue._pending.add(entry);
while (!entry._sent.get()) {
if (queue._running.compareAndSet(null, Thread.currentThread())) {
ArrayList<Entry> processed = new ArrayList<>();
try {
if (queue._pending.peek() == null) {
return true;
}
// remove from queue
Entry first = queue._pending.poll();
processed.add(first);
String mergedKey = first._key;
ZNRecord merged = null;
try {
// accessor will fallback to zk if not found in cache
merged = accessor.get(mergedKey, null, options);
} catch (ZkNoNodeException e) {
// OK.
} catch (Exception e) {
LOG.error("Fail to get " + mergedKey + " from ZK", e);
return false;
}
          /*
           * If the local cache does not contain a value, check whether there is a
           * value in ZK and use it as the initial value if it exists. Only merge the
           * first record when a base value was fetched; merging a record into a fresh
           * copy of itself would duplicate its list fields.
           */
          if (merged == null) {
            merged = new ZNRecord(first._record);
          } else {
            merged.merge(first._record);
          }
Iterator<Entry> it = queue._pending.iterator();
while (it.hasNext()) {
Entry ent = it.next();
if (!ent._key.equals(mergedKey)) {
continue;
}
processed.add(ent);
merged.merge(ent._record);
// System.out.println("After merging:" + merged);
it.remove();
}
int retry = 0;
success = false;
while (++retry <= MAX_RETRY && !success) {
if (removeIfEmpty && merged.getMapFields().isEmpty()) {
try {
success = accessor.remove(mergedKey, options);
} catch (Exception e) {
LOG.error("Fails to remove " + mergedKey + " from ZK due to ZK issue.", e);
success = false;
}
if (!success) {
LOG.error("Fails to remove " + mergedKey + " from ZK, retry it!");
} else {
LOG.info("Removed " + mergedKey);
}
} else {
try {
success = accessor.set(mergedKey, merged, options);
} catch (Exception e) {
LOG.error("Fails to update " + mergedKey + " to ZK due to ZK issue.", e);
success = false;
}
if (!success) {
LOG.error("Fails to update " + mergedKey + " to ZK, retry it! ");
}
}
}
} finally {
queue._running.set(null);
for (Entry e : processed) {
synchronized (e) {
e._sent.set(true);
e.notify();
}
}
}
} else {
synchronized (entry) {
try {
entry.wait(10);
} catch (InterruptedException e) {
LOG.error("Interrupted while committing change, key: " + key + ", record: " + record,
e);
// Restore interrupt status
Thread.currentThread().interrupt();
return false;
}
}
}
}
return success;
}
}
| 9,926 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/HelixManagerProperties.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.InputStream;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Holds helix-manager properties read from
* helix-core/src/main/resources/cluster-manager.properties
*/
public class HelixManagerProperties {
private static final Logger LOG = LoggerFactory.getLogger(HelixManagerProperties.class.getName());
private final Properties _properties = new Properties();
/**
* Initialize properties from a file
* @param propertyFileName
*/
public HelixManagerProperties(String propertyFileName) {
    // try-with-resources ensures the stream is closed even on failure
    try (InputStream stream =
        Thread.currentThread().getContextClassLoader().getResourceAsStream(propertyFileName)) {
      _properties.load(stream);
} catch (Exception e) {
String errMsg = "fail to open properties file: " + propertyFileName;
throw new IllegalArgumentException(errMsg, e);
}
LOG.info("load helix-manager properties: " + _properties);
}
// for test purpose
public HelixManagerProperties() {
// load empty properties
}
/**
* Get properties wrapped as {@link Properties}
* @return Properties
*/
public Properties getProperties() {
return _properties;
}
/**
* get helix version
* @return version read from properties
*/
public String getVersion() {
return this.getProperty("clustermanager.version");
}
/**
* get property for key
* @param key
* @return property associated by key
*/
public String getProperty(String key) {
String value = _properties.getProperty(key);
if (value == null) {
LOG.warn("no property for key: " + key);
}
return value;
}
/**
* return true if version1 >= version2, false otherwise, ignore non-numerical strings
* assume version in format of n.n.n-x-x, where n is number and x is any string
* e.g. 0.6.0-incubating-SNAPSHOT
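 * For example:
 * <pre>{@code
 * versionNoLessThan("0.6.1-incubating", "0.6.0"); // true
 * versionNoLessThan("0.5.9", "0.6.0");            // false
 * }</pre>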
* @param version1
* @param version2
* @return true if version1 >= version2, false otherwise
*/
static boolean versionNoLessThan(String version1, String version2) {
if (version1 == null || version2 == null) {
LOG.warn("fail to compare versions. version1: " + version1 + ", version2: " + version2);
return true;
}
String[] version1Splits = version1.split("(\\.|-)");
String[] version2Splits = version2.split("(\\.|-)");
if (version1Splits == null || version1Splits.length == 0 || version2Splits == null
|| version2Splits.length == 0) {
LOG.warn("fail to compare versions. version1: " + version1 + ", version2: " + version2);
}
for (int i = 0; i < version1Splits.length && i < version2Splits.length; i++) {
try {
int versionNum1 = Integer.parseInt(version1Splits[i]);
int versionNum2 = Integer.parseInt(version2Splits[i]);
if (versionNum1 < versionNum2) {
return false;
} else if (versionNum1 > versionNum2) {
break;
}
} catch (Exception e) {
// ignore non-numerical strings and strings after non-numerical strings
break;
}
}
return true;
}
/**
* return true if participantVersion is no less than minimum supported version for participant
* false otherwise
* @param participantVersion
* @return true if compatible, false otherwise
*/
public boolean isParticipantCompatible(String participantVersion) {
return isFeatureSupported("participant", participantVersion);
}
/**
* return true if participantVersion is no less than minimum supported version for the feature
* false otherwise
* @param featureName
* @param participantVersion
* @return true if supported, false otherwise
*/
public boolean isFeatureSupported(String featureName, String participantVersion) {
String minSupportedVersion = getProperty("minimum_supported_version." + featureName);
return versionNoLessThan(participantVersion, minSupportedVersion);
}
}
| 9,927 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/HelixDefinedState.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * States defined by Helix itself.
 * ERROR: when errors happen during a state transition, the partition transitions to the ERROR
 * state; the participant also invokes state-model.on-err(), ignoring any errors thrown there.
 * When a resource in the ERROR state (and not disabled) is dropped, the controller sends an
 * ERROR->DROPPED transition; if that transition fails, the participant disables the
 * resource/partition. When a resource/partition in the ERROR state is disabled, it is marked
 * disabled but the controller sends no transitions. When a resource/partition in the ERROR
 * state (and not disabled) is reset, the controller sends an ERROR->initial-state transition;
 * if that transition fails, the partition remains in the ERROR state.
 * DROPPED: when a resource in a non-ERROR state (and not disabled) is dropped, the controller
 * sends all the transitions from the current state to the initial state, then an
 * initial-state->DROPPED transition.
 * @see HELIX-43: add support for dropping partitions in error state
*/
public enum HelixDefinedState {
ERROR,
DROPPED
}
| 9,928 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/HelixRebalanceException.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Exception thrown by Helix due to rebalance failures.
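 * <p>
 * For example (the message and cause are placeholders):
 * <pre>{@code
 * throw new HelixRebalanceException("Failed to compute the new assignment",
 *     HelixRebalanceException.Type.FAILED_TO_CALCULATE, cause);
 * }</pre>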
*/
public class HelixRebalanceException extends Exception {
// TODO: Adding static description or other necessary fields into the enum instances for
// TODO: supporting the rebalance monitor to understand the exception.
public enum Type {
INVALID_CLUSTER_STATUS,
INVALID_REBALANCER_STATUS,
FAILED_TO_CALCULATE,
INVALID_INPUT,
UNKNOWN_FAILURE
}
private final Type _type;
public HelixRebalanceException(String message, Type type, Throwable cause) {
super(String.format("%s Failure Type: %s", message, type.name()), cause);
_type = type;
}
public HelixRebalanceException(String message, Type type) {
super(String.format("%s Failure Type: %s", message, type.name()));
_type = type;
}
public Type getFailureType() {
return _type;
}
}
| 9,929 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/HelixProperty.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.zookeeper.datamodel.SessionAwareZNRecord;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.ZNRecordDelta;
import org.apache.helix.zookeeper.zkclient.serialize.ZkSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A wrapper class for ZNRecord. Used as a base class for IdealState, CurrentState, etc.
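 * <p>
 * For example, a raw ZNRecord can be wrapped into a typed property (a sketch; the accessor
 * and path are placeholders):
 * <pre>{@code
 * ZNRecord record = accessor.get(path, null, AccessOption.PERSISTENT);
 * IdealState idealState = HelixProperty.convertToTypedInstance(IdealState.class, record);
 * }</pre>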
*/
public class HelixProperty {
private static final Logger LOG = LoggerFactory.getLogger(HelixProperty.class);
public enum HelixPropertyAttribute {
BUCKET_SIZE, BATCH_MESSAGE_MODE
}
protected final ZNRecord _record;
/**
* Metadata of a HelixProperty
*/
public static class Stat {
// the version field of zookeeper Stat
private int _version;
private long _creationTime;
private long _modifiedTime;
private long _ephemeralOwner;
public Stat(int version, long creationTime, long modifiedTime, long ephemeralOwner) {
_version = version;
_creationTime = creationTime;
_modifiedTime = modifiedTime;
_ephemeralOwner = ephemeralOwner;
}
public Stat(Stat stat) {
_version = stat.getVersion();
_creationTime = stat.getCreationTime();
_modifiedTime = stat.getModifiedTime();
_ephemeralOwner = stat.getEphemeralOwner();
}
public Stat() {
_version = -1;
_creationTime = -1;
_modifiedTime = -1;
_ephemeralOwner = -1;
}
public int getVersion() {
return _version;
}
public void setVersion(int version) {
_version = version;
}
public long getCreationTime() {
return _creationTime;
}
public void setCreationTime(long creationTime) {
_creationTime = creationTime;
}
public long getModifiedTime() {
return _modifiedTime;
}
public void setModifiedTime(long modifiedTime) {
_modifiedTime = modifiedTime;
}
public long getEphemeralOwner() {
return _ephemeralOwner;
}
public void setEphemeralOwner(long ephemeralOwner) {
_ephemeralOwner = ephemeralOwner;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Stat)) {
return false;
}
Stat stat = (Stat) o;
if (_ephemeralOwner != stat._ephemeralOwner) {
return false;
}
if (_version != stat._version) {
return false;
}
if (_creationTime != stat._creationTime) {
return false;
}
return _modifiedTime == stat._modifiedTime;
}
@Override
public int hashCode() {
int result = _version;
result = 31 * result + (int) (_creationTime ^ (_creationTime >>> 32));
result = 31 * result + (int) (_modifiedTime ^ (_modifiedTime >>> 32));
result = 31 * result + (int) (_ephemeralOwner ^ (_ephemeralOwner >>> 32));
return result;
}
@Override
public String toString() {
return "Stat {" + "_version=" + _version + ", _creationTime=" + _creationTime
+ ", _modifiedTime=" + _modifiedTime + ", _ephemeralOwner=" + _ephemeralOwner + '}';
}
}
private Stat _stat;
/**
* Initialize the property with an identifier
* @param id
*/
public HelixProperty(String id) {
this(new ZNRecord(id), id, false);
}
/**
* Initialize the property with an existing ZNRecord
* @param record a deep copy of the record is made, updates to this record will not be reflected
* by the HelixProperty
*/
public HelixProperty(ZNRecord record) {
this(record, true);
}
/**
* Initialize the property with an existing ZNRecord
* @param record
* @param deepCopyRecord set to true to make a copy of the ZNRecord, false otherwise
*/
public HelixProperty(ZNRecord record, boolean deepCopyRecord) {
this(record, record.getId(), deepCopyRecord);
}
/**
* Initialize the property with an existing ZNRecord with new record id
* @param record a deep copy of the record is made, updates to this record will not be reflected by the HelixProperty
* @param id
*/
public HelixProperty(ZNRecord record, String id) {
this(record, id, true);
}
/**
* Initialize the property with an existing ZNRecord with new record id
* @param record
* @param id
* @param deepCopyRecord whether to deep copy the ZNRecord, set to true if subsequent changes to
 * the ZNRecord should not affect this HelixProperty and vice versa, or false otherwise
*/
public HelixProperty(ZNRecord record, String id, boolean deepCopyRecord) {
if (deepCopyRecord) {
_record = record instanceof SessionAwareZNRecord ? new SessionAwareZNRecord(record, id)
: new ZNRecord(record, id);
} else {
_record = record;
}
_stat = new Stat(_record.getVersion(), _record.getCreationTime(), _record.getModifiedTime(),
_record.getEphemeralOwner());
}
/**
* Get the property identifier
* @return the property id
*/
public final String getId() {
return _record.getId();
}
/**
* Get the backing ZNRecord
* @return ZNRecord object associated with this property
*/
public final ZNRecord getRecord() {
return _record;
}
/**
* Set the changes to the backing ZNRecord
* @param deltaList list of ZNRecord updates to be made
*/
public final void setDeltaList(List<ZNRecordDelta> deltaList) {
_record.setDeltaList(deltaList);
}
@Override
public String toString() {
return "ZnRecord=" + _record.toString() + ", Stat=" + _stat.toString();
}
/**
* Get the size of buckets defined
* @return the bucket size, or 0 if not defined
*/
public int getBucketSize() {
String bucketSizeStr = _record.getSimpleField(HelixPropertyAttribute.BUCKET_SIZE.toString());
int bucketSize = 0;
if (bucketSizeStr != null) {
try {
bucketSize = Integer.parseInt(bucketSizeStr);
} catch (NumberFormatException e) {
// OK
}
}
return bucketSize;
}
/**
* Set the size of buckets defined
* @param bucketSize the bucket size (will default to 0 if negative)
*/
public void setBucketSize(int bucketSize) {
if (bucketSize <= 0) {
bucketSize = 0;
}
_record.setSimpleField(HelixPropertyAttribute.BUCKET_SIZE.toString(), "" + bucketSize);
}
/**
* static method that converts ZNRecord to an instance that subclasses HelixProperty
* @param clazz subclass of HelixProperty
* @param record the ZNRecord describing the property
* @return typed instance corresponding to the record, or null if conversion fails
*/
public static <T extends HelixProperty> T convertToTypedInstance(Class<T> clazz,
ZNRecord record) {
if (record == null) {
return null;
}
try {
Constructor<T> constructor = clazz.getConstructor(ZNRecord.class);
return constructor.newInstance(record);
} catch (Exception e) {
LOG.error("Exception convert znrecord: " + record + " to class: " + clazz, e);
}
return null;
}
/**
* Convert a collection of records to typed properties
* @param clazz Subclass of HelixProperty
* @param records the ZNRecords describing the property
* @return list of typed instances for which the conversion succeeded, or null if records is null
*/
public static <T extends HelixProperty> List<T> convertToTypedList(Class<T> clazz,
Collection<ZNRecord> records) {
if (records == null) {
return null;
}
List<T> decorators = new ArrayList<T>();
for (ZNRecord record : records) {
T decorator = HelixProperty.convertToTypedInstance(clazz, record);
if (decorator != null) {
decorators.add(decorator);
}
}
return decorators;
}
/**
* Converts a list of records to a map of the record identifier to typed properties
* @param records the ZNRecords to convert
* @return id --> HelixProperty subclass map
*/
public static <T extends HelixProperty> Map<String, T> convertListToMap(List<T> records) {
if (records == null) {
return Collections.emptyMap();
}
Map<String, T> decorators = new HashMap<String, T>();
for (T record : records) {
decorators.put(record.getId(), record);
}
return decorators;
}
/**
* Convert typed properties to a list of records
* @param typedInstances objects subclassing HelixProperty
* @return a list of ZNRecord objects
*/
public static <T extends HelixProperty> List<ZNRecord> convertToList(List<T> typedInstances) {
if (typedInstances == null) {
return Collections.emptyList();
}
List<ZNRecord> records = new ArrayList<>();
for (T typedInstance : typedInstances) {
records.add(typedInstance.getRecord());
}
return records;
}
/**
* Change the state of batch messaging
* @param enable true to enable, false to disable
*/
public void setBatchMessageMode(boolean enable) {
_record.setSimpleField(HelixPropertyAttribute.BATCH_MESSAGE_MODE.toString(), "" + enable);
}
/**
* Get the state of batch messaging
* @return true if enabled, false if disabled
*/
public boolean getBatchMessageMode() {
String enableStr = _record.getSimpleField(HelixPropertyAttribute.BATCH_MESSAGE_MODE.toString());
if (enableStr == null) {
return false;
}
try {
return Boolean.parseBoolean(enableStr.toLowerCase());
} catch (Exception e) {
return false;
}
}
/**
* Get the metadata (stat) of this record
* @return HelixProperty.Stat
*/
public Stat getStat() {
return _stat;
}
/**
* Set the metadata (stat) for this record
* @param stat
*/
public void setStat(Stat stat) {
_stat = new Stat(stat);
}
/**
* Get property validity
* @return true if valid, false if invalid
*/
public boolean isValid() {
return true;
}
public byte[] serialize(ZkSerializer serializer) {
return serializer.serialize(_record);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj instanceof HelixProperty) {
HelixProperty that = (HelixProperty) obj;
if (that.getRecord() != null) {
return that.getRecord().equals(this.getRecord());
}
}
return false;
}
}
| 9,930 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/ControllerChangeListener.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Interface to implement to respond to controller changes.
*
* @deprecated
* NOTE: This interface definition is moved to {@link org.apache.helix.api.listeners.ControllerChangeListener}
*/
@Deprecated
public interface ControllerChangeListener extends
    org.apache.helix.api.listeners.ControllerChangeListener {
}
| 9,931 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/IdealStateChangeListener.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Interface to implement to listen for changes to the ideal state of resources.
*
* @deprecated
* NOTE: This interface definition is moved to {@link org.apache.helix.api.listeners.IdealStateChangeListener}
*/
@Deprecated
public interface IdealStateChangeListener extends
org.apache.helix.api.listeners.IdealStateChangeListener {
}
| 9,932 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/ClusterMessagingService.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import org.apache.helix.messaging.AsyncCallback;
import org.apache.helix.messaging.handling.MessageHandlerFactory;
import org.apache.helix.model.Message;
/**
 * Provides the ability to:
 * <ul>
 * <li>send a message to a specific component in the cluster (participant, controller, or
 * spectator)</li>
 * <li>broadcast a message to all participants</li>
 * <li>send a message to the instances that hold a specific resource</li>
 * <li>use an asynchronous request-response API: send a message with a correlation id and have a
 * callback invoked when a response arrives, with optional timeout</li>
 * </ul>
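 * <p>
 * Sketch of a criteria-based send (the criteria values and message are placeholders):
 * <pre>{@code
 * Criteria criteria = new Criteria();
 * criteria.setInstanceName("%"); // all instances
 * criteria.setRecipientInstanceType(InstanceType.PARTICIPANT);
 * criteria.setResource("myDB");
 * criteria.setPartition("%");
 * int sent = messagingService.send(criteria, message);
 * }</pre>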
*/
public interface ClusterMessagingService {
/**
* Send message matching the specifications mentioned in recipientCriteria.
* @param recipientCriteria criteria to be met, defined as {@link Criteria}
* @See Criteria
* @param message
* message to be sent. Some attributes of this message will be
* changed as required
* @return the number of messages that were successfully sent.
*/
int send(Criteria recipientCriteria, Message message);
/**
* This will send the message to all instances matching the criteria<br>
* When there is a reply to the message sent AsyncCallback.onReply will be
* invoked. Application can specify a timeout on AsyncCallback. After every
* reply is processed AsyncCallback.isDone will be invoked.<br>
* This method will return after sending the messages. <br>
 * This is useful when a message needs to be sent and the current thread need not
 * wait for the response, since processing is done in another thread.
* @see #send(Criteria, Message)
* @param recipientCriteria
* @param message
* @param callbackOnReply callback to trigger on completion
* @param timeOut Time to wait before failing the send
* @return the number of messages that were successfully sent
*/
int send(Criteria recipientCriteria, Message message, AsyncCallback callbackOnReply, int timeOut);
/**
* @see #send(Criteria, Message, AsyncCallback, int)
* @param recipientCriteria
* @param message
* @param callbackOnReply
* @param timeOut
* @param retryCount maximum number of times to retry the send
* @return the number of messages that were successfully sent
*/
int send(Criteria recipientCriteria, Message message, AsyncCallback callbackOnReply,
int timeOut, int retryCount);
/**
* This will send the message to all instances matching the criteria<br>
 * When there is a reply to the message sent, AsyncCallback.onReply will be
* invoked. Application can specify a timeout on AsyncCallback. After every
* reply is processed AsyncCallback.isDone will be invoked.<br>
 * This method will return only after AsyncCallback.isDone() returns true. <br>
 * This is useful when a message needs to be sent and the current thread has to wait
 * for the response. <br>
* The current thread can use callbackOnReply instance to store application
* specific data.
* @see #send(Criteria, Message, AsyncCallback, int)
* @param recipientCriteria
* @param message
* @param callbackOnReply
* @param timeOut
* @return the number of messages that were successfully sent
*/
int sendAndWait(Criteria recipientCriteria, Message message, AsyncCallback callbackOnReply,
int timeOut);
/**
* @see #send(Criteria, Message, AsyncCallback, int, int)
 * @param recipientCriteria
* @param message
* @param callbackOnReply
* @param timeOut
* @param retryCount
* @return the number of messages that were successfully sent
*/
  int sendAndWait(Criteria recipientCriteria, Message message, AsyncCallback callbackOnReply,
int timeOut, int retryCount);
/**
* This will register a message handler factory to create handlers for
* message. In case client code defines its own message type, it can define a
* message handler factory to create handlers to process those messages.
* Messages are processed in a threadpool which is hosted by cluster manager,
* and cluster manager will call the factory to create handler, and the
* handler is called in the threadpool.
* Note that only one message handler factory can be registered with one
* message type.
* @param type
* The message type that the factory will create handler for
* @param factory
* The per-type message factory
*/
public void registerMessageHandlerFactory(String type, MessageHandlerFactory factory);
/**
* This will register a message handler factory to create handlers for
* message. In case client code defines its own message type, it can define a
* message handler factory to create handlers to process those messages.
* Messages are processed in a threadpool which is hosted by cluster manager,
* and cluster manager will call the factory to create handler, and the
* handler is called in the threadpool.
* Note that only one message handler factory can be registered with one
* message type.
* @param types
* The different message types that the factory will create handler for
* @param factory
* The per-type message factory
*/
public void registerMessageHandlerFactory(List<String> types, MessageHandlerFactory factory);
/**
* This will generate all messages to be sent given the recipientCriteria and MessageTemplate,
* the messages are not sent.
* @param recipientCriteria criteria to be met, defined as {@link Criteria}
* @param messageTemplate the Message on which to base the messages to send
* @return messages to be sent, grouped by the type of instance to send the message to
*/
public Map<InstanceType, List<Message>> generateMessage(final Criteria recipientCriteria,
final Message messageTemplate);
}
| 9,933 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/ExternalViewChangeListener.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Interface to implement to be notified of changes to the external view.
*
* @deprecated
* NOTE: This interface definition is moved to {@link org.apache.helix.api.listeners.ExternalViewChangeListener}
*/
@Deprecated
public interface ExternalViewChangeListener extends
org.apache.helix.api.listeners.ExternalViewChangeListener {
}
| 9,934 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/CurrentStateChangeListener.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Interface to implement to respond to changes in the current state.
*
* @deprecated
* NOTE: This interface definition is moved to {@link org.apache.helix.api.listeners.CurrentStateChangeListener}
*/
@Deprecated
public interface CurrentStateChangeListener extends
org.apache.helix.api.listeners.CurrentStateChangeListener {
}
| 9,935 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/HelixRollbackException.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
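/**
 * Exception used to signal that an in-flight Helix operation should be rolled back
 * rather than failed outright.
 */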
public class HelixRollbackException extends HelixException {
public HelixRollbackException(String message) {
super(message);
}
public HelixRollbackException(Throwable cause) {
super(cause);
}
public HelixRollbackException(String message, Throwable cause) {
super(message, cause);
}
}
| 9,936 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/AccessOption.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.zookeeper.CreateMode;
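/**
 * Bitmask options describing how a node should be created or accessed in the data store.
 */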
public class AccessOption {
public static int PERSISTENT = 0x1;
public static int EPHEMERAL = 0x2;
public static int PERSISTENT_SEQUENTIAL = 0x4;
public static int EPHEMERAL_SEQUENTIAL = 0x8;
public static int THROW_EXCEPTION_IFNOTEXIST = 0x10;
public static int CONTAINER = 0x20;
public static int PERSISTENT_WITH_TTL = 0x40;
public static int PERSISTENT_SEQUENTIAL_WITH_TTL = 0x80;
/**
* Helper method to get zookeeper create mode from options
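 * For example, {@code getMode(AccessOption.PERSISTENT | AccessOption.THROW_EXCEPTION_IFNOTEXIST)}
 * returns {@link CreateMode#PERSISTENT}, since PERSISTENT is the least significant flag set.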
* @param options bitmask representing mode; least significant set flag is selected
* @return zookeeper create mode
*/
public static CreateMode getMode(int options) {
if ((options & PERSISTENT) > 0) {
return CreateMode.PERSISTENT;
} else if ((options & EPHEMERAL) > 0) {
return CreateMode.EPHEMERAL;
} else if ((options & PERSISTENT_SEQUENTIAL) > 0) {
return CreateMode.PERSISTENT_SEQUENTIAL;
} else if ((options & EPHEMERAL_SEQUENTIAL) > 0) {
return CreateMode.EPHEMERAL_SEQUENTIAL;
} else if ((options & CONTAINER) > 0) {
return CreateMode.CONTAINER;
} else if ((options & PERSISTENT_WITH_TTL) > 0) {
return CreateMode.PERSISTENT_WITH_TTL;
} else if ((options & PERSISTENT_SEQUENTIAL_WITH_TTL) > 0) {
return CreateMode.PERSISTENT_SEQUENTIAL_WITH_TTL;
}
return null;
}
/**
 * Helper method to check whether the throw-exception-if-node-not-exist flag is set
 * @param options bitmask containing Zookeeper mode options
 * @return true if {@link #THROW_EXCEPTION_IFNOTEXIST} is set, false otherwise
*/
public static boolean isThrowExceptionIfNotExist(int options) {
return (options & THROW_EXCEPTION_IFNOTEXIST) > 0;
}
}
| 9,937 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/ConfigChangeListener.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * @deprecated replaced by InstanceConfigChangeListener.
 * NOTE: This interface definition is moved to {@link org.apache.helix.api.listeners.ConfigChangeListener}
*/
@Deprecated
public interface ConfigChangeListener
extends org.apache.helix.api.listeners.ConfigChangeListener {
}
| 9,938 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/PropertyKey.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.Objects;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.ClusterConstraints;
import org.apache.helix.model.ClusterStatus;
import org.apache.helix.model.ControllerHistory;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.CustomizedState;
import org.apache.helix.model.CustomizedStateConfig;
import org.apache.helix.model.CustomizedView;
import org.apache.helix.model.Error;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.HealthStat;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.MaintenanceSignal;
import org.apache.helix.model.Message;
import org.apache.helix.model.ParticipantHistory;
import org.apache.helix.model.PauseSignal;
import org.apache.helix.model.RESTConfig;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.model.StatusUpdate;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobContext;
import org.apache.helix.task.WorkflowConfig;
import org.apache.helix.task.WorkflowContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.helix.PropertyType.CONFIGS;
import static org.apache.helix.PropertyType.CONTROLLER;
import static org.apache.helix.PropertyType.CURRENTSTATES;
import static org.apache.helix.PropertyType.CUSTOMIZEDSTATES;
import static org.apache.helix.PropertyType.CUSTOMIZEDVIEW;
import static org.apache.helix.PropertyType.ERRORS;
import static org.apache.helix.PropertyType.ERRORS_CONTROLLER;
import static org.apache.helix.PropertyType.EXTERNALVIEW;
import static org.apache.helix.PropertyType.HISTORY;
import static org.apache.helix.PropertyType.IDEALSTATES;
import static org.apache.helix.PropertyType.INSTANCE_HISTORY;
import static org.apache.helix.PropertyType.LEADER;
import static org.apache.helix.PropertyType.LIVEINSTANCES;
import static org.apache.helix.PropertyType.MAINTENANCE;
import static org.apache.helix.PropertyType.MESSAGES;
import static org.apache.helix.PropertyType.MESSAGES_CONTROLLER;
import static org.apache.helix.PropertyType.PAUSE;
import static org.apache.helix.PropertyType.RESTCONFIGS;
import static org.apache.helix.PropertyType.STATEMODELDEFS;
import static org.apache.helix.PropertyType.STATUSUPDATES;
import static org.apache.helix.PropertyType.STATUSUPDATES_CONTROLLER;
import static org.apache.helix.PropertyType.TARGETEXTERNALVIEW;
import static org.apache.helix.PropertyType.TASKCURRENTSTATES;
/**
* Key allowing for type-safe lookups of and conversions to {@link HelixProperty} objects.
*/
public class PropertyKey {
private static Logger LOG = LoggerFactory.getLogger(PropertyKey.class);
public PropertyType _type;
private final String[] _params;
Class<? extends HelixProperty> _typeClazz;
// if type is CONFIGS, set configScope; otherwise null
ConfigScopeProperty _configScope;
/**
* Instantiate with a type, associated class, and parameters
* @param type
* @param typeClazz
* @param params parameters associated with the key, the first of which is the cluster name
*/
public PropertyKey(PropertyType type, Class<? extends HelixProperty> typeClazz,
String... params) {
this(type, null, typeClazz, params);
}
/**
* Instantiate with a type, scope, associated class, and parameters
* @param type
* @param configScope
* @param typeClazz
* @param params parameters associated with the key, the first of which is the cluster name
*/
public PropertyKey(PropertyType type, ConfigScopeProperty configScope,
Class<? extends HelixProperty> typeClazz, String... params) {
_type = type;
if (params == null || params.length == 0 || Arrays.asList(params).contains(null)) {
throw new IllegalArgumentException("params cannot be null");
}
_params = params;
_typeClazz = typeClazz;
_configScope = configScope;
}
@Override
public int hashCode() {
int result = (_type != null ? _type.hashCode() : 0);
result = 31 * result + Arrays.hashCode(_params);
result = 31 * result + (_typeClazz != null ? _typeClazz.hashCode() : 0);
result = 31 * result + (_configScope != null ? _configScope.hashCode() : 0);
return result;
}
@Override
public String toString() {
return getPath();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
    if (!(o instanceof PropertyKey)) {
return false;
}
PropertyKey key = (PropertyKey) o;
if (_type != key._type) {
return false;
}
    // Arrays.equals performs the element-wise comparison we want for String[]
    if (!Arrays.equals(_params, key._params)) {
return false;
}
// Avoid NPE when one typeClazz is null
if (!Objects.equals(_typeClazz, key._typeClazz)) {
return false;
}
return _configScope == key._configScope;
}
/**
* Get the path associated with this property
* @return absolute path to the property
*/
public String getPath() {
String clusterName = _params[0];
String[] subKeys = Arrays.copyOfRange(_params, 1, _params.length);
String path = PropertyPathBuilder.getPath(_type, clusterName, subKeys);
if (path == null) {
LOG.error("Invalid property key with type:" + _type + "subKeys:" + Arrays.toString(_params));
}
return path;
}
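  /*
   * A minimal usage sketch (the cluster and instance names below are hypothetical):
   *
   *   PropertyKey.Builder keyBuilder = new PropertyKey.Builder("myCluster");
   *   PropertyKey key = keyBuilder.liveInstance("host_12918");
   *   key.getPath(); // resolves to "/myCluster/LIVEINSTANCES/host_12918"
   */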
/**
* PropertyKey builder for a cluster
*/
public static class Builder {
private final String _clusterName;
/**
* Instantiate with a cluster name
* @param clusterName
*/
public Builder(String clusterName) {
_clusterName = clusterName;
}
/**
* Get a property key associated with {@link IdealState}
* @return {@link PropertyKey}
*/
public PropertyKey idealStates() {
return new PropertyKey(IDEALSTATES, IdealState.class, _clusterName);
}
/**
* Get a property key associated with {@link IdealState} and a resource
* @param resourceName
* @return {@link PropertyKey}
*/
public PropertyKey idealStates(String resourceName) {
return new PropertyKey(IDEALSTATES, IdealState.class, _clusterName, resourceName);
}
/**
* Get a property key associated with {@link StateModelDefinition}
* @return {@link PropertyKey}
*/
public PropertyKey stateModelDefs() {
return new PropertyKey(STATEMODELDEFS, StateModelDefinition.class, _clusterName);
}
/**
* Get a property key associated with {@link StateModelDefinition} for a given state model name
* @param stateModelName
* @return {@link PropertyKey}
*/
public PropertyKey stateModelDef(String stateModelName) {
return new PropertyKey(STATEMODELDEFS, StateModelDefinition.class, _clusterName,
stateModelName);
}
/**
* Get a property key associated with all cluster configurations
* @return {@link PropertyKey}
*/
public PropertyKey clusterConfigs() {
return new PropertyKey(CONFIGS, ConfigScopeProperty.CLUSTER, ClusterConfig.class,
_clusterName, ConfigScopeProperty.CLUSTER.toString());
}
/**
* Get a property key associated with this cluster configuration
* @return {@link PropertyKey}
*/
public PropertyKey clusterConfig() {
return new PropertyKey(CONFIGS, ConfigScopeProperty.CLUSTER, ClusterConfig.class,
_clusterName, ConfigScopeProperty.CLUSTER.toString(), _clusterName);
}
/**
* Get a property key associated with this cluster status
* @return {@link PropertyKey}
*/
public PropertyKey clusterStatus() {
return new PropertyKey(PropertyType.STATUS, ClusterStatus.class, _clusterName, _clusterName);
}
/**
* Get a property key associated with this Cloud configuration
* @return {@link PropertyKey}
*/
public PropertyKey cloudConfig() {
return new PropertyKey(CONFIGS, ConfigScopeProperty.CLOUD, CloudConfig.class,
_clusterName, ConfigScopeProperty.CLOUD.name(), _clusterName);
}
/**
* Get a property key associated with this customized state aggregation configuration
* @return {@link PropertyKey}
*/
public PropertyKey customizedStateConfig() {
return new PropertyKey(CONFIGS, ConfigScopeProperty.CUSTOMIZED_STATE,
CustomizedStateConfig.class, _clusterName,
ConfigScopeProperty.CUSTOMIZED_STATE.name(), _clusterName);
}
/**
* Get a property key associated with {@link InstanceConfig}
* @return {@link PropertyKey}
*/
public PropertyKey instanceConfigs() {
return new PropertyKey(CONFIGS, ConfigScopeProperty.PARTICIPANT, InstanceConfig.class,
_clusterName, ConfigScopeProperty.PARTICIPANT.toString());
}
/**
* Get a property key associated with {@link InstanceConfig} for a specific instance
* @param instanceName
* @return {@link PropertyKey}
*/
public PropertyKey instanceConfig(String instanceName) {
return new PropertyKey(CONFIGS, ConfigScopeProperty.PARTICIPANT, InstanceConfig.class,
_clusterName, ConfigScopeProperty.PARTICIPANT.toString(), instanceName);
}
/**
* Get a property key associated with resource configurations.
* @return {@link PropertyKey}
*/
public PropertyKey resourceConfigs() {
return new PropertyKey(CONFIGS, ConfigScopeProperty.RESOURCE, ResourceConfig.class,
_clusterName, ConfigScopeProperty.RESOURCE.toString());
}
/**
* Get a property key associated with a specific resource configuration.
* @param resourceName
* @return {@link PropertyKey}
*/
public PropertyKey resourceConfig(String resourceName) {
return new PropertyKey(CONFIGS, ConfigScopeProperty.RESOURCE, ResourceConfig.class,
_clusterName, ConfigScopeProperty.RESOURCE.toString(), resourceName);
}
/**
* Get a property key associated with a partition
* @param resourceName
* @param partitionName
* @return {@link PropertyKey}
*/
public PropertyKey partitionConfig(String resourceName, String partitionName) {
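      // NOTE: partitionName is not part of the generated path; partition configs
      // resolve to the enclosing resource config ZNode.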
return new PropertyKey(CONFIGS, ConfigScopeProperty.RESOURCE, HelixProperty.class,
_clusterName, ConfigScopeProperty.RESOURCE.toString(), resourceName);
}
/**
* Get a property key associated with a partition configuration
* @param instanceName
* @param resourceName
* @param partitionName
* @return {@link PropertyKey}
*/
public PropertyKey partitionConfig(String instanceName, String resourceName,
String partitionName) {
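      // NOTE: instanceName and partitionName are not part of the generated path;
      // partition configs resolve to the enclosing resource config ZNode.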
return new PropertyKey(CONFIGS, ConfigScopeProperty.RESOURCE, HelixProperty.class,
_clusterName, ConfigScopeProperty.RESOURCE.toString(), resourceName);
}
/**
* Get a property key associated with {@link ClusterConstraints}
* @return {@link PropertyKey}
*/
public PropertyKey constraints() {
return new PropertyKey(CONFIGS, ClusterConstraints.class, _clusterName,
ConfigScopeProperty.CONSTRAINT.toString());
}
/**
* Get a property key associated with a specific {@link ClusterConstraints}
* @param constraintType
* @return {@link PropertyKey}
*/
public PropertyKey constraint(String constraintType) {
return new PropertyKey(CONFIGS, ClusterConstraints.class, _clusterName,
ConfigScopeProperty.CONSTRAINT.toString(), constraintType);
}
/**
* Get a property key associated with {@link LiveInstance}
* @return {@link PropertyKey}
*/
public PropertyKey liveInstances() {
return new PropertyKey(LIVEINSTANCES, LiveInstance.class, _clusterName);
}
/**
* Get a property key associated with a specific {@link LiveInstance}
* @param instanceName
* @return {@link PropertyKey}
*/
public PropertyKey liveInstance(String instanceName) {
return new PropertyKey(LIVEINSTANCES, LiveInstance.class, _clusterName, instanceName);
}
/**
* Get a property key associated with all instances
* @return {@link PropertyKey}
*/
public PropertyKey instances() {
return new PropertyKey(PropertyType.INSTANCES, null, _clusterName);
}
/**
* Get a property key associated with specified instance
* @return {@link PropertyKey}
*/
public PropertyKey instance(String instanceName) {
return new PropertyKey(PropertyType.INSTANCES, null, _clusterName, instanceName);
}
/**
* Get a property key associated with {@link Message} for an instance
* @param instanceName
* @return {@link PropertyKey}
*/
public PropertyKey messages(String instanceName) {
return new PropertyKey(MESSAGES, Message.class, _clusterName, instanceName);
}
/**
* Get a property key associated with {@link Error} for an instance
* @param instanceName
* @return {@link PropertyKey}
*/
public PropertyKey errors(String instanceName) {
return new PropertyKey(ERRORS, Error.class, _clusterName, instanceName);
}
/**
* Get a property key associated with {@link Error} for an instance under a session
* @param instanceName
* @param sessionId
* @return {@link PropertyKey}
*/
public PropertyKey errors(String instanceName, String sessionId) {
return new PropertyKey(ERRORS, Error.class, _clusterName, instanceName, sessionId);
}
/**
* Get a property key associated with {@link Error} for an instance under a session of
* specified resource
* @param instanceName
* @param sessionId
* @param resourceName
* @return {@link PropertyKey}
*/
public PropertyKey errors(String instanceName, String sessionId, String resourceName) {
return new PropertyKey(ERRORS, Error.class, _clusterName, instanceName, sessionId,
resourceName);
}
public PropertyKey participantHistory(String instanceName) {
return new PropertyKey(INSTANCE_HISTORY, ParticipantHistory.class, _clusterName,
instanceName);
}
/**
* Get a property key associated with a specific {@link Message} on an instance
* @param instanceName
* @param messageId
* @return {@link PropertyKey}
*/
public PropertyKey message(String instanceName, String messageId) {
return new PropertyKey(MESSAGES, Message.class, _clusterName, instanceName, messageId);
}
/**
* Get a property key associated with {@link CurrentState} of an instance
* @param instanceName
* @return {@link PropertyKey}
*/
public PropertyKey sessions(String instanceName) {
return new PropertyKey(CURRENTSTATES, CurrentState.class, _clusterName, instanceName);
}
/**
* Get a property key associated with {@link CurrentState} of an instance and session
* @param instanceName
* @param sessionId
* @return {@link PropertyKey}
*/
public PropertyKey currentStates(String instanceName, String sessionId) {
return new PropertyKey(CURRENTSTATES, CurrentState.class, _clusterName, instanceName,
sessionId);
}
/**
* Get a property key associated with {@link CurrentState} of an instance, session, and
* resource
* @param instanceName
* @param sessionId
* @param resourceName
* @return {@link PropertyKey}
*/
public PropertyKey currentState(String instanceName, String sessionId, String resourceName) {
return new PropertyKey(CURRENTSTATES, CurrentState.class, _clusterName, instanceName,
sessionId, resourceName);
}
/**
* Get a property key associated with {@link CurrentState} of an instance, session, resource,
* and bucket name
* @param instanceName
* @param sessionId
* @param resourceName
* @param bucketName
* @return {@link PropertyKey}
*/
public PropertyKey currentState(String instanceName, String sessionId, String resourceName,
String bucketName) {
if (bucketName == null) {
return new PropertyKey(CURRENTSTATES, CurrentState.class, _clusterName, instanceName,
sessionId, resourceName);
} else {
return new PropertyKey(CURRENTSTATES, CurrentState.class, _clusterName, instanceName,
sessionId, resourceName, bucketName);
}
}
/**
* Get a property key associated with {@link CurrentState} of an instance. This key is for
* TaskCurrentState specifically.
* @param instanceName
* @return {@link PropertyKey}
*/
public PropertyKey taskCurrentStateSessions(String instanceName) {
return new PropertyKey(TASKCURRENTSTATES, CurrentState.class, _clusterName, instanceName);
}
/**
* Get a property key associated with {@link CurrentState} of an instance and session. This key
* is for TaskCurrentState specifically.
* @param instanceName
* @param sessionId
* @return {@link PropertyKey}
*/
public PropertyKey taskCurrentStates(String instanceName, String sessionId) {
return new PropertyKey(TASKCURRENTSTATES, CurrentState.class, _clusterName, instanceName,
sessionId);
}
/**
* Get a property key associated with {@link CurrentState} of an instance, session, and
* job. This key is for TaskCurrentState specifically.
* @param instanceName
* @param sessionId
* @param jobName
* @return {@link PropertyKey}
*/
public PropertyKey taskCurrentState(String instanceName, String sessionId, String jobName) {
return new PropertyKey(TASKCURRENTSTATES, CurrentState.class, _clusterName, instanceName,
sessionId, jobName);
}
/**
* Get a property key associated with {@link CurrentState} of an instance, session, job,
* and bucket name. This key is for TaskCurrentState specifically.
* @param instanceName
* @param sessionId
* @param jobName
* @param bucketName
* @return {@link PropertyKey}
*/
public PropertyKey taskCurrentState(String instanceName, String sessionId, String jobName,
String bucketName) {
if (bucketName == null) {
return new PropertyKey(TASKCURRENTSTATES, CurrentState.class, _clusterName, instanceName,
sessionId, jobName);
}
return new PropertyKey(TASKCURRENTSTATES, CurrentState.class, _clusterName, instanceName,
sessionId, jobName, bucketName);
}
/**
* Get a property key associated with the root of {@link CustomizedState} of an instance
* @param instanceName
* @return {@link PropertyKey}
*/
public PropertyKey customizedStatesRoot(String instanceName) {
return new PropertyKey(CUSTOMIZEDSTATES, CustomizedState.class, _clusterName, instanceName);
}
/**
* Get a property key associated with {@link CustomizedState} of an instance and customized state
* @param instanceName
* @param customizedStateName
* @return {@link PropertyKey}
*/
public PropertyKey customizedStates(String instanceName, String customizedStateName) {
return new PropertyKey(CUSTOMIZEDSTATES, CustomizedState.class, _clusterName, instanceName,
customizedStateName);
}
/**
* Get a property key associated with {@link CustomizedState} of an instance, customized state, and resource
* @param instanceName
* @param customizedStateName
* @param resourceName
* @return {@link PropertyKey}
*/
public PropertyKey customizedState(String instanceName, String customizedStateName,
String resourceName) {
return new PropertyKey(CUSTOMIZEDSTATES, CustomizedState.class, _clusterName, instanceName,
customizedStateName, resourceName);
}
/**
* Get a property key associated with {@link StatusUpdate} of an instance, session, resource,
* and partition
* @param instanceName
* @param sessionId
* @param resourceName
* @param partitionName
* @return {@link PropertyKey}
*/
public PropertyKey stateTransitionStatus(String instanceName, String sessionId,
String resourceName, String partitionName) {
return new PropertyKey(STATUSUPDATES, StatusUpdate.class, _clusterName, instanceName,
sessionId, resourceName, partitionName);
}
/**
* Get a property key associated with {@link StatusUpdate} of an instance, session, and
* resource
* @param instanceName
* @param sessionId
* @param resourceName
* @return {@link PropertyKey}
*/
public PropertyKey stateTransitionStatus(String instanceName, String sessionId,
String resourceName) {
return new PropertyKey(STATUSUPDATES, StatusUpdate.class, _clusterName, instanceName,
sessionId, resourceName);
}
/**
* Get a property key associated with {@link StatusUpdate} of an instance and session
* @param instanceName
* @param sessionId
* @return {@link PropertyKey}
*/
public PropertyKey stateTransitionStatus(String instanceName, String sessionId) {
return new PropertyKey(STATUSUPDATES, StatusUpdate.class, _clusterName, instanceName,
sessionId);
}
/**
* Get a property key associated with {@link StatusUpdate} of an instance
* @param instanceName
* @return {@link PropertyKey}
*/
public PropertyKey stateTransitionStatus(String instanceName) {
return new PropertyKey(STATUSUPDATES, StatusUpdate.class, _clusterName, instanceName);
}
/**
* Used to get status update for a NON STATE TRANSITION type
* @param instanceName
* @param sessionId
* @param msgType
* @param msgId
* @return {@link PropertyKey}
*/
public PropertyKey taskStatus(String instanceName, String sessionId, String msgType,
String msgId) {
return new PropertyKey(STATUSUPDATES, StatusUpdate.class, _clusterName, instanceName,
sessionId, msgType, msgId);
}
/**
* Get a property key associated with {@link Error} of an instance, session, resource,
* and partition
* @param instanceName
* @param sessionId
* @param resourceName
* @param partitionName
* @return {@link PropertyKey}
*/
public PropertyKey stateTransitionError(String instanceName, String sessionId,
String resourceName, String partitionName) {
return new PropertyKey(ERRORS, Error.class, _clusterName, instanceName, sessionId,
resourceName, partitionName);
}
/**
* Get a property key associated with {@link Error} of an instance, session, and
* resource
* @param instanceName
* @param sessionId
* @param resourceName
* @return {@link PropertyKey}
*/
public PropertyKey stateTransitionErrors(String instanceName, String sessionId,
String resourceName) {
return new PropertyKey(ERRORS, Error.class, _clusterName, instanceName, sessionId,
resourceName);
}
/**
     * Get a property key associated with {@link Error} of an instance
* @param instanceName
* @return {@link PropertyKey}
*/
public PropertyKey stateTransitionErrors(String instanceName) {
return new PropertyKey(ERRORS, Error.class, _clusterName, instanceName);
}
/**
     * Used to get the error record for a NON STATE TRANSITION type
* @param instanceName
* @param sessionId
* @param msgType
* @param msgId
* @return {@link PropertyKey}
*/
public PropertyKey taskError(String instanceName, String sessionId, String msgType,
String msgId) {
return new PropertyKey(ERRORS, null, _clusterName, instanceName, sessionId, msgType, msgId);
}
/**
* Get a property key associated with all {@link ExternalView}
* @return {@link PropertyKey}
*/
public PropertyKey externalViews() {
return new PropertyKey(EXTERNALVIEW, ExternalView.class, _clusterName);
}
/**
* Get a property key associated with an {@link ExternalView} of a resource
* @param resourceName
* @return {@link PropertyKey}
*/
public PropertyKey externalView(String resourceName) {
return new PropertyKey(EXTERNALVIEW, ExternalView.class, _clusterName, resourceName);
}
/**
* Get a property key associated with all {@link CustomizedView}
* @return {@link PropertyKey}
*/
public PropertyKey customizedViews() {
return new PropertyKey(CUSTOMIZEDVIEW, CustomizedView.class, _clusterName);
}
/**
     * Get a property key associated with a {@link CustomizedView} of a type
* @param customizedStateType
* @return {@link PropertyKey}
*/
public PropertyKey customizedView(String customizedStateType) {
return new PropertyKey(CUSTOMIZEDVIEW, CustomizedView.class, _clusterName, customizedStateType);
}
/**
     * Get a property key associated with a {@link CustomizedView} of a type and resource
     * @param customizedStateType
     * @param resourceName
     * @return {@link PropertyKey}
*/
public PropertyKey customizedView(String customizedStateType, String resourceName) {
return new PropertyKey(CUSTOMIZEDVIEW, CustomizedView.class, _clusterName, customizedStateType,
resourceName);
}
/**
* Get a property key associated with all target external view
* @return {@link PropertyKey}
*/
public PropertyKey targetExternalViews() {
return new PropertyKey(TARGETEXTERNALVIEW, ExternalView.class, _clusterName);
}
/**
     * Get a property key associated with a target external view of a resource
* @param resourceName
* @return {@link PropertyKey}
*/
public PropertyKey targetExternalView(String resourceName) {
return new PropertyKey(TARGETEXTERNALVIEW, ExternalView.class, _clusterName, resourceName);
}
/**
* Get a property key associated with a controller
* @return {@link PropertyKey}
*/
public PropertyKey controller() {
return new PropertyKey(CONTROLLER, null, _clusterName);
}
/**
* Get a property key associated with {@link Error} of controller errors
* @return {@link PropertyKey}
*/
public PropertyKey controllerTaskErrors() {
return new PropertyKey(ERRORS_CONTROLLER, Error.class, _clusterName);
}
/**
* Get a property key associated with {@link Error} of a controller error
* @param errorId
* @return {@link PropertyKey}
*/
public PropertyKey controllerTaskError(String errorId) {
return new PropertyKey(ERRORS_CONTROLLER, Error.class, _clusterName, errorId);
}
/**
* Get a property key associated with {@link StatusUpdate} of controller status updates
* @param subPath
* @return {@link PropertyKey}
*/
public PropertyKey controllerTaskStatuses(String subPath) {
return new PropertyKey(STATUSUPDATES_CONTROLLER, StatusUpdate.class, _clusterName, subPath);
}
/**
* Get a property key associated with {@link StatusUpdate} of a controller status update
* @param subPath
* @param recordName
* @return {@link PropertyKey}
*/
public PropertyKey controllerTaskStatus(String subPath, String recordName) {
return new PropertyKey(STATUSUPDATES_CONTROLLER, StatusUpdate.class, _clusterName, subPath,
recordName);
}
/**
* Get a property key associated with {@link StatusUpdate} of controller status updates
* @return {@link PropertyKey}
*/
public PropertyKey controllerTaskStatuses() {
return new PropertyKey(STATUSUPDATES_CONTROLLER, StatusUpdate.class, _clusterName);
}
/**
* Get a property key associated with all {@link Message}s for the controller
* @return {@link PropertyKey}
*/
public PropertyKey controllerMessages() {
return new PropertyKey(MESSAGES_CONTROLLER, Message.class, _clusterName);
}
/**
* Get a property key associated with a {@link Message} for the controller
* @param msgId
* @return {@link PropertyKey}
*/
public PropertyKey controllerMessage(String msgId) {
return new PropertyKey(MESSAGES_CONTROLLER, Message.class, _clusterName, msgId);
}
/**
* Get a property key associated with {@link ControllerHistory}
* @return {@link PropertyKey}
*/
public PropertyKey controllerLeaderHistory() {
return new PropertyKey(HISTORY, ControllerHistory.class, _clusterName);
}
/**
* Get a property key associated with a {@link LiveInstance} leader
* @return {@link PropertyKey}
*/
public PropertyKey controllerLeader() {
return new PropertyKey(LEADER, LiveInstance.class, _clusterName);
}
/**
* Get a property key associated with {@link PauseSignal}
* @return {@link PropertyKey}
*/
public PropertyKey pause() {
return new PropertyKey(PAUSE, PauseSignal.class, _clusterName);
}
/**
* Get a property key associated with {@link MaintenanceSignal}
* @return {@link PropertyKey}
*/
public PropertyKey maintenance() {
return new PropertyKey(MAINTENANCE, MaintenanceSignal.class, _clusterName);
}
/**
* Get a property key associated with a {@link HealthStat} for an instance
* @param instanceName
* @param id identifies the statistics
* @return {@link PropertyKey}
*/
public PropertyKey healthReport(String instanceName, String id) {
return new PropertyKey(PropertyType.HEALTHREPORT, HealthStat.class, _clusterName,
instanceName, id);
}
/**
* Get a property key associated with {@link HealthStat}s for an instance
* @param instanceName
* @return {@link PropertyKey}
*/
public PropertyKey healthReports(String instanceName) {
return new PropertyKey(PropertyType.HEALTHREPORT, HealthStat.class, _clusterName,
instanceName);
}
/**
* Get a PropertyKey associated with root path for Task Framework-related resources' configs.
* @return {@link PropertyKey}
*/
public PropertyKey workflowConfigZNodes() {
return new PropertyKey(PropertyType.TASK_CONFIG_ROOT, null, _clusterName);
}
/**
* Get a PropertyKey associated with root path for Task Framework-related resources' contexts.
* @return {@link PropertyKey}
*/
public PropertyKey workflowContextZNodes() {
return new PropertyKey(PropertyType.TASK_CONTEXT_ROOT, null, _clusterName);
}
/**
* Get a PropertyKey associated with {@link WorkflowConfig} for easier path generation.
* @param workflowName
* @return {@link PropertyKey}
*/
public PropertyKey workflowConfigZNode(String workflowName) {
return new PropertyKey(PropertyType.WORKFLOW_CONFIG, WorkflowConfig.class, _clusterName,
workflowName);
}
/**
* Get a PropertyKey associated with {@link WorkflowContext} for easier path generation.
* @param workflowName
* @return {@link PropertyKey}
*/
public PropertyKey workflowContextZNode(String workflowName) {
return new PropertyKey(PropertyType.WORKFLOW_CONTEXT, WorkflowContext.class, _clusterName,
workflowName);
}
/**
* Get a PropertyKey associated with {@link JobConfig} for easier path generation.
* @param workflowName
* @param jobName
     * @return {@link PropertyKey}
*/
public PropertyKey jobConfigZNode(String workflowName, String jobName) {
return new PropertyKey(PropertyType.JOB_CONFIG, JobConfig.class, _clusterName, workflowName,
jobName);
}
/**
* Get a PropertyKey associated with {@link JobContext} for easier path generation.
* @param workflowName
* @param jobName
     * @return {@link PropertyKey}
*/
public PropertyKey jobContextZNode(String workflowName, String jobName) {
return new PropertyKey(PropertyType.JOB_CONTEXT, JobContext.class, _clusterName,
workflowName, jobName);
}
/**
* Get a property key associated with {@link WorkflowContext} for easier path generation.
* TODO: Below returns the old path for WorkflowContexts
* @param workflowName
* @return {@link PropertyKey}
*/
@Deprecated
public PropertyKey workflowContext(String workflowName) {
return new PropertyKey(PropertyType.WORKFLOWCONTEXT, WorkflowContext.class, _clusterName,
workflowName);
}
/**
     * Get a property key associated with {@link RESTConfig}
* @return {@link PropertyKey}
*/
public PropertyKey restConfig() {
return new PropertyKey(RESTCONFIGS, RESTConfig.class, _clusterName);
}
}
/**
* Get the associated property type
* @return {@link PropertyType}
*/
public PropertyType getType() {
return _type;
}
/**
* Get parameters associated with the key
* @return the parameters in the same order they were provided
*/
public String[] getParams() {
return _params;
}
/**
* Get the associated class of this property
* @return subclass of {@link HelixProperty}
*/
public Class<? extends HelixProperty> getTypeClass() {
return _typeClazz;
}
/**
* Get the scope of this property
* @return {@link ConfigScopeProperty}
*/
public ConfigScopeProperty getConfigScope() {
return _configScope;
}
}
// File: Create_ds/helix/helix-core/src/main/java/org/apache/helix/HelixPropertyFactory.java
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Singleton factory that builds different types of Helix property, e.g. Helix manager property.
*/
public final class HelixPropertyFactory {
private static final Logger LOG = LoggerFactory.getLogger(HelixPropertyFactory.class);
private static final String HELIX_PARTICIPANT_PROPERTY_FILE =
SystemPropertyKeys.HELIX_MANAGER_PROPERTIES;
private static class SingletonHelper {
private static final HelixPropertyFactory INSTANCE = new HelixPropertyFactory();
}
public static HelixPropertyFactory getInstance() {
return SingletonHelper.INSTANCE;
}
/**
* Retrieve Helix manager property. It returns the property object with default values.
* Clients may override these values.
*/
public HelixManagerProperty getHelixManagerProperty(String zkAddress, String clusterName) {
CloudConfig cloudConfig = getCloudConfig(zkAddress, clusterName, null);
Properties properties = new Properties();
    // try-with-resources closes the stream even on failure; a missing resource is
    // surfaced as an IOException instead of a NullPointerException
    try (InputStream stream = Thread.currentThread().getContextClassLoader()
        .getResourceAsStream(HELIX_PARTICIPANT_PROPERTY_FILE)) {
      if (stream == null) {
        throw new IOException("Resource not found: " + HELIX_PARTICIPANT_PROPERTY_FILE);
      }
      properties.load(stream);
} catch (IOException e) {
String errMsg = String.format("failed to open Helix participant properties file: %s",
HELIX_PARTICIPANT_PROPERTY_FILE);
throw new IllegalArgumentException(errMsg, e);
}
LOG.info("HelixPropertyFactory successfully loaded helix participant properties: {}",
properties);
return new HelixManagerProperty(properties, cloudConfig);
}
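  /*
   * A minimal usage sketch (the ZK address and cluster name are hypothetical):
   *
   *   HelixManagerProperty managerProperty = HelixPropertyFactory.getInstance()
   *       .getHelixManagerProperty("localhost:2181", "myCluster");
   */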
/**
* Retrieve the CloudConfig of the cluster if available.
* Note: the reason we create a dedicated zk client here is because we need an isolated access to
* ZK in order to create a HelixManager instance.
* If shared zk client instance is used, this logic may break if users write tests that shut down
* ZK server and start again at a 0 zxid because the shared client would have a higher zxid.
   * @param zkAddress ZooKeeper address; may be null when multi-ZK (realm-aware) mode is enabled
   * @param clusterName name of the cluster whose CloudConfig is read
   * @return the cluster's {@link CloudConfig}, or an empty CloudConfig if none is set
*/
public static CloudConfig getCloudConfig(String zkAddress, String clusterName) {
return getCloudConfig(zkAddress, clusterName, null);
}
public static CloudConfig getCloudConfig(String zkAddress, String clusterName,
RealmAwareZkClient.RealmAwareZkConnectionConfig realmAwareZkConnectionConfig) {
CloudConfig cloudConfig;
RealmAwareZkClient dedicatedZkClient = null;
try {
if (Boolean.getBoolean(SystemPropertyKeys.MULTI_ZK_ENABLED) || zkAddress == null) {
// If the multi ZK config is enabled or zkAddress is null, use realm-aware mode with
// DedicatedZkClient
try {
if (realmAwareZkConnectionConfig == null) {
realmAwareZkConnectionConfig =
new RealmAwareZkClient.RealmAwareZkConnectionConfig.Builder()
.setRealmMode(RealmAwareZkClient.RealmMode.SINGLE_REALM)
.setZkRealmShardingKey("/" + clusterName).build();
}
dedicatedZkClient =
DedicatedZkClientFactory.getInstance().buildZkClient(realmAwareZkConnectionConfig);
} catch (IOException | InvalidRoutingDataException e) {
          throw new HelixException("Not able to connect in multi-ZK mode!", e);
}
} else {
        // Use a dedicated ZK client in single-ZK mode
HelixZkClient.ZkConnectionConfig connectionConfig =
new HelixZkClient.ZkConnectionConfig(zkAddress);
dedicatedZkClient = DedicatedZkClientFactory.getInstance().buildZkClient(connectionConfig);
}
dedicatedZkClient.setZkSerializer(new ZNRecordSerializer());
ConfigAccessor configAccessor = new ConfigAccessor(dedicatedZkClient);
// The try-catch logic is for backward compatibility reason only. Even if the cluster is not set
// up yet, constructing a new ZKHelixManager should not throw an exception
try {
cloudConfig = configAccessor.getCloudConfig(clusterName);
if (cloudConfig == null) {
cloudConfig = new CloudConfig();
}
} catch (HelixException e) {
cloudConfig = new CloudConfig();
}
} finally {
// Use a try-finally to make sure zkclient connection is closed properly
if (dedicatedZkClient != null && !dedicatedZkClient.isClosed()) {
dedicatedZkClient.close();
}
}
return cloudConfig;
}
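  /*
   * A usage sketch (address and cluster name are hypothetical). The read is
   * isolated: the dedicated ZK client is always closed before returning.
   *
   *   CloudConfig cloudConfig =
   *       HelixPropertyFactory.getCloudConfig("localhost:2181", "myCluster");
   */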
}
// File: Create_ds/helix/helix-core/src/main/java/org/apache/helix/PropertyPathBuilder.java
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.helix.model.ClusterStatus;
import org.apache.helix.model.ControllerHistory;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.CustomizedView;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.MaintenanceSignal;
import org.apache.helix.model.Message;
import org.apache.helix.model.PauseSignal;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.model.StatusUpdate;
import org.apache.helix.task.TaskConstants;
import org.apache.helix.task.WorkflowContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utility mapping properties to their Zookeeper locations
*/
public class PropertyPathBuilder {
  private static final Logger logger = LoggerFactory.getLogger(PropertyPathBuilder.class);
static final Map<PropertyType, Map<Integer, String>> templateMap =
new HashMap<PropertyType, Map<Integer, String>>();
@Deprecated // typeToClassMapping is not being used anywhere
static final Map<PropertyType, Class<? extends HelixProperty>> typeToClassMapping =
new HashMap<PropertyType, Class<? extends HelixProperty>>();
static {
typeToClassMapping.put(PropertyType.LIVEINSTANCES, LiveInstance.class);
typeToClassMapping.put(PropertyType.IDEALSTATES, IdealState.class);
typeToClassMapping.put(PropertyType.CONFIGS, InstanceConfig.class);
typeToClassMapping.put(PropertyType.EXTERNALVIEW, ExternalView.class);
typeToClassMapping.put(PropertyType.CUSTOMIZEDVIEW, CustomizedView.class);
typeToClassMapping.put(PropertyType.STATEMODELDEFS, StateModelDefinition.class);
typeToClassMapping.put(PropertyType.MESSAGES, Message.class);
typeToClassMapping.put(PropertyType.CURRENTSTATES, CurrentState.class);
typeToClassMapping.put(PropertyType.STATUSUPDATES, StatusUpdate.class);
typeToClassMapping.put(PropertyType.HISTORY, ControllerHistory.class);
typeToClassMapping.put(PropertyType.PAUSE, PauseSignal.class);
typeToClassMapping.put(PropertyType.MAINTENANCE, MaintenanceSignal.class);
typeToClassMapping.put(PropertyType.STATUS, ClusterStatus.class);
// TODO: Below must handle the case for future versions of Task Framework with a different path
// structure
typeToClassMapping.put(PropertyType.WORKFLOWCONTEXT, WorkflowContext.class);
// @formatter:off
addEntry(PropertyType.CONFIGS, 1, "/{clusterName}/CONFIGS");
addEntry(PropertyType.CONFIGS, 2, "/{clusterName}/CONFIGS/{scope}");
addEntry(PropertyType.CONFIGS, 3, "/{clusterName}/CONFIGS/{scope}/{scopeKey}");
// addEntry(PropertyType.CONFIGS,2,"/{clusterName}/CONFIGS/{instanceName}");
addEntry(PropertyType.LIVEINSTANCES, 1, "/{clusterName}/LIVEINSTANCES");
addEntry(PropertyType.LIVEINSTANCES, 2, "/{clusterName}/LIVEINSTANCES/{instanceName}");
addEntry(PropertyType.INSTANCES, 1, "/{clusterName}/INSTANCES");
addEntry(PropertyType.INSTANCES, 2, "/{clusterName}/INSTANCES/{instanceName}");
addEntry(PropertyType.IDEALSTATES, 1, "/{clusterName}/IDEALSTATES");
addEntry(PropertyType.IDEALSTATES, 2, "/{clusterName}/IDEALSTATES/{resourceName}");
addEntry(PropertyType.EXTERNALVIEW, 1, "/{clusterName}/EXTERNALVIEW");
addEntry(PropertyType.EXTERNALVIEW, 2, "/{clusterName}/EXTERNALVIEW/{resourceName}");
addEntry(PropertyType.CUSTOMIZEDVIEW, 1, "/{clusterName}/CUSTOMIZEDVIEW");
addEntry(PropertyType.CUSTOMIZEDVIEW, 2, "/{clusterName}/CUSTOMIZEDVIEW/{customizedStateType}");
addEntry(PropertyType.CUSTOMIZEDVIEW, 3, "/{clusterName}/CUSTOMIZEDVIEW/{customizedStateType}/{resourceName}");
addEntry(PropertyType.STATUS, 1, "/{clusterName}/STATUS");
addEntry(PropertyType.STATUS, 2, "/{clusterName}/STATUS/{clusterName}");
addEntry(PropertyType.TARGETEXTERNALVIEW, 1, "/{clusterName}/TARGETEXTERNALVIEW");
addEntry(PropertyType.TARGETEXTERNALVIEW, 2,
"/{clusterName}/TARGETEXTERNALVIEW/{resourceName}");
addEntry(PropertyType.STATEMODELDEFS, 1, "/{clusterName}/STATEMODELDEFS");
addEntry(PropertyType.STATEMODELDEFS, 2, "/{clusterName}/STATEMODELDEFS/{stateModelName}");
addEntry(PropertyType.CONTROLLER, 1, "/{clusterName}/CONTROLLER");
addEntry(PropertyType.PROPERTYSTORE, 1, "/{clusterName}/PROPERTYSTORE");
// INSTANCE
addEntry(PropertyType.MESSAGES, 2, "/{clusterName}/INSTANCES/{instanceName}/MESSAGES");
addEntry(PropertyType.MESSAGES, 3, "/{clusterName}/INSTANCES/{instanceName}/MESSAGES/{msgId}");
addEntry(PropertyType.CURRENTSTATES, 2,
"/{clusterName}/INSTANCES/{instanceName}/CURRENTSTATES");
addEntry(PropertyType.CURRENTSTATES, 3,
"/{clusterName}/INSTANCES/{instanceName}/CURRENTSTATES/{sessionId}");
addEntry(PropertyType.CURRENTSTATES, 4,
"/{clusterName}/INSTANCES/{instanceName}/CURRENTSTATES/{sessionId}/{resourceName}");
addEntry(PropertyType.CURRENTSTATES, 5,
"/{clusterName}/INSTANCES/{instanceName}/CURRENTSTATES/{sessionId}/{resourceName}/{bucketName}");
addEntry(PropertyType.TASKCURRENTSTATES, 2,
"/{clusterName}/INSTANCES/{instanceName}/TASKCURRENTSTATES");
addEntry(PropertyType.TASKCURRENTSTATES, 3,
"/{clusterName}/INSTANCES/{instanceName}/TASKCURRENTSTATES/{sessionId}");
addEntry(PropertyType.TASKCURRENTSTATES, 4,
"/{clusterName}/INSTANCES/{instanceName}/TASKCURRENTSTATES/{sessionId}/{resourceName}");
addEntry(PropertyType.TASKCURRENTSTATES, 5,
"/{clusterName}/INSTANCES/{instanceName}/TASKCURRENTSTATES/{sessionId}/{resourceName}/{bucketName}");
addEntry(PropertyType.CUSTOMIZEDSTATES, 2,
"/{clusterName}/INSTANCES/{instanceName}/CUSTOMIZEDSTATES");
addEntry(PropertyType.CUSTOMIZEDSTATES, 3,
"/{clusterName}/INSTANCES/{instanceName}/CUSTOMIZEDSTATES/{customizedStateName}");
addEntry(PropertyType.CUSTOMIZEDSTATES, 4,
"/{clusterName}/INSTANCES/{instanceName}/CUSTOMIZEDSTATES/{customizedStateName}/{resourceName}");
addEntry(PropertyType.STATUSUPDATES, 2,
"/{clusterName}/INSTANCES/{instanceName}/STATUSUPDATES");
addEntry(PropertyType.STATUSUPDATES, 3,
"/{clusterName}/INSTANCES/{instanceName}/STATUSUPDATES/{sessionId}");
addEntry(PropertyType.STATUSUPDATES, 4,
"/{clusterName}/INSTANCES/{instanceName}/STATUSUPDATES/{sessionId}/{subPath}");
addEntry(PropertyType.STATUSUPDATES, 5,
"/{clusterName}/INSTANCES/{instanceName}/STATUSUPDATES/{sessionId}/{subPath}/{recordName}");
addEntry(PropertyType.ERRORS, 2, "/{clusterName}/INSTANCES/{instanceName}/ERRORS");
addEntry(PropertyType.ERRORS, 3, "/{clusterName}/INSTANCES/{instanceName}/ERRORS/{sessionId}");
addEntry(PropertyType.ERRORS, 4,
"/{clusterName}/INSTANCES/{instanceName}/ERRORS/{sessionId}/{subPath}");
addEntry(PropertyType.ERRORS, 5,
"/{clusterName}/INSTANCES/{instanceName}/ERRORS/{sessionId}/{subPath}/{recordName}");
addEntry(PropertyType.INSTANCE_HISTORY, 2, "/{clusterName}/INSTANCES/{instanceName}/HISTORY");
addEntry(PropertyType.HEALTHREPORT, 2, "/{clusterName}/INSTANCES/{instanceName}/HEALTHREPORT");
addEntry(PropertyType.HEALTHREPORT, 3,
"/{clusterName}/INSTANCES/{instanceName}/HEALTHREPORT/{reportName}");
// CONTROLLER
addEntry(PropertyType.MESSAGES_CONTROLLER, 1, "/{clusterName}/CONTROLLER/MESSAGES");
addEntry(PropertyType.MESSAGES_CONTROLLER, 2, "/{clusterName}/CONTROLLER/MESSAGES/{msgId}");
addEntry(PropertyType.ERRORS_CONTROLLER, 1, "/{clusterName}/CONTROLLER/ERRORS");
addEntry(PropertyType.ERRORS_CONTROLLER, 2, "/{clusterName}/CONTROLLER/ERRORS/{errorId}");
addEntry(PropertyType.STATUSUPDATES_CONTROLLER, 1, "/{clusterName}/CONTROLLER/STATUSUPDATES");
addEntry(PropertyType.STATUSUPDATES_CONTROLLER, 2,
"/{clusterName}/CONTROLLER/STATUSUPDATES/{subPath}");
addEntry(PropertyType.STATUSUPDATES_CONTROLLER, 3,
"/{clusterName}/CONTROLLER/STATUSUPDATES/{subPath}/{recordName}");
addEntry(PropertyType.LEADER, 1, "/{clusterName}/CONTROLLER/LEADER");
addEntry(PropertyType.HISTORY, 1, "/{clusterName}/CONTROLLER/HISTORY");
addEntry(PropertyType.PAUSE, 1, "/{clusterName}/CONTROLLER/PAUSE");
addEntry(PropertyType.MAINTENANCE, 1, "/{clusterName}/CONTROLLER/MAINTENANCE");
// @formatter:on
// RESOURCE
addEntry(PropertyType.WORKFLOWCONTEXT, 2,
"/{clusterName}/PROPERTYSTORE/TaskRebalancer/{workflowName}/Context"); // Old
// TODO: These are the current task framework related paths. In the future, if we decide to use
// a different structure such as a non-flatten ZNode structure, these paths need to be changed
// accordingly.
addEntry(PropertyType.TASK_CONFIG_ROOT, 1, "/{clusterName}/CONFIGS/RESOURCE");
addEntry(PropertyType.WORKFLOW_CONFIG, 2, "/{clusterName}/CONFIGS/RESOURCE/{workflowName}");
addEntry(PropertyType.JOB_CONFIG, 3,
"/{clusterName}/CONFIGS/RESOURCE/{workflowName}" + "_" + "{jobName}");
addEntry(PropertyType.TASK_CONTEXT_ROOT, 1,
"/{clusterName}/PROPERTYSTORE" + TaskConstants.REBALANCER_CONTEXT_ROOT);
addEntry(PropertyType.WORKFLOW_CONTEXT, 2, "/{clusterName}/PROPERTYSTORE"
+ TaskConstants.REBALANCER_CONTEXT_ROOT + "/{workflowName}/Context");
addEntry(PropertyType.JOB_CONTEXT, 3, "/{clusterName}/PROPERTYSTORE"
+ TaskConstants.REBALANCER_CONTEXT_ROOT + "/{workflowName}" + "_" + "{jobName}/Context");
}
  static final Pattern pattern = Pattern.compile("(\\{.+?\\})");
private static void addEntry(PropertyType type, int numKeys, String template) {
if (!templateMap.containsKey(type)) {
templateMap.put(type, new HashMap<Integer, String>());
}
logger.trace("Adding template for type:" + type.getType() + " arguments:" + numKeys
+ " template:" + template);
templateMap.get(type).put(numKeys, template);
}
/**
* Get the Zookeeper path given the property type, cluster, and parameters
* @param type
* @param clusterName
* @param keys
* @return a valid path, or null if none exists
*/
public static String getPath(PropertyType type, String clusterName, String... keys) {
if (clusterName == null) {
logger.warn("ClusterName can't be null for type:" + type);
return null;
}
if (keys == null) {
keys = new String[] {};
}
String template = null;
if (templateMap.containsKey(type)) {
// keys.length+1 since we add clusterName
template = templateMap.get(type).get(keys.length + 1);
}
String result = null;
if (template != null) {
result = template;
Matcher matcher = pattern.matcher(template);
int count = 0;
while (matcher.find()) {
count = count + 1;
String var = matcher.group();
if (count == 1) {
result = result.replace(var, clusterName);
} else {
result = result.replace(var, keys[count - 2]);
}
}
}
if (result == null || result.indexOf('{') > -1 || result.indexOf('}') > -1) {
logger.warn("Unable to instantiate template:" + template + " using clusterName:" + clusterName
+ " and keys:" + Arrays.toString(keys));
}
return result;
}
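  /*
   * Example (hypothetical names): getPath(PropertyType.CURRENTSTATES, "myCluster",
   * "host_12918", "session_0") matches the 3-key CURRENTSTATES template and
   * resolves to "/myCluster/INSTANCES/host_12918/CURRENTSTATES/session_0".
   */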
/**
* Given a path, find the name of an instance at that path
* @param path
* @return a valid instance name, or null if none exists
*/
public static String getInstanceNameFromPath(String path) {
    // path structure
    // /<clusterName>/INSTANCES/<instanceName>/[CURRENTSTATES/MESSAGES/...]
if (path.contains("/" + PropertyType.INSTANCES + "/")) {
String[] split = path.split("\\/");
if (split.length > 3) {
return split[3];
}
}
return null;
}
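  /*
   * Example: getInstanceNameFromPath("/myCluster/INSTANCES/host_12918/CURRENTSTATES")
   * returns "host_12918"; a path without an INSTANCES segment returns null.
   */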
public static String idealState(String clusterName) {
return "/" + clusterName + "/IDEALSTATES";
}
// Path = /<cluster>/IDEALSTATES/<resourceName>
public static String idealState(String clusterName, String resourceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/IDEALSTATES/");
builder.append(resourceName);
return builder.toString();
}
public static String stateModelDef(String clusterName) {
return "/" + clusterName + "/STATEMODELDEFS";
}
// Path = /<cluster>/STATEMODELDEFS/<stateModelName>
public static String stateModelDef(String clusterName, String stateModelName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/STATEMODELDEFS/");
builder.append(stateModelName);
return builder.toString();
}
public static String externalView(String clusterName) {
return "/" + clusterName + "/EXTERNALVIEW";
}
  // Path = /<cluster>/EXTERNALVIEW/<resourceName>
public static String externalView(String clusterName, String resourceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/EXTERNALVIEW/");
builder.append(resourceName);
return builder.toString();
}
public static String targetExternalView(String clusterName) {
return "/" + clusterName + "/TARGETEXTERNALVIEW";
}
  // Path = /<cluster>/TARGETEXTERNALVIEW/<resourceName>
public static String targetExternalView(String clusterName, String resourceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/TARGETEXTERNALVIEW/");
builder.append(resourceName);
return builder.toString();
}
public static String customizedView(String clusterName) {
return "/" + clusterName + "/CUSTOMIZEDVIEW";
}
// Path = /<cluster>/CUSTOMIZEDVIEW/<customizedStateName>
public static String customizedView(String clusterName, String customizedStateName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/CUSTOMIZEDVIEW/");
builder.append(customizedStateName);
return builder.toString();
}
// Path = /<cluster>/CUSTOMIZEDVIEW/<customizedStateName>/<resourceName>
public static String customizedView(String clusterName, String customizedStateName,
String resourceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/CUSTOMIZEDVIEW/");
builder.append(customizedStateName);
builder.append("/");
builder.append(resourceName);
return builder.toString();
}
public static String liveInstance(String clusterName) {
return "/" + clusterName + "/LIVEINSTANCES";
}
// Path = /<clusterName>/LIVEINSTANCES/<instanceName>
public static String liveInstance(String clusterName, String instanceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/LIVEINSTANCES/");
builder.append(instanceName);
return builder.toString();
}
public static String instance(String clusterName) {
return "/" + clusterName + "/INSTANCES";
}
@Deprecated
public static String instanceProperty(String clusterName, String instanceName, PropertyType type,
String key) {
return String.format("/%s/INSTANCES/%s/%s/%s", clusterName, instanceName, type, key);
}
// Path = /<cluster>/INSTANCES/<instance>
public static String instance(String clusterName, String instanceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/MESSAGES
public static String instanceMessage(String clusterName, String instanceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/MESSAGES");
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/MESSAGES/<messageId>
public static String instanceMessage(String clusterName, String instanceName, String messageId) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/MESSAGES/");
builder.append(messageId);
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/CURRENTSTATES
public static String instanceCurrentState(String clusterName, String instanceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/CURRENTSTATES");
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/CURRENTSTATES/<session>
public static String instanceCurrentState(String clusterName, String instanceName,
String sessionId) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/CURRENTSTATES/");
builder.append(sessionId);
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/CURRENTSTATES/<session>/<resourceName>
public static String instanceCurrentState(String clusterName, String instanceName,
String sessionId, String resourceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/CURRENTSTATES/");
builder.append(sessionId);
builder.append("/");
builder.append(resourceName);
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/TASKCURRENTSTATES
public static String instanceTaskCurrentState(String clusterName, String instanceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/TASKCURRENTSTATES");
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/TASKCURRENTSTATES/<session>
public static String instanceTaskCurrentState(String clusterName, String instanceName,
String sessionId) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/TASKCURRENTSTATES/");
builder.append(sessionId);
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/TASKCURRENTSTATES/<session>/<resourceName>
public static String instanceTaskCurrentState(String clusterName, String instanceName,
String sessionId, String resourceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/TASKCURRENTSTATES/");
builder.append(sessionId);
builder.append("/");
builder.append(resourceName);
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/CUSTOMIZEDSTATES/
public static String instanceCustomizedState(String clusterName, String instanceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/CUSTOMIZEDSTATES");
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/CUSTOMIZEDSTATES/<customizedStateName>
public static String instanceCustomizedState(String clusterName, String instanceName,
String customizedStateName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/CUSTOMIZEDSTATES/");
builder.append(customizedStateName);
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/CUSTOMIZEDSTATES/<customizedStateName>/<resourceName>
public static String instanceCustomizedState(String clusterName, String instanceName,
String customizedStateName, String resourceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/CUSTOMIZEDSTATES/");
builder.append(customizedStateName);
builder.append("/");
builder.append(resourceName);
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/ERRORS
public static String instanceError(String clusterName, String instanceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/ERRORS");
return builder.toString();
}
// Path = /<cluster>/INSTANCES/<instance>/ERRORS/<session>/<resourceName>/<partitionName>
public static String instanceError(String clusterName, String instanceName, String sessionId,
String resourceName, String partitionName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/ERRORS/");
builder.append(sessionId);
builder.append("/");
builder.append(resourceName);
builder.append("/");
builder.append(partitionName);
return builder.toString();
}
// Path = /<clusterName>/INSTANCES/<instanceName>/HISTORY
public static String instanceHistory(String clusterName, String instanceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/HISTORY");
return builder.toString();
}
// Path = /<clusterName>/INSTANCES/<instanceName>/STATUSUPDATES
public static String instanceStatusUpdate(String clusterName, String instanceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/INSTANCES/");
builder.append(instanceName);
builder.append("/STATUSUPDATES");
return builder.toString();
}
public static String propertyStore(String clusterName) {
return "/" + clusterName + "/PROPERTYSTORE";
}
// PATH = "/<clusterName>/CONFIGS/CLUSTER/<clusterName>"
public static String clusterConfig(String clusterName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/CONFIGS/CLUSTER/");
builder.append(clusterName);
return builder.toString();
}
public static String instanceConfig(String clusterName) {
return "/" + clusterName + "/CONFIGS/PARTICIPANT";
}
// PATH = "/<clusterName>/CONFIGS/PARTICIPANT/<instanceName>"
public static String instanceConfig(String clusterName, String instanceName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/CONFIGS/PARTICIPANT/");
builder.append(instanceName);
return builder.toString();
}
public static String resourceConfig(String clusterName) {
return "/" + clusterName + "/CONFIGS/RESOURCE";
}
public static String customizedStateConfig(String clusterName) {
return "/" + clusterName + "/CONFIGS/CUSTOMIZED_STATE";
}
public static String controller(String clusterName) {
return "/" + clusterName + "/CONTROLLER";
}
public static String controllerLeader(String clusterName) {
return "/" + clusterName + "/CONTROLLER/LEADER";
}
public static String controllerMessage(String clusterName) {
return "/" + clusterName + "/CONTROLLER/MESSAGES";
}
// PATH = "/<clusterName>/CONTROLLER/MESSAGES/<messageId>"
public static String controllerMessage(String clusterName, String messageId) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/CONTROLLER/MESSAGES/");
builder.append(messageId);
return builder.toString();
}
public static String controllerStatusUpdate(String clusterName) {
return "/" + clusterName + "/CONTROLLER/STATUSUPDATES";
}
  // Path = /<clusterName>/CONTROLLER/STATUSUPDATES/<subPath>/<recordName>
public static String controllerStatusUpdate(String clusterName, String subPath,
String recordName) {
StringBuilder builder = new StringBuilder("/");
builder.append(clusterName);
builder.append("/CONTROLLER/STATUSUPDATES/");
builder.append(subPath);
builder.append("/");
builder.append(recordName);
return builder.toString();
}
public static String controllerError(String clusterName) {
return "/" + clusterName + "/CONTROLLER/ERRORS";
}
public static String controllerHistory(String clusterName) {
return String.format("/%s/CONTROLLER/HISTORY", clusterName);
}
public static String pause(String clusterName) {
return "/" + clusterName + "/CONTROLLER/PAUSE";
}
public static String maintenance(String clusterName) {
return "/" + clusterName + "/CONTROLLER/MAINTENANCE";
}
// PATH = "/<clusterName>/STATUS/CLUSTER/<clusterName>"
public static String clusterStatus(String clusterName) {
return "/" + clusterName + "/STATUS/CLUSTER/" + clusterName;
}
}
// File: Create_ds/helix/helix-core/src/main/java/org/apache/helix/ZNRecordBucketizer.java
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Deprecated - use ZNRecordBucketizer in zookeeper-api instead.
* Operations to divide a ZNRecord into specified buckets
*/
@Deprecated
public class ZNRecordBucketizer extends org.apache.helix.zookeeper.datamodel.ZNRecordBucketizer {
/**
* Instantiate a bucketizer with the number of buckets
* @param bucketSize
*/
public ZNRecordBucketizer(int bucketSize) {
super(bucketSize);
}
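  /*
   * A usage sketch (record name and bucket size are hypothetical); see the
   * zookeeper-api ZNRecordBucketizer for the exact bucket-naming scheme:
   *
   *   ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(10);
   *   String bucketName = bucketizer.getBucketName("TestDB_12");
   */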
}
// File: Create_ds/helix/helix-core/src/main/java/org/apache/helix/PropertyType.java
package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Types of nodes in a Helix cluster
*/
enum Type {
CLUSTER,
INSTANCE,
CONTROLLER,
RESOURCE,
TASK,
REST
}
/**
* Types of data stored on Zookeeper by Helix
*/
public enum PropertyType {
// @formatter:off
// CLUSTER PROPERTIES
CONFIGS(Type.CLUSTER, true, false, false, false, true),
LIVEINSTANCES(Type.CLUSTER, false, false, false, true, true),
INSTANCES(Type.CLUSTER, true, false),
IDEALSTATES(Type.CLUSTER, true, false, false, false, true),
EXTERNALVIEW(Type.CLUSTER, true, false),
CUSTOMIZEDVIEW(Type.CLUSTER, true, false),
TARGETEXTERNALVIEW(Type.CLUSTER, true, false),
STATEMODELDEFS(Type.CLUSTER, true, false, false, false, true),
CONTROLLER(Type.CLUSTER, true, false),
PROPERTYSTORE(Type.CLUSTER, true, false),
STATUS(Type.CLUSTER, true, false, true),
// INSTANCE PROPERTIES
MESSAGES(Type.INSTANCE, true, true, true),
CURRENTSTATES(Type.INSTANCE, true, true, false, false, true),
TASKCURRENTSTATES(Type.INSTANCE, true, true, false, false, true),
STATUSUPDATES(Type.INSTANCE, true, true, false, false, false, true),
ERRORS(Type.INSTANCE, true, true),
INSTANCE_HISTORY(Type.INSTANCE, true, true, true),
HEALTHREPORT(Type.INSTANCE, true, false, false, false, false, true),
CUSTOMIZEDSTATES(Type.INSTANCE, true, false, false, true, true),
// CONTROLLER PROPERTY
LEADER(Type.CONTROLLER, false, false, true, true),
HISTORY(Type.CONTROLLER, true, true, true),
PAUSE(Type.CONTROLLER, true, false, true),
MAINTENANCE(Type.CONTROLLER, true, false, true),
MESSAGES_CONTROLLER(Type.CONTROLLER, true, false, true),
STATUSUPDATES_CONTROLLER(Type.CONTROLLER, true, true, true),
ERRORS_CONTROLLER(Type.CONTROLLER, true, true, true),
// TASK PROPERTIES
@Deprecated // This returns the old path for WorkflowContext
WORKFLOWCONTEXT(Type.TASK, true, false, false, false, false),
TASK_CONFIG_ROOT(Type.TASK, true, false, false, false, false),
TASK_CONTEXT_ROOT(Type.TASK, true, false, false, false, false),
WORKFLOW_CONFIG(Type.TASK, true, false, false, false, false),
WORKFLOW_CONTEXT(Type.TASK, true, false, false, false, false),
JOB_CONFIG(Type.TASK, true, false, false, false, false),
JOB_CONTEXT(Type.TASK, true, false, false, false, false),
// REST PROPERTIES
RESTCONFIGS(Type.REST, true, false, false, false, true);
// @formatter:on
Type type;
boolean isPersistent;
boolean mergeOnUpdate;
boolean updateOnlyOnExists;
boolean createOnlyIfAbsent;
/**
* "isCached" defines whether the property is cached in data accessor if data is cached,
* then read from zk can be optimized
*/
boolean isCached;
PropertyType(Type type, boolean isPersistent, boolean mergeOnUpdate) {
this(type, isPersistent, mergeOnUpdate, false);
}
  PropertyType(Type type, boolean isPersistent, boolean mergeOnUpdate, boolean updateOnlyOnExists) {
    // fixed: the original delegated with a hard-coded false, dropping updateOnlyOnExists
    this(type, isPersistent, mergeOnUpdate, updateOnlyOnExists, false);
  }
PropertyType(Type type, boolean isPersistent, boolean mergeOnUpdate, boolean updateOnlyOnExists,
boolean createOnlyIfAbsent) {
this(type, isPersistent, mergeOnUpdate, updateOnlyOnExists, createOnlyIfAbsent, false);
}
PropertyType(Type type, boolean isPersistent, boolean mergeOnUpdate, boolean updateOnlyOnExists,
boolean createOnlyIfAbsent, boolean isCached) {
this(type, isPersistent, mergeOnUpdate, updateOnlyOnExists, createOnlyIfAbsent, isCached,
false);
}
PropertyType(Type type, boolean isPersistent, boolean mergeOnUpdate, boolean updateOnlyOnExists,
boolean createOnlyIfAbsent, boolean isCached, boolean isAsyncWrite) {
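    // NOTE: the isAsyncWrite parameter is accepted here but never stored; no field
    // on this enum records it.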
this.type = type;
this.isPersistent = isPersistent;
this.mergeOnUpdate = mergeOnUpdate;
this.updateOnlyOnExists = updateOnlyOnExists;
this.createOnlyIfAbsent = createOnlyIfAbsent;
this.isCached = isCached;
}
/**
* Determine if the property should only be created if it does not exist
* @return true if it can only be created if absent, false otherwise
*/
public boolean isCreateOnlyIfAbsent() {
return createOnlyIfAbsent;
}
/**
* Set policy for creating only if it does not already exist
* @param createIfAbsent
*/
public void setCreateIfAbsent(boolean createIfAbsent) {
this.createOnlyIfAbsent = createIfAbsent;
}
/**
* Gets the type of the associated node
* @return {@link Type}
*/
public Type getType() {
return type;
}
/**
* Set the type of the associated node
* @param type {@link Type}
*/
public void setType(Type type) {
this.type = type;
}
/**
* Get the persistent state of the property
* @return true if persistent, false if ephemeral
*/
public boolean isPersistent() {
return isPersistent;
}
/**
* Set the persistent state of the property
* @param isPersistent
*/
public void setPersistent(boolean isPersistent) {
this.isPersistent = isPersistent;
}
/**
* Determine if the property is merged or replaced on update
* @return true if merge occurs on update, false otherwise
*/
public boolean isMergeOnUpdate() {
return mergeOnUpdate;
}
/**
* Enable or disable merging on an update to this property
* @param mergeOnUpdate
*/
public void setMergeOnUpdate(boolean mergeOnUpdate) {
this.mergeOnUpdate = mergeOnUpdate;
}
/**
* Determine if this property is only updated if it exists
* @return true if only updated when it exists, false otherwise
*/
public boolean isUpdateOnlyOnExists() {
return updateOnlyOnExists;
}
/**
* Enable or disable updating only on existence
* @param updateOnlyOnExists
*/
public void setUpdateOnlyOnExists(boolean updateOnlyOnExists) {
this.updateOnlyOnExists = updateOnlyOnExists;
}
/**
* Check if value is cached
* @return true if cached, false otherwise
*/
public boolean isCached() {
return isCached;
}
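  // Usage sketch (illustrative only): reading the flags of a property type.
  //   PropertyType t = PropertyType.CONFIGS;
  //   t.isPersistent(); // true: CONFIGS znodes are persistent
  //   t.isCached();     // true: reads may be served from the accessor cache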
}
 | 9,943 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/LiveInstanceInfoProvider.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.zookeeper.datamodel.ZNRecord;
/**
* Interface to provide additional information about a live instance at creation time
*/
public interface LiveInstanceInfoProvider {
/**
   * Callback function that is called by HelixManager before it creates the LiveInstance Zk node.
   * The ZNRecord returned by this function is stored as part of the LiveInstance node, providing
   * additional information about the instance.
   * @see ZKHelixManager#addLiveInstance()
* @see HelixManager#setLiveInstanceInfoProvider(LiveInstanceInfoProvider)
*/
ZNRecord getAdditionalLiveInstanceInfo();
}
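// Sketch of a possible implementation (hypothetical, for illustration only):
//   LiveInstanceInfoProvider provider = () -> {
//     ZNRecord info = new ZNRecord("additionalInfo");
//     info.setSimpleField("buildVersion", "1.0.0"); // arbitrary example field
//     return info;
//   };
//   manager.setLiveInstanceInfoProvider(provider); // before manager.connect()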
| 9,944 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Provide the classes necessary to create a Helix cluster manager
* <p>
* General flow
* <blockquote>
* <pre>
* manager = HelixManagerFactory.getManagerForROLE(); ROLE can be participant, spectator or a controller<br/>
* manager.connect();
* manager.addSOMEListener();
* After connect the subsequent interactions will be via listener onChange callbacks
* There will be 3 scenarios for onChange callback, which can be determined using NotificationContext.type
* INIT -> will be invoked the first time the listener is added
 * CALLBACK -> will be invoked due to a data change in the property value
* FINALIZE -> will be invoked when listener is removed or session expires
*
* manager.disconnect()
* </pre>
*
* </blockquote>
*
* Default implementations available
*
* @see org.apache.helix.participant.HelixStateMachineEngine for participant
* @see RoutingTableProvider for spectator
* @see GenericHelixController for controller
*
*/
package org.apache.helix;
| 9,945 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/Criteria.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Describes various properties that operations involving {@link Message} delivery will follow.
*/
public class Criteria {
public enum DataSource {
IDEALSTATES,
EXTERNALVIEW,
LIVEINSTANCES,
INSTANCES
}
/**
   * The recipient's instance type. This can be CONTROLLER, PARTICIPANT, or ROUTER. Cannot be null.
*/
InstanceType recipientInstanceType;
/**
   * If true, this will only be processed by an instance that was running when the
   * message was sent. If the instance process dies and comes back up, the message
   * will be ignored.
*/
boolean sessionSpecific;
/**
   * Applicable only when the recipient type is PARTICIPANT. Use % to broadcast to all instances.
*/
String instanceName = "";
/**
* Name of the resource. Use % to send message to all resources
* owned by an instance.
*/
String resourceName = "";
/**
* Resource partition. Use % to send message to all partitions of a given
* resource
*/
String partitionName = "";
/**
* State of the resource
*/
String partitionState = "";
/**
   * Exclude sending the message to yourself. True by default.
*/
boolean selfExcluded = true;
/**
   * Determines whether the external view or the ideal state is used as the source of truth.
*/
DataSource _dataSource = DataSource.EXTERNALVIEW;
/**
* The name of target cluster. If null, means sending to the local cluster
*/
String _clusterName = null;
/**
* Get the current source of truth
* @return either the ideal state or the external view
*/
public DataSource getDataSource() {
return _dataSource;
}
/**
* Set the current source of truth
* @param source ideal state or external view
*/
public void setDataSource(DataSource source) {
_dataSource = source;
}
/**
* Determine if the message is excluded from being sent to the sender
* @return true if the self-sent message is excluded, false otherwise
*/
public boolean isSelfExcluded() {
return selfExcluded;
}
/**
* Indicate whether or not the sender will be excluded as a message recipient
* @param selfExcluded true if the sender should be excluded, false otherwise
*/
public void setSelfExcluded(boolean selfExcluded) {
this.selfExcluded = selfExcluded;
}
/**
* Determine the type of the recipient
* @return InstanceType (e.g. PARTICIPANT, CONTROLLER, SPECTATOR)
*/
public InstanceType getRecipientInstanceType() {
return recipientInstanceType;
}
/**
* Set the type of the recipient
* @param recipientInstanceType InstanceType (e.g. PARTICIPANT, CONTROLLER, SPECTATOR)
*/
public void setRecipientInstanceType(InstanceType recipientInstanceType) {
this.recipientInstanceType = recipientInstanceType;
}
/**
* Determine if this message should be processed only if an instance was up at send time
* @return true if the message will be processed by current live nodes, false otherwise
*/
public boolean isSessionSpecific() {
return sessionSpecific;
}
/**
* Indicate whether or not a message should be restricted to a session
* @param sessionSpecific true if the message can only be processed by live nodes at send time,
* false otherwise
*/
public void setSessionSpecific(boolean sessionSpecific) {
this.sessionSpecific = sessionSpecific;
}
/**
* Get the name of the destination instance, available only for PARTICIPANT
* @return the instance name
*/
public String getInstanceName() {
return instanceName;
}
/**
* Set the name of the destination instance (PARTICIPANT only)
* @param instanceName the instance name or % for all instances
*/
public void setInstanceName(String instanceName) {
this.instanceName = instanceName;
}
/**
* Get the destination resource name
* @return destination resource name
*/
public String getResource() {
return resourceName;
}
/**
* Set the destination resource name
* @param resourceName the resource name or % for all resources
*/
public void setResource(String resourceName) {
this.resourceName = resourceName;
}
/**
* Get the destination partition name
* @return destination partition name
*/
public String getPartition() {
return partitionName;
}
/**
* Set the destination partition name
* @param partitionName the partition name, or % for all partitions of a resource
*/
public void setPartition(String partitionName) {
this.partitionName = partitionName;
}
/**
* Get the state of a resource partition
* @return the state of the resource partition
*/
public String getPartitionState() {
return partitionState;
}
/**
* Set the state of the resource partition
* @param partitionState the state of the resource partition
*/
public void setPartitionState(String partitionState) {
this.partitionState = partitionState;
}
/**
* Get the target cluster name
* @return the target cluster name if set or null if not set
*/
public String getClusterName() {
return _clusterName;
}
/**
* Set the target cluster name
* @param clusterName target cluster name to send message
*/
public void setClusterName(String clusterName) {
_clusterName = clusterName;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("instanceName").append("=").append(instanceName);
sb.append("resourceName").append("=").append(resourceName);
sb.append("partitionName").append("=").append(partitionName);
sb.append("partitionState").append("=").append(partitionState);
if (_clusterName != null) {
sb.append("clusterName").append("=").append(_clusterName);
}
return sb.toString();
}
}
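// Usage sketch (illustrative; all field values are arbitrary examples): target
// every partition of resource "MyDB" on all live participants.
//   Criteria criteria = new Criteria();
//   criteria.setRecipientInstanceType(InstanceType.PARTICIPANT);
//   criteria.setInstanceName("%");
//   criteria.setResource("MyDB");
//   criteria.setPartition("%");
//   criteria.setSessionSpecific(true);
//   criteria.setDataSource(Criteria.DataSource.EXTERNALVIEW);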
| 9,946 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache | Create_ds/helix/helix-core/src/main/java/org/apache/helix/InstanceType.java | package org.apache.helix;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.List;
import org.apache.helix.monitoring.mbeans.MonitorDomainNames;
/**
* CONTROLLER: cluster managing component is a controller
* PARTICIPANT: participate in the cluster state changes
* SPECTATOR: interested in the state changes in the cluster
* CONTROLLER_PARTICIPANT:
 * a special participant that competes for leadership of the CONTROLLER_CLUSTER;
 * used by the cluster controller in distributed mode, see {@link HelixControllerMain}
*/
public enum InstanceType {
CONTROLLER(new String[] {
MonitorDomainNames.ClusterStatus.name(),
MonitorDomainNames.HelixZkClient.name(),
MonitorDomainNames.HelixCallback.name(),
MonitorDomainNames.Rebalancer.name(),
MonitorDomainNames.AggregatedView.name()
}),
PARTICIPANT(new String[] {
MonitorDomainNames.CLMParticipantReport.name(),
MonitorDomainNames.HelixZkClient.name(),
MonitorDomainNames.HelixCallback.name(),
MonitorDomainNames.HelixThreadPoolExecutor.name()
}),
CONTROLLER_PARTICIPANT(new String[] {
MonitorDomainNames.ClusterStatus.name(),
MonitorDomainNames.HelixZkClient.name(),
MonitorDomainNames.HelixCallback.name(),
MonitorDomainNames.HelixThreadPoolExecutor.name(),
MonitorDomainNames.CLMParticipantReport.name(),
MonitorDomainNames.Rebalancer.name(),
MonitorDomainNames.AggregatedView.name()
}),
SPECTATOR(new String[] {
MonitorDomainNames.HelixZkClient.name()
}),
ADMINISTRATOR(new String[] {
MonitorDomainNames.HelixZkClient.name()
});
private final String[] _monitorDomains;
InstanceType(String[] monitorDomains) {
_monitorDomains = monitorDomains;
}
public List<String> getActiveMBeanDomains() {
return Arrays.asList(_monitorDomains);
}
}
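// Usage sketch (illustrative only): each role advertises the JMX domains it
// registers monitoring MBeans under.
//   List<String> domains = InstanceType.PARTICIPANT.getActiveMBeanDomains();
//   // contains e.g. MonitorDomainNames.HelixZkClient.name()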
| 9,947 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ZnodeValue.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
public class ZnodeValue {
public String _singleValue;
public List<String> _listValue;
public Map<String, String> _mapValue;
public ZNRecord _znodeValue;
public ZnodeValue() {
}
public ZnodeValue(String value) {
_singleValue = value;
}
public ZnodeValue(List<String> value) {
_listValue = value;
}
public ZnodeValue(Map<String, String> value) {
_mapValue = value;
}
public ZnodeValue(ZNRecord value) {
_znodeValue = value;
}
@Override
public String toString() {
if (_singleValue != null) {
return _singleValue;
} else if (_listValue != null) {
return _listValue.toString();
} else if (_mapValue != null) {
return _mapValue.toString();
} else if (_znodeValue != null) {
return _znodeValue.toString();
}
return "null";
}
}
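// Usage sketch (illustrative only): a ZnodeValue wraps exactly one of the four
// payload kinds; toString() prints whichever one is set.
//   ZnodeValue v = new ZnodeValue(new ZNRecord("testZnode"));
//   System.out.println(v); // delegates to the wrapped ZNRecord's toString()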
| 9,948 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/DefaultIdealStateCalculator.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import org.apache.helix.HelixException;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.model.IdealState.IdealStateProperty;
/**
* DefaultIdealStateCalculator tries to optimally allocate master/slave partitions among
* espresso storage nodes.
 * Given a batch of storage nodes, the partition count and the replication factor, the algorithm
 * first computes an initial ideal state.
 * When new batches of storage nodes are added, the algorithm calculates the new ideal state such
 * that the total partition movement is minimized.
*/
public class DefaultIdealStateCalculator {
static final String _MasterAssignmentMap = "MasterAssignmentMap";
static final String _SlaveAssignmentMap = "SlaveAssignmentMap";
static final String _partitions = "partitions";
static final String _replicas = "replicas";
/**
* Calculate the initial ideal state given a batch of storage instances, the replication factor
* and
* number of partitions
* 1. Calculate the master assignment by random shuffling
* 2. for each storage instance, calculate the 1st slave assignment map, by another random
* shuffling
* 3. for each storage instance, calculate the i-th slave assignment map
* 4. Combine the i-th slave assignment maps together
* @param instanceNames
* list of storage node instances
* @param partitions
* number of partitions
* @param replicas
* The number of replicas (slave partitions) per master partition
* @param masterStateValue
* master state value: e.g. "MASTER" or "LEADER"
* @param slaveStateValue
* slave state value: e.g. "SLAVE" or "STANDBY"
* @param resourceName
* @return a ZNRecord that contain the idealstate info
*/
public static ZNRecord calculateIdealState(List<String> instanceNames, int partitions,
int replicas, String resourceName, String masterStateValue, String slaveStateValue) {
Collections.sort(instanceNames);
if (instanceNames.size() < replicas + 1) {
throw new HelixException("Number of instances must not be less than replicas + 1. "
+ "instanceNr:" + instanceNames.size() + ", replicas:" + replicas);
} else if (partitions < instanceNames.size()) {
ZNRecord idealState =
IdealStateCalculatorByShuffling.calculateIdealState(instanceNames, partitions, replicas,
resourceName, 12345, masterStateValue, slaveStateValue);
int i = 0;
for (String partitionId : idealState.getMapFields().keySet()) {
Map<String, String> partitionAssignmentMap = idealState.getMapField(partitionId);
List<String> partitionAssignmentPriorityList = new ArrayList<String>();
String masterInstance = "";
for (String instanceName : partitionAssignmentMap.keySet()) {
if (partitionAssignmentMap.get(instanceName).equalsIgnoreCase(masterStateValue)
&& masterInstance.equals("")) {
masterInstance = instanceName;
} else {
partitionAssignmentPriorityList.add(instanceName);
}
}
Collections.shuffle(partitionAssignmentPriorityList, new Random(i++));
partitionAssignmentPriorityList.add(0, masterInstance);
idealState.setListField(partitionId, partitionAssignmentPriorityList);
}
return idealState;
}
Map<String, Object> result = calculateInitialIdealState(instanceNames, partitions, replicas);
return convertToZNRecord(result, resourceName, masterStateValue, slaveStateValue);
}
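  // Usage sketch (values are arbitrary examples): an ideal state for 3 nodes,
  // 12 partitions, and 2 slave replicas per master partition.
  //   List<String> nodes = Arrays.asList("localhost_12918", "localhost_12919", "localhost_12920");
  //   ZNRecord idealState = DefaultIdealStateCalculator.calculateIdealState(
  //       nodes, 12, 2, "MyDB", "MASTER", "SLAVE");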
public static ZNRecord calculateIdealStateBatch(List<List<String>> instanceBatches,
int partitions, int replicas, String resourceName, String masterStateValue,
String slaveStateValue) {
Map<String, Object> result =
calculateInitialIdealState(instanceBatches.get(0), partitions, replicas);
for (int i = 1; i < instanceBatches.size(); i++) {
result = calculateNextIdealState(instanceBatches.get(i), result);
}
return convertToZNRecord(result, resourceName, masterStateValue, slaveStateValue);
}
/**
* Convert the internal result (stored as a Map<String, Object>) into ZNRecord.
*/
public static ZNRecord convertToZNRecord(Map<String, Object> result, String resourceName,
String masterStateValue, String slaveStateValue) {
Map<String, List<Integer>> nodeMasterAssignmentMap =
(Map<String, List<Integer>>) (result.get(_MasterAssignmentMap));
Map<String, Map<String, List<Integer>>> nodeSlaveAssignmentMap =
(Map<String, Map<String, List<Integer>>>) (result.get(_SlaveAssignmentMap));
    int partitions = (Integer) (result.get(_partitions));
ZNRecord idealState = new ZNRecord(resourceName);
idealState.setSimpleField(IdealStateProperty.NUM_PARTITIONS.toString(),
String.valueOf(partitions));
for (String instanceName : nodeMasterAssignmentMap.keySet()) {
for (Integer partitionId : nodeMasterAssignmentMap.get(instanceName)) {
String partitionName = resourceName + "_" + partitionId;
if (!idealState.getMapFields().containsKey(partitionName)) {
idealState.setMapField(partitionName, new TreeMap<String, String>());
}
idealState.getMapField(partitionName).put(instanceName, masterStateValue);
}
}
for (String instanceName : nodeSlaveAssignmentMap.keySet()) {
Map<String, List<Integer>> slaveAssignmentMap = nodeSlaveAssignmentMap.get(instanceName);
for (String slaveNode : slaveAssignmentMap.keySet()) {
List<Integer> slaveAssignment = slaveAssignmentMap.get(slaveNode);
for (Integer partitionId : slaveAssignment) {
String partitionName = resourceName + "_" + partitionId;
idealState.getMapField(partitionName).put(slaveNode, slaveStateValue);
}
}
}
// generate the priority list of instances per partition. Master should be at front and slave
// follows.
for (String partitionId : idealState.getMapFields().keySet()) {
Map<String, String> partitionAssignmentMap = idealState.getMapField(partitionId);
List<String> partitionAssignmentPriorityList = new ArrayList<String>();
String masterInstance = "";
for (String instanceName : partitionAssignmentMap.keySet()) {
if (partitionAssignmentMap.get(instanceName).equalsIgnoreCase(masterStateValue)
&& masterInstance.equals("")) {
masterInstance = instanceName;
} else {
partitionAssignmentPriorityList.add(instanceName);
}
}
Collections.shuffle(partitionAssignmentPriorityList);
partitionAssignmentPriorityList.add(0, masterInstance);
idealState.setListField(partitionId, partitionAssignmentPriorityList);
}
    assert (result.containsKey(_replicas));
    idealState.setSimpleField(IdealStateProperty.REPLICAS.toString(), result.get(_replicas)
        .toString());
return idealState;
}
/**
* Calculate the initial ideal state given a batch of storage instances, the replication factor
* and
* number of partitions
* 1. Calculate the master assignment by random shuffling
* 2. for each storage instance, calculate the 1st slave assignment map, by another random
* shuffling
* 3. for each storage instance, calculate the i-th slave assignment map
* 4. Combine the i-th slave assignment maps together
* @param instanceNames
* list of storage node instances
* @param partitions
* number of partitions
* @param replicas
* The number of replicas (slave partitions) per master partition
* @return a map that contain the idealstate info
*/
public static Map<String, Object> calculateInitialIdealState(List<String> instanceNames,
int partitions, int replicas) {
Random r = new Random(54321);
assert (replicas <= instanceNames.size() - 1);
ArrayList<Integer> masterPartitionAssignment = new ArrayList<Integer>();
for (int i = 0; i < partitions; i++) {
masterPartitionAssignment.add(i);
}
// shuffle the partition id array
Collections.shuffle(masterPartitionAssignment, new Random(r.nextInt()));
// 1. Generate the random master partition assignment
// instanceName -> List of master partitions on that instance
Map<String, List<Integer>> nodeMasterAssignmentMap = new TreeMap<String, List<Integer>>();
for (int i = 0; i < masterPartitionAssignment.size(); i++) {
String instanceName = instanceNames.get(i % instanceNames.size());
if (!nodeMasterAssignmentMap.containsKey(instanceName)) {
nodeMasterAssignmentMap.put(instanceName, new ArrayList<Integer>());
}
nodeMasterAssignmentMap.get(instanceName).add(masterPartitionAssignment.get(i));
}
// instanceName -> slave assignment for its master partitions
// slave assignment: instanceName -> list of slave partitions on it
List<Map<String, Map<String, List<Integer>>>> nodeSlaveAssignmentMapsList =
new ArrayList<Map<String, Map<String, List<Integer>>>>(replicas);
Map<String, Map<String, List<Integer>>> firstNodeSlaveAssignmentMap =
new TreeMap<String, Map<String, List<Integer>>>();
Map<String, Map<String, List<Integer>>> combinedNodeSlaveAssignmentMap =
new TreeMap<String, Map<String, List<Integer>>>();
if (replicas > 0) {
// 2. For each node, calculate the evenly distributed slave as the first slave assignment
// We will figure out the 2nd ...replicas-th slave assignment based on the first level slave
// assignment
for (int i = 0; i < instanceNames.size(); i++) {
List<String> slaveInstances = new ArrayList<String>();
ArrayList<Integer> slaveAssignment = new ArrayList<Integer>();
TreeMap<String, List<Integer>> slaveAssignmentMap = new TreeMap<String, List<Integer>>();
for (int j = 0; j < instanceNames.size(); j++) {
if (j != i) {
slaveInstances.add(instanceNames.get(j));
slaveAssignmentMap.put(instanceNames.get(j), new ArrayList<Integer>());
}
}
// Get the number of master partitions on instanceName
List<Integer> masterAssignment = nodeMasterAssignmentMap.get(instanceNames.get(i));
      // do a random shuffling as in step 1, so that the first-level slaves are distributed
      // among the rest of the instances
for (int j = 0; j < masterAssignment.size(); j++) {
slaveAssignment.add(j);
}
Collections.shuffle(slaveAssignment, new Random(r.nextInt()));
Collections.shuffle(slaveInstances, new Random(instanceNames.get(i).hashCode()));
// Get the slave assignment map of node instanceName
for (int j = 0; j < masterAssignment.size(); j++) {
String slaveInstanceName =
slaveInstances.get(slaveAssignment.get(j) % slaveInstances.size());
if (!slaveAssignmentMap.containsKey(slaveInstanceName)) {
slaveAssignmentMap.put(slaveInstanceName, new ArrayList<Integer>());
}
slaveAssignmentMap.get(slaveInstanceName).add(masterAssignment.get(j));
}
firstNodeSlaveAssignmentMap.put(instanceNames.get(i), slaveAssignmentMap);
}
nodeSlaveAssignmentMapsList.add(firstNodeSlaveAssignmentMap);
// From the first slave assignment map, calculate the rest slave assignment maps
for (int replicaOrder = 1; replicaOrder < replicas; replicaOrder++) {
// calculate the next slave partition assignment map
Map<String, Map<String, List<Integer>>> nextNodeSlaveAssignmentMap =
            calculateNextSlaveAssignmentMap(firstNodeSlaveAssignmentMap, replicaOrder);
nodeSlaveAssignmentMapsList.add(nextNodeSlaveAssignmentMap);
}
// Combine the calculated 1...replicas-th slave assignment map together
for (String instanceName : nodeMasterAssignmentMap.keySet()) {
Map<String, List<Integer>> combinedSlaveAssignmentMap =
new TreeMap<String, List<Integer>>();
for (Map<String, Map<String, List<Integer>>> slaveNodeAssignmentMap : nodeSlaveAssignmentMapsList) {
Map<String, List<Integer>> slaveAssignmentMap = slaveNodeAssignmentMap.get(instanceName);
for (String slaveInstance : slaveAssignmentMap.keySet()) {
if (!combinedSlaveAssignmentMap.containsKey(slaveInstance)) {
combinedSlaveAssignmentMap.put(slaveInstance, new ArrayList<Integer>());
}
combinedSlaveAssignmentMap.get(slaveInstance).addAll(
slaveAssignmentMap.get(slaveInstance));
}
}
migrateSlaveAssignMapToNewInstances(combinedSlaveAssignmentMap, new ArrayList<String>());
combinedNodeSlaveAssignmentMap.put(instanceName, combinedSlaveAssignmentMap);
}
}
/*
* // Print the result master and slave assignment maps
* System.out.println("Master assignment:");
* for(String instanceName : nodeMasterAssignmentMap.keySet())
* {
* System.out.println(instanceName+":");
* for(Integer x : nodeMasterAssignmentMap.get(instanceName))
* {
* System.out.print(x+" ");
* }
* System.out.println();
* System.out.println("Slave assignment:");
* int slaveOrder = 1;
* for(Map<String, Map<String, List<Integer>>> slaveNodeAssignmentMap :
* nodeSlaveAssignmentMapsList)
* {
* System.out.println("Slave assignment order :" + (slaveOrder++));
* Map<String, List<Integer>> slaveAssignmentMap = slaveNodeAssignmentMap.get(instanceName);
* for(String slaveName : slaveAssignmentMap.keySet())
* {
* System.out.print("\t" + slaveName +":\n\t" );
* for(Integer x : slaveAssignmentMap.get(slaveName))
* {
* System.out.print(x + " ");
* }
* System.out.println("\n");
* }
* }
* System.out.println("\nCombined slave assignment map");
* Map<String, List<Integer>> slaveAssignmentMap =
* combinedNodeSlaveAssignmentMap.get(instanceName);
* for(String slaveName : slaveAssignmentMap.keySet())
* {
* System.out.print("\t" + slaveName +":\n\t" );
* for(Integer x : slaveAssignmentMap.get(slaveName))
* {
* System.out.print(x + " ");
* }
* System.out.println("\n");
* }
* }
*/
Map<String, Object> result = new TreeMap<String, Object>();
result.put("MasterAssignmentMap", nodeMasterAssignmentMap);
result.put("SlaveAssignmentMap", combinedNodeSlaveAssignmentMap);
result.put("replicas", new Integer(replicas + 1));
result.put("partitions", new Integer(partitions));
return result;
}
/**
* In the case there are more than 1 slave, we use the following algorithm to calculate the n-th
* slave
* assignment map based on the first level slave assignment map.
* @param firstInstanceSlaveAssignmentMap the first slave assignment map for all instances
* @param replicaOrder of the slave
* @return the n-th slave assignment map for all the instances
*/
  static Map<String, Map<String, List<Integer>>> calculateNextSlaveAssignmentMap(
Map<String, Map<String, List<Integer>>> firstInstanceSlaveAssignmentMap, int replicaOrder) {
Map<String, Map<String, List<Integer>>> result =
new TreeMap<String, Map<String, List<Integer>>>();
for (String currentInstance : firstInstanceSlaveAssignmentMap.keySet()) {
Map<String, List<Integer>> resultAssignmentMap = new TreeMap<String, List<Integer>>();
result.put(currentInstance, resultAssignmentMap);
}
for (String currentInstance : firstInstanceSlaveAssignmentMap.keySet()) {
Map<String, List<Integer>> previousSlaveAssignmentMap =
firstInstanceSlaveAssignmentMap.get(currentInstance);
Map<String, List<Integer>> resultAssignmentMap = result.get(currentInstance);
int offset = replicaOrder - 1;
for (String instance : previousSlaveAssignmentMap.keySet()) {
List<String> otherInstances = new ArrayList<String>(previousSlaveAssignmentMap.size() - 1);
// Obtain an array of other instances
for (String otherInstance : previousSlaveAssignmentMap.keySet()) {
otherInstances.add(otherInstance);
}
Collections.sort(otherInstances);
int instanceIndex = -1;
for (int index = 0; index < otherInstances.size(); index++) {
if (otherInstances.get(index).equalsIgnoreCase(instance)) {
instanceIndex = index;
}
}
assert (instanceIndex >= 0);
if (instanceIndex == otherInstances.size() - 1) {
instanceIndex--;
}
        // Since we need to evenly distribute the slaves previously on "instance" across the
        // other instances, we need to remove "instance" from the array.
otherInstances.remove(instance);
// distribute previous slave assignment to other instances.
List<Integer> previousAssignmentList = previousSlaveAssignmentMap.get(instance);
for (int i = 0; i < previousAssignmentList.size(); i++) {
// Evenly distribute the previousAssignmentList to the remaining other instances
int newInstanceIndex = (i + offset + instanceIndex) % otherInstances.size();
String newInstance = otherInstances.get(newInstanceIndex);
if (!resultAssignmentMap.containsKey(newInstance)) {
resultAssignmentMap.put(newInstance, new ArrayList<Integer>());
}
resultAssignmentMap.get(newInstance).add(previousAssignmentList.get(i));
}
}
}
return result;
}
/**
* Given the current idealState, and the list of new Instances needed to be added, calculate the
* new Ideal state.
* 1. Calculate how many master partitions should be moved to the new cluster of instances
* 2. assign the number of master partitions px to be moved to each previous node
* 3. for each previous node,
* 3.1 randomly choose px nodes, move them to temp list
* 3.2 for each px nodes, remove them from the slave assignment map; record the map position of
* the partition;
* 3.3 calculate # of new nodes that should be put in the slave assignment map
* 3.4 even-fy the slave assignment map;
* 3.5 randomly place # of new nodes that should be placed in
* 4. from all the temp master node list get from 3.1,
* 4.1 randomly assign them to nodes in the new cluster
* 5. for each node in the new cluster,
* 5.1 assemble the slave assignment map
* 5.2 even-fy the slave assignment map
* @param newInstances
* list of new added storage node instances
* @param previousIdealState
* The previous ideal state
* @return a map that contain the updated idealstate info
*/
public static Map<String, Object> calculateNextIdealState(List<String> newInstances,
Map<String, Object> previousIdealState) {
// Obtain the master / slave assignment info maps
Collections.sort(newInstances);
    Map<String, List<Integer>> previousMasterAssignmentMap =
        (Map<String, List<Integer>>) (previousIdealState.get(_MasterAssignmentMap));
    Map<String, Map<String, List<Integer>>> nodeSlaveAssignmentMap =
        (Map<String, Map<String, List<Integer>>>) (previousIdealState.get(_SlaveAssignmentMap));
List<String> oldInstances = new ArrayList<String>();
for (String oldInstance : previousMasterAssignmentMap.keySet()) {
oldInstances.add(oldInstance);
}
int previousInstanceNum = previousMasterAssignmentMap.size();
    int partitions = (Integer) (previousIdealState.get(_partitions));
// TODO: take weight into account when calculate this
    int totalMasterPartitionsToMove =
        partitions * (newInstances.size()) / (previousInstanceNum + newInstances.size());
    int numMastersFromEachNode = totalMasterPartitionsToMove / previousInstanceNum;
    int remain = totalMasterPartitionsToMove % previousInstanceNum;
// Note that when remain > 0, we should make [remain] moves with (numMastersFromEachNode + 1)
// partitions.
// And we should first choose those (numMastersFromEachNode + 1) moves from the instances that
// has more
// master partitions
List<Integer> masterPartitionListToMove = new ArrayList<Integer>();
// For corresponding moved slave partitions, keep track of their original location; the new node
// does not
// need to migrate all of them.
Map<String, List<Integer>> slavePartitionsToMoveMap = new TreeMap<String, List<Integer>>();
// Make sure that the instances that holds more master partitions are put in front
List<String> bigList = new ArrayList<String>(), smallList = new ArrayList<String>();
for (String oldInstance : previousMasterAssignmentMap.keySet()) {
List<Integer> masterAssignmentList = previousMasterAssignmentMap.get(oldInstance);
if (masterAssignmentList.size() > numMastersFromEachNode) {
bigList.add(oldInstance);
} else {
smallList.add(oldInstance);
}
}
// "sort" the list, such that the nodes that has more master partitions moves more partitions to
// the
// new added batch of instances.
bigList.addAll(smallList);
int totalSlaveMoves = 0;
for (String oldInstance : bigList) {
List<Integer> masterAssignmentList = previousMasterAssignmentMap.get(oldInstance);
int numToChoose = numMastersFromEachNode;
if (remain > 0) {
numToChoose = numMastersFromEachNode + 1;
remain--;
}
      // randomly remove numToChoose master partitions, staging them for the newly added nodes
      ArrayList<Integer> masterPartitionsMoved = new ArrayList<Integer>();
      randomSelect(masterAssignmentList, masterPartitionsMoved, numToChoose);
      masterPartitionListToMove.addAll(masterPartitionsMoved);
      Map<String, List<Integer>> slaveAssignmentMap = nodeSlaveAssignmentMap.get(oldInstance);
      removeFromSlaveAssignmentMap(slaveAssignmentMap, masterPartitionsMoved,
          slavePartitionsToMoveMap);
// Make sure that for old instances, the slave placement map is evenly distributed
// Trace the "local slave moves", which should together contribute to most of the slave
// migrations
int movesWithinInstance =
migrateSlaveAssignMapToNewInstances(slaveAssignmentMap, newInstances);
// System.out.println("local moves: "+ movesWithinInstance);
totalSlaveMoves += movesWithinInstance;
}
// System.out.println("local slave moves total: "+ totalSlaveMoves);
// calculate the master /slave assignment for the new added nodes
// We already have the list of master partitions that will migrate to new batch of instances,
// shuffle the partitions and assign them to new instances
Collections.shuffle(masterPartitionListToMove, new Random(12345));
for (int i = 0; i < newInstances.size(); i++) {
String newInstance = newInstances.get(i);
List<Integer> masterPartitionList = new ArrayList<Integer>();
for (int j = 0; j < masterPartitionListToMove.size(); j++) {
if (j % newInstances.size() == i) {
masterPartitionList.add(masterPartitionListToMove.get(j));
}
}
Map<String, List<Integer>> slavePartitionMap = new TreeMap<String, List<Integer>>();
for (String oldInstance : oldInstances) {
slavePartitionMap.put(oldInstance, new ArrayList<Integer>());
}
// Build the slave assignment map for the new instance, based on the saved information
// about those slave partition locations in slavePartitionsToMoveMap
for (Integer x : masterPartitionList) {
for (String oldInstance : slavePartitionsToMoveMap.keySet()) {
List<Integer> slaves = slavePartitionsToMoveMap.get(oldInstance);
if (slaves.contains(x)) {
slavePartitionMap.get(oldInstance).add(x);
}
}
}
// add entry for other new instances into the slavePartitionMap
List<String> otherNewInstances = new ArrayList<String>();
for (String instance : newInstances) {
if (!instance.equalsIgnoreCase(newInstance)) {
otherNewInstances.add(instance);
}
}
// Make sure that slave partitions are evenly distributed
migrateSlaveAssignMapToNewInstances(slavePartitionMap, otherNewInstances);
// Update the result in the result map. We can reuse the input previousIdealState map as
// the result.
previousMasterAssignmentMap.put(newInstance, masterPartitionList);
nodeSlaveAssignmentMap.put(newInstance, slavePartitionMap);
}
/*
* // Print content of the master/ slave assignment maps
* for(String instanceName : previousMasterAssignmentMap.keySet())
* {
* System.out.println(instanceName+":");
* for(Integer x : previousMasterAssignmentMap.get(instanceName))
* {
* System.out.print(x+" ");
* }
* System.out.println("\nmaster partition moved:");
* System.out.println();
* System.out.println("Slave assignment:");
* Map<String, List<Integer>> slaveAssignmentMap = nodeSlaveAssignmentMap.get(instanceName);
* for(String slaveName : slaveAssignmentMap.keySet())
* {
* System.out.print("\t" + slaveName +":\n\t" );
* for(Integer x : slaveAssignmentMap.get(slaveName))
* {
* System.out.print(x + " ");
* }
* System.out.println("\n");
* }
* }
* System.out.println("Master partitions migrated to new instances");
* for(Integer x : masterPartitionListToMove)
* {
* System.out.print(x+" ");
* }
* System.out.println();
* System.out.println("Slave partitions migrated to new instances");
* for(String oldInstance : slavePartitionsToMoveMap.keySet())
* {
* System.out.print(oldInstance + ": ");
* for(Integer x : slavePartitionsToMoveMap.get(oldInstance))
* {
* System.out.print(x+" ");
* }
* System.out.println();
* }
*/
return previousIdealState;
}
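  // Usage sketch (illustrative): grow a cluster batch by batch while minimizing
  // partition movement; the returned map feeds the next round.
  //   Map<String, Object> state =
  //       DefaultIdealStateCalculator.calculateInitialIdealState(initialNodes, 48, 2);
  //   state = DefaultIdealStateCalculator.calculateNextIdealState(
  //       Arrays.asList("localhost_12921"), state);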
public ZNRecord calculateNextIdealState(List<String> newInstances,
Map<String, Object> previousIdealState, String resourceName, String masterStateValue,
String slaveStateValue) {
return convertToZNRecord(calculateNextIdealState(newInstances, previousIdealState),
resourceName, masterStateValue, slaveStateValue);
}
/**
   * Given the list of master partitions that will be migrated away from the storage instance,
   * remove their entries from the local instance slave assignment map.
   * @param slaveAssignmentMap the local instance slave assignment map
   * @param masterPartitionsMoved the list of master partition ids that will be migrated away
   * @param removedAssignmentMap keeps track of the removed slave assignment info. The info can be
   *          used by newly added storage nodes.
*/
  static void removeFromSlaveAssignmentMap(Map<String, List<Integer>> slaveAssignmentMap,
      List<Integer> masterPartitionsMoved, Map<String, List<Integer>> removedAssignmentMap) {
    for (String instanceName : slaveAssignmentMap.keySet()) {
      List<Integer> slaveAssignment = slaveAssignmentMap.get(instanceName);
      for (Integer partitionId : masterPartitionsMoved) {
if (slaveAssignment.contains(partitionId)) {
slaveAssignment.remove(partitionId);
if (!removedAssignmentMap.containsKey(instanceName)) {
removedAssignmentMap.put(instanceName, new ArrayList<Integer>());
}
removedAssignmentMap.get(instanceName).add(partitionId);
}
}
}
}
/**
   * Since some new storage instances are added, each existing storage instance should migrate some
   * slave partitions to the newly added instances.
   * The algorithm keeps moving one partition from the instance that hosts the most slave partitions
   * to the instance that hosts the fewest, until max - min <= 1.
   * In this way we can guarantee that all instances host almost the same number of slave
   * partitions, and that slave partitions are evenly distributed.
   * @param nodeSlaveAssignmentMap the local instance slave assignment map
   * @param newInstances the list of newly added instances
*/
static int migrateSlaveAssignMapToNewInstances(Map<String, List<Integer>> nodeSlaveAssignmentMap,
List<String> newInstances) {
int moves = 0;
boolean done = false;
for (String newInstance : newInstances) {
nodeSlaveAssignmentMap.put(newInstance, new ArrayList<Integer>());
}
while (!done) {
List<Integer> maxAssignment = null, minAssignment = null;
int minCount = Integer.MAX_VALUE, maxCount = Integer.MIN_VALUE;
String minInstance = "";
for (String instanceName : nodeSlaveAssignmentMap.keySet()) {
List<Integer> slaveAssignment = nodeSlaveAssignmentMap.get(instanceName);
if (minCount > slaveAssignment.size()) {
minCount = slaveAssignment.size();
minAssignment = slaveAssignment;
minInstance = instanceName;
}
if (maxCount < slaveAssignment.size()) {
maxCount = slaveAssignment.size();
maxAssignment = slaveAssignment;
}
}
if (maxCount - minCount <= 1) {
done = true;
} else {
int indexToMove = -1;
// find a partition that is not contained in the minAssignment list
for (int i = 0; i < maxAssignment.size(); i++) {
if (!minAssignment.contains(maxAssignment.get(i))) {
indexToMove = i;
break;
}
}
minAssignment.add(maxAssignment.get(indexToMove));
maxAssignment.remove(indexToMove);
if (newInstances.contains(minInstance)) {
moves++;
}
}
}
return moves;
}
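  // Worked example (assumed counts): with slave counts {A: 5, B: 1} and new
  // instance C added, partitions move one at a time from the max holder to the
  // min holder until max - min <= 1, e.g. ending near {A: 2, B: 2, C: 2}; only
  // moves that land on a new instance count toward the returned total.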
/**
* Randomly select a number of elements from original list and put them in the selectedList
* The algorithm is used to select master partitions to be migrated when new instances are added.
* @param originalList the original list
* @param selectedList the list that contain selected elements
* @param num number of elements to be selected
*/
static void randomSelect(List<Integer> originalList, List<Integer> selectedList, int num) {
assert (originalList.size() >= num);
int numRemains = originalList.size();
Random r = new Random(numRemains);
for (int j = 0; j < num; j++) {
int randIndex = r.nextInt(numRemains);
selectedList.add(originalList.get(randIndex));
originalList.remove(randIndex);
numRemains--;
}
}
public static void main(String args[]) {
List<String> instanceNames = new ArrayList<String>();
for (int i = 0; i < 10; i++) {
instanceNames.add("localhost:123" + i);
}
int partitions = 48 * 3, replicas = 3;
Map<String, Object> resultOriginal =
DefaultIdealStateCalculator.calculateInitialIdealState(instanceNames, partitions, replicas);
}
}
| 9,949 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/YAISCalculator.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
public class YAISCalculator {
static class Assignment {
private final int numNodes;
private final int replication;
Partition[] partitions;
Node[] nodes;
public Assignment(int numNodes, int numPartitions, int replication) {
this.numNodes = numNodes;
this.replication = replication;
partitions = new Partition[numPartitions];
for (int i = 0; i < numPartitions; i++) {
partitions[i] = new Partition(i, replication);
}
nodes = new Node[numNodes];
for (int i = 0; i < numNodes; i++) {
nodes[i] = new Node(replication);
}
}
public void assign(int partitionId, int replicaId, int nodeId) {
System.out.println("Assigning (" + partitionId + "," + replicaId + ") to " + nodeId);
partitions[partitionId].nodeIds[replicaId] = nodeId;
nodes[nodeId].partitionLists.get(replicaId).push(partitionId);
}
public void unassign(int partitionId, int replicaId) {
}
Integer[] getPartitionsPerNode(int nodeId, int replicaId) {
List<Integer> partitionsList = new ArrayList<Integer>();
for (Partition p : partitions) {
if (p.nodeIds[replicaId] == nodeId) {
        partitionsList.add(p.partitionId);
}
}
Integer[] array = new Integer[partitionsList.size()];
partitionsList.toArray(array);
return array;
}
public void printPerNode() {
for (int nodeId = 0; nodeId < numNodes; nodeId++) {
for (int r = 0; r < replication; r++) {
StringBuilder sb = new StringBuilder();
sb.append("(").append(nodeId).append(",").append(r).append("):\t");
Node node = nodes[nodeId];
LinkedList<Integer> linkedList = node.partitionLists.get(r);
for (int partitionId : linkedList) {
sb.append(partitionId).append(",");
}
System.out.println(sb.toString());
}
}
}
}
  static class Partition {
    final int partitionId;
    int[] nodeIds;
    public Partition(int partitionId, int replication) {
      this.partitionId = partitionId;
      nodeIds = new int[replication];
      Arrays.fill(nodeIds, -1);
    }
  }
static class Node {
private final int replication;
ArrayList<LinkedList<Integer>> partitionLists;
public Node(int replication) {
this.replication = replication;
partitionLists = new ArrayList<LinkedList<Integer>>(replication);
for (int i = 0; i < replication; i++) {
partitionLists.add(new LinkedList<Integer>());
}
}
}
public static void main(String[] args) {
doAssignment(new int[] {
5
}, 120, 3);
}
private static void doAssignment(int[] nodes, int partitions, int replication) {
int N = nodes[0];
int totalNodes = 0;
for (int temp : nodes) {
totalNodes += temp;
}
Assignment assignment = new Assignment(totalNodes, partitions, replication);
int nodeId = 0;
for (int i = 0; i < partitions; i++) {
assignment.assign(i, 0, nodeId);
nodeId = (nodeId + 1) % N;
}
Random random = new Random();
for (int r = 1; r < replication; r++) {
for (int id = 0; id < N; id++) {
Integer[] partitionsPerNode = assignment.getPartitionsPerNode(id, 0);
boolean[] used = new boolean[partitionsPerNode.length];
Arrays.fill(used, false);
System.out.println(id + "-" + partitionsPerNode.length);
nodeId = (id + r) % N;
        int count = partitionsPerNode.length;
        do {
          if (nodeId != id) {
            // pick the nextInt-th still-unused partition of this node
            int nextInt = random.nextInt(count);
            int temp = 0;
            for (int b = 0; b < used.length; b++) {
              if (!used[b]) {
                if (temp == nextInt) {
                  assignment.assign(partitionsPerNode[b], r, nodeId);
                  used[b] = true;
                  count--; // one fewer unused partition remains; guarantees termination
                  break;
                }
                temp++;
              }
            }
          }
          nodeId = (nodeId + 1) % N;
        } while (count > 0);
}
}
if (nodes.length > 1) {
int prevNodeCount = nodes[0];
for (int i = 1; i < nodes.length; i++) {
int newNodeCount = prevNodeCount + nodes[i];
int masterPartitionsToMove =
(int) ((partitions * 1.0 / prevNodeCount - partitions * 1.0 / newNodeCount) * 1 * prevNodeCount);
        // TODO: moving masters for incremental expansion is unimplemented; the
        // original empty while-loop here would spin forever once
        // masterPartitionsToMove > 0.
        System.out.println("master partitions to move for batch " + i + ": "
            + masterPartitionsToMove);
        prevNodeCount = newNodeCount;
      }
}
assignment.printPerNode();
}
}
| 9,950 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/TestCommand.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.HelixManager;
public class TestCommand {
public enum CommandType {
MODIFY,
VERIFY,
START,
STOP
}
public static class NodeOpArg {
public HelixManager _manager;
public Thread _thread;
public NodeOpArg(HelixManager manager, Thread thread) {
_manager = manager;
_thread = thread;
}
}
public TestTrigger _trigger;
public CommandType _commandType;
public ZnodeOpArg _znodeOpArg;
public NodeOpArg _nodeOpArg;
public long _startTimestamp;
public long _finishTimestamp;
/**
* @param type
* @param arg
*/
public TestCommand(CommandType type, ZnodeOpArg arg) {
this(type, new TestTrigger(), arg);
}
/**
* @param type
* @param trigger
* @param arg
*/
public TestCommand(CommandType type, TestTrigger trigger, ZnodeOpArg arg) {
_commandType = type;
_trigger = trigger;
_znodeOpArg = arg;
}
/**
* @param type
* @param trigger
* @param arg
*/
public TestCommand(CommandType type, TestTrigger trigger, NodeOpArg arg) {
_commandType = type;
_trigger = trigger;
_nodeOpArg = arg;
}
@Override
public String toString() {
String ret = super.toString().substring(super.toString().lastIndexOf(".") + 1) + " ";
if (_finishTimestamp > 0) {
ret +=
"FINISH@" + _finishTimestamp + "-START@" + _startTimestamp + "="
+ (_finishTimestamp - _startTimestamp) + "ms ";
}
if (_commandType == CommandType.MODIFY || _commandType == CommandType.VERIFY) {
ret += _commandType.toString() + "|" + _trigger.toString() + "|" + _znodeOpArg.toString();
} else if (_commandType == CommandType.START || _commandType == CommandType.STOP) {
ret += _commandType.toString() + "|" + _trigger.toString() + "|" + _nodeOpArg.toString();
}
return ret;
}
}
| 9,951 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/IdealCalculatorByConsistentHashing.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.model.IdealState.IdealStateProperty;
public class IdealCalculatorByConsistentHashing {
/**
* Interface to calculate the hash function value of a string
*/
public interface HashFunction {
public int getHashValue(String key);
}
/**
   * The default string hash function; the same FNV hash used by default in
   * Voldemort.
*/
public static class FnvHash implements HashFunction {
private static final long FNV_BASIS = 0x811c9dc5;
private static final long FNV_PRIME = (1 << 24) + 0x193;
public static final long FNV_BASIS_64 = 0xCBF29CE484222325L;
public static final long FNV_PRIME_64 = 1099511628211L;
public int hash(byte[] key) {
long hash = FNV_BASIS;
for (int i = 0; i < key.length; i++) {
hash ^= 0xFF & key[i];
hash *= FNV_PRIME;
}
return (int) hash;
}
public long hash64(long val) {
long hashval = FNV_BASIS_64;
for (int i = 0; i < 8; i++) {
long octet = val & 0x00ff;
val = val >> 8;
hashval = hashval ^ octet;
hashval = hashval * FNV_PRIME_64;
}
return Math.abs(hashval);
}
@Override
public int getHashValue(String key) {
return hash(key.getBytes());
}
}
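  // Example (illustrative): map a partition name onto a ring of 65536 slots.
  // getHashValue() returns a signed int, so the modulo may be negative and must
  // be normalized, mirroring getFnvHashArray() below.
  //   FnvHash fnv = new FnvHash();
  //   int pos = fnv.getHashValue("espressoDB1.partition-0") % 65536;
  //   if (pos < 0) pos += 65536;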
/**
   * Calculate the ideal state for a list of instances using consistent
   * hashing.
* @param instanceNames
* List of instance names.
* @param partitions
* the partition number of the database
* @param replicas
* the replication degree
* @param resourceName
* the name of the database
   * @param hashFunc
   * the hash function used to place partitions on the hash ring
   * @return The ZNRecord that contains the ideal state
*/
public static ZNRecord calculateIdealState(List<String> instanceNames, int partitions,
int replicas, String resourceName, HashFunction hashFunc) {
return calculateIdealState(instanceNames, partitions, replicas, resourceName, hashFunc, 65536);
}
/**
   * Calculate the ideal state for a list of instances using consistent
   * hashing.
* @param instanceNames
* List of instance names.
* @param partitions
* the partition number of the database
* @param replicas
* the replication degree
* @param resourceName
* the name of the database
   * @param hashFunc
   * the hash function used to place partitions on the hash ring
   * @param hashRingSize
* the size of the hash ring used by consistent hashing
* @return The ZNRecord that contains the ideal state
*/
public static ZNRecord calculateIdealState(List<String> instanceNames, int partitions,
int replicas, String resourceName, HashFunction hashFunc, int hashRingSize) {
ZNRecord result = new ZNRecord(resourceName);
int[] hashRing = generateEvenHashRing(instanceNames, hashRingSize);
result.setSimpleField(IdealStateProperty.NUM_PARTITIONS.toString(), String.valueOf(partitions));
Random rand = new Random(0xc0ffee);
for (int i = 0; i < partitions; i++) {
String partitionName = resourceName + ".partition-" + i;
int hashPos = rand.nextInt() % hashRingSize;
// (int)(hashFunc.getHashValue(partitionName) % hashRingSize);
hashPos = hashPos < 0 ? (hashPos + hashRingSize) : hashPos;
// System.out.print(hashPos+ " ");
// if(i % 120 == 0) System.out.println();
Map<String, String> partitionAssignment = new TreeMap<String, String>();
// the first in the list is the node that contains the master
int masterPos = hashRing[hashPos];
partitionAssignment.put(instanceNames.get(masterPos), "MASTER");
// partitionAssignment.put("hash", "" + hashPos + " " + masterPos);
      // Put slaves in the next hash ring positions, skipping nodes that already
      // host a replica of this partition so that no node is assigned the same
      // partition twice.
for (int j = 1; j <= replicas; j++) {
String next = instanceNames.get(hashRing[(hashPos + j) % hashRingSize]);
while (partitionAssignment.containsKey(next)) {
hashPos++;
next = instanceNames.get(hashRing[(hashPos + j) % hashRingSize]);
}
partitionAssignment.put(next, "SLAVE");
}
result.setMapField(partitionName, partitionAssignment);
}
return result;
}
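  // Usage sketch: spread 200 partitions, each with 2 slaves, over the given
  // instances; see main() below for a runnable version.
  //   ZNRecord ideal = calculateIdealState(instanceNames, 200, 2,
  //       "espressoDB1", new FnvHash(), 65536);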
/**
   * Generate the hash ring for consistent hashing.
   * @param instanceNames
   * List of instance names.
   * @param hashRingSize
   * the size of the hash ring used by consistent hashing
   * @return The hash ring as an int array; each slot holds a value in the range
   * 0..instanceNames.size()-1
*/
public static int[] generateHashRing(List<String> instanceNames, int hashRingSize) {
int[] result = new int[hashRingSize];
for (int i = 0; i < result.length; i++) {
result[i] = 0;
}
int instances = instanceNames.size();
// The following code generates the random distribution
for (int i = 1; i < instances; i++) {
putNodeOnHashring(result, i, hashRingSize / (i + 1), i);
}
return result;
}
public static int[] generateEvenHashRing(List<String> instanceNames, int hashRingSize) {
int[] result = new int[hashRingSize];
for (int i = 0; i < result.length; i++) {
result[i] = 0;
}
int instances = instanceNames.size();
// The following code generates the random distribution
for (int i = 1; i < instances; i++) {
putNodeEvenOnHashRing(result, i, i + 1);
}
return result;
}
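  // Usage sketch: build an evenly loaded ring over the instances and report the
  // per-node slot counts via printHashRingStat() below.
  //   int[] ring = generateEvenHashRing(instanceNames, 65536);
  //   printHashRingStat(ring);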
private static void putNodeEvenOnHashRing(int[] hashRing, int nodeVal, int totalValues) {
int newValNum = hashRing.length / totalValues;
assert (newValNum > 0);
Map<Integer, List<Integer>> valueIndex = buildValueIndex(hashRing);
int nSources = valueIndex.size();
int remainder = newValNum % nSources;
List<List<Integer>> positionLists = new ArrayList<List<Integer>>();
for (List<Integer> list : valueIndex.values()) {
positionLists.add(list);
}
class ListComparator implements Comparator<List<Integer>> {
@Override
public int compare(List<Integer> o1, List<Integer> o2) {
return (o1.size() > o2.size() ? -1 : (o1.size() == o2.size() ? 0 : 1));
}
}
Collections.sort(positionLists, new ListComparator());
for (List<Integer> oldValPositions : positionLists) {
// List<Integer> oldValPositions = valueIndex.get(oldVal);
int nValsToReplace = newValNum / nSources;
assert (nValsToReplace > 0);
if (remainder > 0) {
nValsToReplace++;
remainder--;
}
// System.out.print(oldValPositions.size()+" "+nValsToReplace+" ");
putNodeValueOnHashRing(hashRing, nodeVal, nValsToReplace, oldValPositions);
      // randomly take nValsToReplace positions in oldValPositions and assign
      // them the new node value
}
// System.out.println();
}
private static void putNodeValueOnHashRing(int[] hashRing, int nodeVal, int numberOfValues,
List<Integer> positions) {
Random rand = new Random(nodeVal);
// initialize the index array
int[] index = new int[positions.size()];
for (int i = 0; i < index.length; i++) {
index[i] = i;
}
int nodesLeft = index.length;
for (int i = 0; i < numberOfValues; i++) {
// Calculate a random index
int randIndex = rand.nextInt() % nodesLeft;
if (randIndex < 0) {
randIndex += nodesLeft;
}
hashRing[positions.get(index[randIndex])] = nodeVal;
// swap the random index and the last available index, and decrease the
// nodes left
int temp = index[randIndex];
index[randIndex] = index[nodesLeft - 1];
index[nodesLeft - 1] = temp;
nodesLeft--;
}
}
private static Map<Integer, List<Integer>> buildValueIndex(int[] hashRing) {
Map<Integer, List<Integer>> result = new TreeMap<Integer, List<Integer>>();
for (int i = 0; i < hashRing.length; i++) {
if (!result.containsKey(hashRing[i])) {
List<Integer> list = new ArrayList<Integer>();
result.put(hashRing[i], list);
}
result.get(hashRing[i]).add(i);
}
return result;
}
/**
   * Uniformly put node values on the hash ring. Derived from the Fisher-Yates
   * shuffling algorithm.
* @param result
* the hash ring array.
* @param nodeValue
* the int value to be added to the hash ring this time
* @param numberOfNodes
* number of node values to put on the hash ring array
* @param randomSeed
* the random seed
*/
public static void putNodeOnHashring(int[] result, int nodeValue, int numberOfNodes,
int randomSeed) {
Random rand = new Random(randomSeed);
// initialize the index array
int[] index = new int[result.length];
for (int i = 0; i < index.length; i++) {
index[i] = i;
}
int nodesLeft = index.length;
for (int i = 0; i < numberOfNodes; i++) {
// Calculate a random index
int randIndex = rand.nextInt() % nodesLeft;
if (randIndex < 0) {
randIndex += nodesLeft;
}
if (result[index[randIndex]] == nodeValue) {
assert (false);
}
result[index[randIndex]] = nodeValue;
// swap the random index and the last available index, and decrease the
// nodes left
int temp = index[randIndex];
index[randIndex] = index[nodesLeft - 1];
index[nodesLeft - 1] = temp;
nodesLeft--;
}
}
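  // Usage sketch: overlay node value 1 onto half the slots of a fresh ring that
  // initially belongs entirely to node 0, as generateHashRing() above does.
  //   int[] ring = new int[65536];
  //   putNodeOnHashring(ring, 1, ring.length / 2, 1);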
/**
* Helper function to see how many partitions are mapped to different
* instances in two ideal states
*/
public static void printDiff(ZNRecord record1, ZNRecord record2) {
int diffCount = 0;
for (String key : record1.getMapFields().keySet()) {
Map<String, String> map1 = record1.getMapField(key);
Map<String, String> map2 = record2.getMapField(key);
for (String k : map1.keySet()) {
if (!map2.containsKey(k)) {
diffCount++;
} else if (!map1.get(k).equalsIgnoreCase(map2.get(k))) {
diffCount++;
}
}
}
System.out.println("diff count = " + diffCount);
}
/**
* Helper function to compare the difference between two hashing buffers
*/
public static void compareHashrings(int[] ring1, int[] ring2) {
int diff = 0;
for (int i = 0; i < ring1.length; i++) {
if (ring1[i] != ring2[i]) {
diff++;
}
}
System.out.println("ring diff: " + diff);
}
public static void printNodeOfflineOverhead(ZNRecord record) {
// build node -> partition map
Map<String, Set<String>> nodeNextMap = new TreeMap<String, Set<String>>();
for (String partitionName : record.getMapFields().keySet()) {
Map<String, String> map1 = record.getMapField(partitionName);
String master = "", slave = "";
for (String nodeName : map1.keySet()) {
if (!nodeNextMap.containsKey(nodeName)) {
nodeNextMap.put(nodeName, new TreeSet<String>());
}
// String master = "", slave = "";
if (map1.get(nodeName).equalsIgnoreCase("MASTER")) {
master = nodeName;
} else {
if (slave.equalsIgnoreCase("")) {
slave = nodeName;
}
}
}
nodeNextMap.get(master).add(slave);
}
System.out.println("next count: ");
for (String key : nodeNextMap.keySet()) {
System.out.println(nodeNextMap.get(key).size() + " ");
}
System.out.println();
}
/**
* Helper function to calculate and print the standard deviation of the
* partition assignment ideal state, also the min/max of master partitions
* that is hosted on each node
*/
public static void printIdealStateStats(ZNRecord record, String value) {
Map<String, Integer> countsMap = new TreeMap<String, Integer>();
for (String key : record.getMapFields().keySet()) {
Map<String, String> map1 = record.getMapField(key);
for (String k : map1.keySet()) {
if (!countsMap.containsKey(k)) {
        countsMap.put(k, 0);
}
if (value.equals("") || map1.get(k).equalsIgnoreCase(value)) {
countsMap.put(k, countsMap.get(k).intValue() + 1);
}
}
}
double sum = 0;
int maxCount = 0;
int minCount = Integer.MAX_VALUE;
System.out.println("Partition distributions: ");
for (String k : countsMap.keySet()) {
int count = countsMap.get(k);
sum += count;
if (maxCount < count) {
maxCount = count;
}
if (minCount > count) {
minCount = count;
}
System.out.print(count + " ");
}
System.out.println();
double mean = sum / (countsMap.size());
// calculate the deviation of the node distribution
double deviation = 0;
for (String k : countsMap.keySet()) {
double count = countsMap.get(k);
deviation += (count - mean) * (count - mean);
}
System.out.println("Mean: " + mean + " normal deviation:"
+ Math.sqrt(deviation / countsMap.size()));
System.out.println("Max count: " + maxCount + " min count:" + minCount);
/*
* int steps = 10; int stepLen = (maxCount - minCount)/steps; List<Integer>
* histogram = new ArrayList<Integer>((maxCount - minCount)/stepLen + 1);
* for(int i = 0; i< (maxCount - minCount)/stepLen + 1; i++) {
* histogram.add(0); } for(String k :countsMap.keySet()) { int count =
* countsMap.get(k); int stepNo = (count - minCount)/stepLen;
* histogram.set(stepNo, histogram.get(stepNo) +1); }
* System.out.println("histogram:"); for(Integer x : histogram) {
* System.out.print(x+" "); }
*/
}
public static void printHashRingStat(int[] hashRing) {
double sum = 0, mean = 0, deviation = 0;
Map<Integer, Integer> countsMap = new TreeMap<Integer, Integer>();
for (int i = 0; i < hashRing.length; i++) {
if (!countsMap.containsKey(hashRing[i])) {
        countsMap.put(hashRing[i], 0);
}
countsMap.put(hashRing[i], countsMap.get(hashRing[i]).intValue() + 1);
}
int maxCount = Integer.MIN_VALUE;
int minCount = Integer.MAX_VALUE;
for (Integer k : countsMap.keySet()) {
int count = countsMap.get(k);
sum += count;
if (maxCount < count) {
maxCount = count;
}
if (minCount > count) {
minCount = count;
}
}
mean = sum / countsMap.size();
for (Integer k : countsMap.keySet()) {
int count = countsMap.get(k);
deviation += (count - mean) * (count - mean);
}
System.out.println("hashring Mean: " + mean + " normal deviation:"
+ Math.sqrt(deviation / countsMap.size()));
}
static int[] getFnvHashArray(List<String> strings) {
int[] result = new int[strings.size()];
int i = 0;
IdealCalculatorByConsistentHashing.FnvHash hashfunc =
new IdealCalculatorByConsistentHashing.FnvHash();
for (String s : strings) {
int val = hashfunc.getHashValue(s) % 65536;
if (val < 0)
val += 65536;
result[i++] = val;
}
return result;
}
static void printArrayStat(int[] vals) {
double sum = 0, mean = 0, deviation = 0;
for (int i = 0; i < vals.length; i++) {
sum += vals[i];
}
mean = sum / vals.length;
for (int i = 0; i < vals.length; i++) {
deviation += (mean - vals[i]) * (mean - vals[i]);
}
System.out.println("normalized deviation: " + Math.sqrt(deviation / vals.length) / mean);
}
  public static void main(String[] args) throws Exception {
// Test the hash ring generation
List<String> instanceNames = new ArrayList<String>();
for (int i = 0; i < 10; i++) {
instanceNames.add("localhost_123" + i);
}
// int[] ring1 =
// IdealCalculatorByConsistentHashing.generateEvenHashRing(instanceNames,
// 65535);
// printHashRingStat(ring1);
// int[] ring1 = getFnvHashArray(instanceNames);
// printArrayStat(ring1);
int partitions = 200, replicas = 2;
String dbName = "espressoDB1";
ZNRecord result =
IdealCalculatorByConsistentHashing.calculateIdealState(instanceNames, partitions, replicas,
dbName, new IdealCalculatorByConsistentHashing.FnvHash());
System.out.println("\nMaster :");
printIdealStateStats(result, "MASTER");
System.out.println("\nSlave :");
printIdealStateStats(result, "SLAVE");
System.out.println("\nTotal :");
printIdealStateStats(result, "");
printNodeOfflineOverhead(result);
/*
* ZNRecordSerializer serializer = new ZNRecordSerializer(); byte[] bytes;
* bytes = serializer.serialize(result); // System.out.println(new
* String(bytes));
* List<String> instanceNames2 = new ArrayList<String>(); for(int i = 0;i <
* 40; i++) { instanceNames2.add("localhost_123"+i); }
* ZNRecord result2 =
* IdealCalculatorByConsistentHashing.calculateIdealState( instanceNames2,
* partitions, replicas, dbName, new
* IdealCalculatorByConsistentHashing.FnvHash());
* printDiff(result, result2);
* //IdealCalculatorByConsistentHashing.printIdealStateStats(result2);
* int[] ring2 =
* IdealCalculatorByConsistentHashing.generateHashRing(instanceNames2,
* 30000);
* IdealCalculatorByConsistentHashing.compareHashrings(ring1, ring2);
* //printNodeStats(result); //printNodeStats(result2); bytes =
* serializer.serialize(result2); printHashRingStat(ring2); //
* System.out.println(new String(bytes));
*/
}
}
| 9,952 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ClusterSetup.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixConstants;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey;
import org.apache.helix.SystemPropertyKeys;
import org.apache.helix.cloud.azure.AzureConstants;
import org.apache.helix.cloud.constants.CloudProvider;
import org.apache.helix.manager.zk.GenericZkHelixApiBuilder;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.CloudConfig;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.ClusterConstraints;
import org.apache.helix.model.ClusterConstraints.ConstraintType;
import org.apache.helix.model.ConstraintItem;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.model.builder.ConstraintItemBuilder;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.util.HelixUtil;
import org.apache.helix.util.InstanceValidationUtil;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.client.FederatedZkClient;
import org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory;
import org.apache.helix.zookeeper.introspect.CodehausJacksonIntrospector;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ClusterSetup {
private static Logger logger = LoggerFactory.getLogger(ClusterSetup.class);
public static final String zkServerAddress = "zkSvr";
// List info about the cluster / resource / Instances
public static final String listClusters = "listClusters";
public static final String listResources = "listResources";
public static final String listInstances = "listInstances";
// Add, drop, and rebalance
public static final String addCluster = "addCluster";
public static final String activateCluster = "activateCluster";
public static final String dropCluster = "dropCluster";
public static final String dropResource = "dropResource";
public static final String addInstance = "addNode";
public static final String addResource = "addResource";
public static final String addStateModelDef = "addStateModelDef";
public static final String addIdealState = "addIdealState";
public static final String swapInstance = "swapInstance";
public static final String dropInstance = "dropNode";
public static final String rebalance = "rebalance";
public static final String expandCluster = "expandCluster";
public static final String expandResource = "expandResource";
public static final String mode = "mode";
public static final String tag = "tag";
public static final String instanceGroupTag = "instanceGroupTag";
public static final String bucketSize = "bucketSize";
public static final String resourceKeyPrefix = "key";
public static final String maxPartitionsPerNode = "maxPartitionsPerNode";
public static final String addResourceProperty = "addResourceProperty";
public static final String removeResourceProperty = "removeResourceProperty";
public static final String addInstanceTag = "addInstanceTag";
public static final String removeInstanceTag = "removeInstanceTag";
public static final String enableResource = "enableResource";
// Query info (TBD in V2)
public static final String listClusterInfo = "listClusterInfo";
public static final String listInstanceInfo = "listInstanceInfo";
public static final String listResourceInfo = "listResourceInfo";
public static final String listPartitionInfo = "listPartitionInfo";
public static final String listStateModels = "listStateModels";
public static final String listStateModel = "listStateModel";
// enable/disable/reset instances/cluster/resource/partition
public static final String enableInstance = "enableInstance";
public static final String enablePartition = "enablePartition";
public static final String enableCluster = "enableCluster";
public static final String resetPartition = "resetPartition";
public static final String resetInstance = "resetInstance";
public static final String resetResource = "resetResource";
// help
public static final String help = "help";
// get/set/remove configs
public static final String getConfig = "getConfig";
public static final String setConfig = "setConfig";
public static final String removeConfig = "removeConfig";
// set/remove cloud configs
public static final String setCloudConfig = "setCloudConfig";
public static final String removeCloudConfig = "removeCloudConfig";
// get/set/remove constraints
public static final String getConstraints = "getConstraints";
public static final String setConstraint = "setConstraint";
public static final String removeConstraint = "removeConstraint";
private static final Logger _logger = LoggerFactory.getLogger(ClusterSetup.class);
private final RealmAwareZkClient _zkClient;
// true if ZkBaseDataAccessor was instantiated with a RealmAwareZkClient, false otherwise
// This is used for close() to determine how ZkBaseDataAccessor should close the underlying
// ZkClient
private final boolean _usesExternalZkClient;
private final HelixAdmin _admin;
protected static ObjectReader ZNRECORD_READER = new ObjectMapper()
.setAnnotationIntrospector(new CodehausJacksonIntrospector())
.readerFor(ZNRecord.class);
@Deprecated
public ClusterSetup(String zkServerAddress) {
// If the multi ZK config is enabled, use FederatedZkClient on multi-realm mode
if (Boolean.getBoolean(SystemPropertyKeys.MULTI_ZK_ENABLED) || zkServerAddress == null) {
try {
_zkClient = new FederatedZkClient(
new RealmAwareZkClient.RealmAwareZkConnectionConfig.Builder().build(),
new RealmAwareZkClient.RealmAwareZkClientConfig()
.setZkSerializer(new ZNRecordSerializer()));
} catch (InvalidRoutingDataException | IllegalStateException e) {
        throw new HelixException("Failed to create ClusterSetup!", e);
}
} else {
_zkClient = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(zkServerAddress));
_zkClient.setZkSerializer(new ZNRecordSerializer());
}
_admin = new ZKHelixAdmin(_zkClient);
_usesExternalZkClient = false;
}
@Deprecated
public ClusterSetup(RealmAwareZkClient zkClient) {
_zkClient = zkClient;
_admin = new ZKHelixAdmin(_zkClient);
_usesExternalZkClient = true;
}
@Deprecated
public ClusterSetup(RealmAwareZkClient zkClient, HelixAdmin zkHelixAdmin) {
_zkClient = zkClient;
_admin = zkHelixAdmin;
_usesExternalZkClient = true;
}
private ClusterSetup(RealmAwareZkClient zkClient, boolean usesExternalZkClient) {
_zkClient = zkClient;
_admin = new ZKHelixAdmin(_zkClient);
_usesExternalZkClient = usesExternalZkClient;
}
/**
* Closes any stateful resources in ClusterSetup.
*/
public void close() {
if (_zkClient != null && !_usesExternalZkClient) {
_admin.close();
_zkClient.close();
}
}
@Override
public void finalize() {
close();
}
public void addCluster(String clusterName, boolean overwritePrevious, CloudConfig cloudConfig)
throws HelixException {
if (!_admin.addCluster(clusterName, overwritePrevious)) {
String error = "Cluster creation failed for " + clusterName;
_logger.error(error);
throw new HelixException(error);
}
for (BuiltInStateModelDefinitions def : BuiltInStateModelDefinitions.values()) {
addStateModelDef(clusterName, def.getStateModelDefinition().getId(),
def.getStateModelDefinition(), overwritePrevious);
}
if (cloudConfig != null) {
_admin.addCloudConfig(clusterName, cloudConfig);
      // If cloud is enabled and the cloud provider is Azure, populate the
      // topology information in the cluster config
if (cloudConfig.isCloudEnabled()
&& cloudConfig.getCloudProvider().equals(CloudProvider.AZURE.name())) {
ConfigAccessor configAccessor = new ConfigAccessor(_zkClient);
ClusterConfig clusterConfig = new ClusterConfig(clusterName);
clusterConfig.setTopology(AzureConstants.AZURE_TOPOLOGY);
clusterConfig.setTopologyAwareEnabled(true);
clusterConfig.setFaultZoneType(AzureConstants.AZURE_FAULT_ZONE_TYPE);
configAccessor.updateClusterConfig(clusterName, clusterConfig);
}
}
}
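  // Usage sketch (hypothetical cluster name): create a fresh cluster with all
  // built-in state model definitions and no cloud config.
  //   setup.addCluster("myCluster", false /* don't overwrite */, null);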
public void addCluster(String clusterName, boolean overwritePrevious) {
addCluster(clusterName, overwritePrevious, null);
}
public void activateCluster(String clusterName, String grandCluster, boolean enable) {
if (enable) {
_admin.addClusterToGrandCluster(clusterName, grandCluster);
} else {
_admin.dropResource(grandCluster, clusterName);
}
}
public void deleteCluster(String clusterName) {
_admin.dropCluster(clusterName);
}
public void addInstancesToCluster(String clusterName, String[] instanceInfoArray) {
for (String instanceInfo : instanceInfoArray) {
if (instanceInfo.length() > 0) {
addInstanceToCluster(clusterName, instanceInfo);
}
}
}
public void addInstanceToCluster(String clusterName, String instanceId) {
InstanceConfig config = InstanceConfig.toInstanceConfig(instanceId);
_admin.addInstance(clusterName, config);
}
public void addInstanceTag(String clusterName, String instanceName, String tag) {
_admin.addInstanceTag(clusterName, instanceName, tag);
}
public void dropInstancesFromCluster(String clusterName, String[] instanceInfoArray) {
for (String instanceInfo : instanceInfoArray) {
if (instanceInfo.length() > 0) {
dropInstanceFromCluster(clusterName, instanceInfo);
}
}
}
public void dropInstanceFromCluster(String clusterName, String instanceId) {
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
InstanceConfig instanceConfig = InstanceConfig.toInstanceConfig(instanceId);
instanceId = instanceConfig.getInstanceName();
// ensure node is not live
LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceId));
if (liveInstance != null) {
throw new HelixException(String
.format("Cannot drop instance %s as it is still live. Please stop it first", instanceId));
}
InstanceConfig config = accessor.getProperty(keyBuilder.instanceConfig(instanceId));
if (config == null) {
String error = "Node " + instanceId + " does not exist, cannot drop";
_logger.warn(error);
throw new HelixException(error);
}
ClusterConfig clusterConfig = accessor.getProperty(keyBuilder.clusterConfig());
// ensure node is disabled, otherwise fail
if (InstanceValidationUtil.isInstanceEnabled(config, clusterConfig)) {
String error = "Node " + instanceId + " is enabled, cannot drop";
_logger.warn(error);
throw new HelixException(error);
}
_admin.dropInstance(clusterName, config);
}
/**
   * For CUSTOMIZED and SEMI_AUTO resources, this tool changes the instance mapping
   * in the cluster. When a node is replaced, we rewrite the preference list
   * and map field in the IdealState of every resource, replacing the old instance
   * with the new one.
   *
   * This method ignores all resources in FULL_AUTO rebalance mode.
   * It ensures that the old instance is disabled AND not alive, but it is OK if the
   * new instance has just been created and is not yet live / enabled.
   *
   * @param clusterName cluster name
   * @param oldInstanceName old instance to swap out
   * @param newInstanceName new instance to swap in
*/
public void swapInstance(String clusterName, final String oldInstanceName, final String newInstanceName) {
if (oldInstanceName.equals(newInstanceName)) {
_logger.info("Old instance has same name as new instance, no need to swap");
return;
}
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
// If new instance config is missing, new instance is not in good state and therefore
// should not perform swap.
// It is OK that we miss old instance config for idempotency of this method
InstanceConfig newConfig = accessor.getProperty(keyBuilder.instanceConfig(newInstanceName));
if (newConfig == null) {
String error = "New instance " + newInstanceName + " does not exist, cannot swap";
_logger.warn(error);
throw new HelixException(error);
}
try {
// drop instance will ensure the old instance is disabled, and not live, or it will
// throw exception
dropInstanceFromCluster(clusterName, oldInstanceName);
} catch (HelixException e) {
      // If the old instance is already gone, continue the swap. Note that due to
      // some earlier error we may still keep a disabled record of the old instance
      // in the cluster config; we don't strictly check and fix that here.
if (e.toString().contains("does not exist")) {
_logger.warn("Instance {} does not exist, continue to swap instance for cluster {}",
oldInstanceName, clusterName);
} else {
_logger.warn("Failed to drop instance {} from cluster {}", oldInstanceName, clusterName, e);
throw e;
}
}
// When the amount of ideal state data is huge, we might only read partially from ZK
// so the safest way is to list first and read each individual ideal state
List<String> existingIdealStateNames =
accessor.getChildNames(accessor.keyBuilder().idealStates());
for (final String resourceName : existingIdealStateNames) {
IdealState resourceIdealState =
accessor.getProperty(accessor.keyBuilder().idealStates(resourceName));
if (resourceIdealState.getRebalanceMode().equals(RebalanceMode.FULL_AUTO)) {
_logger.warn("Resource {} is in FULL_AUTO rebalance mode, don't swap", resourceName);
continue;
}
// For CUSTOMIZED and SEMI_AUTO rebalance mode, swap instance
swapInstanceInIdealState(resourceIdealState, oldInstanceName, newInstanceName);
// Update ideal state
accessor.updateProperty(accessor.keyBuilder().idealStates(resourceName),
new DataUpdater<ZNRecord>() {
@Override
public ZNRecord update(ZNRecord znRecord) {
if (znRecord == null) {
throw new HelixException(String.format(
"swapInstance DataUpdater: IdealState for resource %s no longer exists!",
resourceName));
}
// Need to swap again in case there are added partition with old instance
swapInstanceInIdealState(new IdealState(znRecord), oldInstanceName, newInstanceName);
return znRecord;
}
}, resourceIdealState);
_logger.info("Successfully swapped instance for resource {}", resourceName);
}
}
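  // Usage sketch (hypothetical host names): swap a decommissioned node for a
  // replacement; the old node must already be disabled and not live. The
  // deprecated single-ZK constructor is used here only for brevity.
  //   ClusterSetup setup = new ClusterSetup("localhost:2181");
  //   setup.swapInstance("myCluster", "localhost_8900", "localhost_8906");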
/**
* Replace old instance name in map field and list field with new instance name
* @param idealState ideal state object
* @param oldInstance old instance name
* @param newInstance new instance name
*/
void swapInstanceInIdealState(IdealState idealState, String oldInstance, String newInstance) {
for (String partition : idealState.getRecord().getMapFields().keySet()) {
Map<String, String> valMap = idealState.getRecord().getMapField(partition);
if (valMap.containsKey(oldInstance)) {
valMap.put(newInstance, valMap.get(oldInstance));
valMap.remove(oldInstance);
}
}
for (String partition : idealState.getRecord().getListFields().keySet()) {
List<String> valList = idealState.getRecord().getListField(partition);
for (int i = 0; i < valList.size(); i++) {
if (valList.get(i).equals(oldInstance)) {
valList.remove(i);
valList.add(i, newInstance);
}
}
}
}
public HelixAdmin getClusterManagementTool() {
return _admin;
}
public void addStateModelDef(String clusterName, String stateModelDef,
StateModelDefinition record) {
_admin.addStateModelDef(clusterName, stateModelDef, record);
}
public void addStateModelDef(String clusterName, String stateModelDef,
StateModelDefinition record, boolean overwritePrevious) {
_admin.addStateModelDef(clusterName, stateModelDef, record, overwritePrevious);
}
public void addResourceToCluster(String clusterName, String resourceName, IdealState idealState) {
_admin.addResource(clusterName, resourceName, idealState);
}
public void addResourceToCluster(String clusterName, String resourceName, int numPartitions,
String stateModelRef) {
addResourceToCluster(clusterName, resourceName, numPartitions, stateModelRef,
RebalanceMode.SEMI_AUTO.toString());
}
public void addResourceToCluster(String clusterName, String resourceName, int numPartitions,
String stateModelRef, String rebalancerMode) {
_admin.addResource(clusterName, resourceName, numPartitions, stateModelRef, rebalancerMode);
}
public void addResourceToCluster(String clusterName, String resourceName, int numPartitions,
String stateModelRef, String rebalancerMode, String rebalanceStrategy) {
_admin.addResource(clusterName, resourceName, numPartitions, stateModelRef, rebalancerMode,
rebalanceStrategy);
}
public void addResourceToCluster(String clusterName, String resourceName, int numPartitions,
String stateModelRef, String rebalancerMode, int bucketSize) {
_admin.addResource(clusterName, resourceName, numPartitions, stateModelRef, rebalancerMode,
bucketSize);
}
public void addResourceToCluster(String clusterName, String resourceName, int numPartitions,
String stateModelRef, String rebalancerMode, int bucketSize, int maxPartitionsPerInstance) {
_admin.addResource(clusterName, resourceName, numPartitions, stateModelRef, rebalancerMode,
bucketSize, maxPartitionsPerInstance);
}
/**
   * Get the mangled IdealState name used when resourceGroup/resourceTag is enabled.
*/
public static String genIdealStateNameWithResourceTag(String resourceName, String resourceTag) {
return resourceName + "$" + resourceTag;
}
/**
   * Create an IdealState for a resource that belongs to a resource group. We use
   * "resourceGroupName$resourceInstanceTag" as the IdealState znode name to differentiate
   * resources from the same resourceGroup.
*/
public IdealState createIdealStateForResourceGroup(String resourceGroupName,
String resourceTag, int numPartition, int replica, String rebalanceMode, String stateModelDefName) {
String idealStateId = genIdealStateNameWithResourceTag(resourceGroupName, resourceTag);
IdealState idealState = new IdealState(idealStateId);
idealState.setNumPartitions(numPartition);
idealState.setStateModelDefRef(stateModelDefName);
IdealState.RebalanceMode mode =
idealState.rebalanceModeFromString(rebalanceMode, IdealState.RebalanceMode.SEMI_AUTO);
idealState.setRebalanceMode(mode);
idealState.setReplicas("" + replica);
idealState.setStateModelFactoryName(HelixConstants.DEFAULT_STATE_MODEL_FACTORY);
idealState.setResourceGroupName(resourceGroupName);
idealState.setInstanceGroupTag(resourceTag);
idealState.enableGroupRouting(true);
return idealState;
}
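  // Usage sketch (hypothetical names): the IdealState znode becomes
  // "myResource$tagA" via genIdealStateNameWithResourceTag() above.
  //   IdealState is = setup.createIdealStateForResourceGroup(
  //       "myResource", "tagA", 8, 3, "SEMI_AUTO", "MasterSlave");
  //   setup.addResourceToCluster("myCluster", is.getId(), is);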
/**
* Enable or disable a resource within a resource group associated with a given resource tag
*
* @param clusterName
* @param resourceName
* @param resourceTag
*/
public void enableResource(String clusterName, String resourceName, String resourceTag,
boolean enabled) {
String idealStateId = genIdealStateNameWithResourceTag(resourceName, resourceTag);
_admin.enableResource(clusterName, idealStateId, enabled);
}
public void dropResourceFromCluster(String clusterName, String resourceName) {
_admin.dropResource(clusterName, resourceName);
}
// TODO: remove this. has moved to ZkHelixAdmin
public void rebalanceStorageCluster(String clusterName, String resourceName, int replica) {
rebalanceStorageCluster(clusterName, resourceName, replica, resourceName);
}
public void rebalanceResource(String clusterName, String resourceName, int replica) {
rebalanceStorageCluster(clusterName, resourceName, replica, resourceName);
}
public void expandResource(String clusterName, String resourceName) {
IdealState idealState = _admin.getResourceIdealState(clusterName, resourceName);
if (idealState.getRebalanceMode() == RebalanceMode.FULL_AUTO
|| idealState.getRebalanceMode() == RebalanceMode.CUSTOMIZED) {
_logger.info("Skipping idealState " + idealState.getResourceName() + " "
+ idealState.getRebalanceMode());
return;
}
boolean anyLiveInstance = false;
for (List<String> list : idealState.getRecord().getListFields().values()) {
if (list.contains(IdealState.IdealStateConstants.ANY_LIVEINSTANCE.toString())) {
_logger.info("Skipping idealState " + idealState.getResourceName()
+ " with ANY_LIVEINSTANCE");
anyLiveInstance = true;
continue;
}
}
if (anyLiveInstance) {
return;
}
    try {
      // Validate that the replica count is numeric; only a successful parse
      // matters here, the value itself is unused.
      Integer.parseInt(idealState.getReplicas());
    } catch (Exception e) {
      _logger.error("Invalid replica count for resource " + resourceName, e);
      return;
}
if (idealState.getRecord().getListFields().size() == 0) {
_logger.warn("Resource " + resourceName + " not balanced, skip");
return;
}
balanceIdealState(clusterName, idealState);
}
public void expandCluster(String clusterName) {
List<String> resources = _admin.getResourcesInCluster(clusterName);
for (String resourceName : resources) {
expandResource(clusterName, resourceName);
}
}
public void balanceIdealState(String clusterName, IdealState idealState) {
    // The new instances have already been added to the cluster, so we need to
    // find the instances that already have partitions assigned to them.
List<String> instanceNames = _admin.getInstancesInCluster(clusterName);
rebalanceResource(clusterName, idealState, instanceNames);
}
private void rebalanceResource(String clusterName, IdealState idealState,
List<String> instanceNames) {
_admin.rebalance(clusterName, idealState, instanceNames);
}
public void rebalanceStorageCluster(String clusterName, String resourceName, int replica,
String keyPrefix) {
_admin.rebalance(clusterName, resourceName, replica, keyPrefix, "");
}
public void rebalanceCluster(String clusterName, String resourceName, int replica,
String keyPrefix, String group) {
_admin.rebalance(clusterName, resourceName, replica, keyPrefix, group);
}
public void rebalanceStorageCluster(String clusterName, String resourceName, String group,
int replica) {
_admin.rebalance(clusterName, resourceName, replica, resourceName, group);
}
/**
* set configs
* @param type config-scope type, e.g. CLUSTER, RESOURCE, etc.
   * @param scopeArgsCsv csv-formatted scope-args, e.g. myCluster,testDB
* @param keyValuePairs csv-formatted key-value pairs. e.g. k1=v1,k2=v2
*/
public void setConfig(ConfigScopeProperty type, String scopeArgsCsv, String keyValuePairs) {
String[] scopeArgs = scopeArgsCsv.split("[\\s,]");
HelixConfigScope scope = new HelixConfigScopeBuilder(type, scopeArgs).build();
Map<String, String> keyValueMap = HelixUtil.parseCsvFormatedKeyValuePairs(keyValuePairs);
_admin.setConfig(scope, keyValueMap);
}
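  // Example (scope args and keys are illustrative): set two resource-level keys
  // using the csv formats described in the javadoc above.
  //   setup.setConfig(ConfigScopeProperty.RESOURCE, "myCluster,testDB", "k1=v1,k2=v2");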
/**
* remove configs
* @param type config-scope type, e.g. CLUSTER, RESOURCE, etc.
   * @param scopeArgsCsv csv-formatted scope-args, e.g. myCluster,testDB
* @param keysCsv csv-formatted keys. e.g. k1,k2
*/
public void removeConfig(ConfigScopeProperty type, String scopeArgsCsv, String keysCsv) {
String[] scopeArgs = scopeArgsCsv.split("[\\s,]");
HelixConfigScope scope = new HelixConfigScopeBuilder(type, scopeArgs).build();
String[] keys = keysCsv.split("[\\s,]");
_admin.removeConfig(scope, Arrays.asList(keys));
}
/**
* set cloud configs
* @param clusterName
* @param cloudConfigManifest
*/
public void setCloudConfig(String clusterName, String cloudConfigManifest) {
ZNRecord record;
try {
record = ZNRECORD_READER.readValue(cloudConfigManifest);
} catch (IOException e) {
      _logger.error("Failed to deserialize user's input " + cloudConfigManifest, e);
      throw new IllegalArgumentException("Failed to deserialize user's input", e);
}
CloudConfig cloudConfig = new CloudConfig.Builder(record).build();
_admin.addCloudConfig(clusterName, cloudConfig);
}
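  // Example manifest (mirrors the CLI help text for --setCloudConfig below):
  // enable cloud mode with Azure as the provider.
  //   setup.setCloudConfig("myCluster",
  //       "{\"simpleFields\" : {\"CLOUD_ENABLED\" : \"true\", \"CLOUD_PROVIDER\": \"AZURE\"}}");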
/**
* remove cloud configs
* @param clusterName
*/
public void removeCloudConfig(String clusterName) {
_admin.removeCloudConfig(clusterName);
}
/**
* get configs
* @param type config-scope-type, e.g. CLUSTER, RESOURCE, etc.
   * @param scopeArgsCsv csv-formatted scope-args, e.g. myCluster,testDB
   * @param keysCsv csv-formatted keys, e.g. k1,k2
   * @return json-formatted key-value pairs, e.g. {k1=v1,k2=v2}
*/
public String getConfig(ConfigScopeProperty type, String scopeArgsCsv, String keysCsv) {
String[] scopeArgs = scopeArgsCsv.split("[\\s,]");
HelixConfigScope scope = new HelixConfigScopeBuilder(type, scopeArgs).build();
String[] keys = keysCsv.split("[\\s,]");
Map<String, String> keyValueMap = _admin.getConfig(scope, Arrays.asList(keys));
ZNRecord record = new ZNRecord(type.toString());
record.getSimpleFields().putAll(keyValueMap);
ZNRecordSerializer serializer = new ZNRecordSerializer();
return new String(serializer.serialize(record));
}
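  // Example (illustrative): read the keys back as a JSON-serialized ZNRecord.
  //   String json = setup.getConfig(ConfigScopeProperty.RESOURCE, "myCluster,testDB", "k1,k2");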
/**
* set constraint
* @param clusterName
* @param constraintType
* @param constraintId
   * @param constraintAttributesMap : csv-formatted constraint key-value pairs
*/
public void setConstraint(String clusterName, String constraintType, String constraintId,
String constraintAttributesMap) {
if (clusterName == null || constraintType == null || constraintId == null
|| constraintAttributesMap == null) {
throw new IllegalArgumentException(
"fail to set constraint. missing clusterName|constraintType|constraintId|constraintAttributesMap");
}
ConstraintType type = ConstraintType.valueOf(constraintType);
ConstraintItemBuilder builder = new ConstraintItemBuilder();
Map<String, String> constraintAttributes =
HelixUtil.parseCsvFormatedKeyValuePairs(constraintAttributesMap);
ConstraintItem constraintItem = builder.addConstraintAttributes(constraintAttributes).build();
_admin.setConstraint(clusterName, type, constraintId, constraintItem);
}
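  // Usage sketch (constraint id and attribute pair are hypothetical examples of
  // the csv format accepted above):
  //   setup.setConstraint("myCluster", "MESSAGE_CONSTRAINT", "constraint1",
  //       "CONSTRAINT_VALUE=1");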
/**
* remove constraint
* @param clusterName
* @param constraintType
* @param constraintId
*/
public void removeConstraint(String clusterName, String constraintType, String constraintId) {
if (clusterName == null || constraintType == null || constraintId == null) {
throw new IllegalArgumentException(
"fail to remove constraint. missing clusterName|constraintType|constraintId");
}
ConstraintType type = ConstraintType.valueOf(constraintType);
_admin.removeConstraint(clusterName, type, constraintId);
}
/**
* get constraints associated with given type
* @param constraintType : constraint-type. e.g. MESSAGE_CONSTRAINT
   * @return json-formatted constraints
*/
public String getConstraints(String clusterName, String constraintType) {
if (clusterName == null || constraintType == null) {
throw new IllegalArgumentException(
"fail to get constraint. missing clusterName|constraintType");
}
ConstraintType type = ConstraintType.valueOf(constraintType);
ClusterConstraints constraints = _admin.getConstraints(clusterName, type);
return new String(constraints.serialize(new ZNRecordSerializer()));
}
/**
* Sets up a cluster<br/>
* 6 Instances[localhost:8900 to localhost:8905], <br/>
* 1 resource[TestDB] with a replication factor of 3 and using MasterSlave state model<br/>
* @param clusterName
*/
public void setupTestCluster(String clusterName) {
addCluster(clusterName, true);
    String[] instanceInfoArray = new String[6];
for (int i = 0; i < instanceInfoArray.length; i++) {
instanceInfoArray[i] = "localhost_" + (8900 + i);
}
addInstancesToCluster(clusterName, instanceInfoArray);
addResourceToCluster(clusterName, "TestDB", 10, "MasterSlave");
rebalanceStorageCluster(clusterName, "TestDB", 3);
}
public static void printUsage(Options cliOptions) {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.setWidth(1000);
helpFormatter.printHelp("java " + ClusterSetup.class.getName(), cliOptions);
}
@SuppressWarnings("static-access")
private static Options constructCommandLineOptions() {
Option helpOption =
OptionBuilder.withLongOpt(help).withDescription("Prints command-line options info")
.create();
Option zkServerOption =
OptionBuilder.withLongOpt(zkServerAddress).withDescription("Provide zookeeper address")
.create();
zkServerOption.setArgs(1);
zkServerOption.setRequired(true);
zkServerOption.setArgName("ZookeeperServerAddress(Required)");
Option listClustersOption =
OptionBuilder.withLongOpt(listClusters).withDescription("List existing clusters").create();
listClustersOption.setArgs(0);
listClustersOption.setRequired(false);
Option listResourceOption =
OptionBuilder.withLongOpt(listResources)
.withDescription("List resources hosted in a cluster").create();
listResourceOption.setArgs(1);
listResourceOption.setRequired(false);
listResourceOption.setArgName("clusterName <-tag TagValue>");
Option listInstancesOption =
OptionBuilder.withLongOpt(listInstances).withDescription("List Instances in a cluster")
.create();
listInstancesOption.setArgs(1);
listInstancesOption.setRequired(false);
listInstancesOption.setArgName("clusterName <-tag tagName>");
Option addClusterOption =
OptionBuilder.withLongOpt(addCluster).withDescription("Add a new cluster").create();
addClusterOption.setArgs(1);
addClusterOption.setRequired(false);
addClusterOption.setArgName("clusterName");
Option activateClusterOption =
OptionBuilder.withLongOpt(activateCluster)
.withDescription("Enable/disable a cluster in distributed controller mode").create();
activateClusterOption.setArgs(3);
activateClusterOption.setRequired(false);
activateClusterOption.setArgName("clusterName grandCluster true/false");
Option deleteClusterOption =
OptionBuilder.withLongOpt(dropCluster).withDescription("Delete a cluster").create();
deleteClusterOption.setArgs(1);
deleteClusterOption.setRequired(false);
deleteClusterOption.setArgName("clusterName");
Option addInstanceOption =
OptionBuilder.withLongOpt(addInstance).withDescription("Add a new Instance to a cluster")
.create();
addInstanceOption.setArgs(2);
addInstanceOption.setRequired(false);
addInstanceOption.setArgName("clusterName InstanceId");
Option addResourceOption =
OptionBuilder.withLongOpt(addResource).withDescription("Add a resource to a cluster")
.create();
addResourceOption.setArgs(4);
addResourceOption.setRequired(false);
addResourceOption
.setArgName("clusterName resourceName partitionNum stateModelRef <-mode modeValue>");
Option expandResourceOption =
OptionBuilder.withLongOpt(expandResource)
.withDescription("Expand resource to additional nodes").create();
expandResourceOption.setArgs(2);
expandResourceOption.setRequired(false);
expandResourceOption.setArgName("clusterName resourceName");
Option expandClusterOption =
OptionBuilder.withLongOpt(expandCluster)
.withDescription("Expand a cluster and all the resources").create();
expandClusterOption.setArgs(1);
expandClusterOption.setRequired(false);
expandClusterOption.setArgName("clusterName");
Option resourceModeOption =
OptionBuilder.withLongOpt(mode)
.withDescription("Specify resource mode, used with addResourceGroup command").create();
resourceModeOption.setArgs(1);
resourceModeOption.setRequired(false);
resourceModeOption.setArgName("IdealState mode");
Option resourceTagOption =
OptionBuilder.withLongOpt(tag)
.withDescription("Specify resource tag, used with listResources command").create();
resourceTagOption.setArgs(1);
resourceTagOption.setRequired(false);
resourceTagOption.setArgName("tag");
Option resourceBucketSizeOption =
OptionBuilder.withLongOpt(bucketSize)
.withDescription("Specify size of a bucket, used with addResourceGroup command")
.create();
resourceBucketSizeOption.setArgs(1);
resourceBucketSizeOption.setRequired(false);
resourceBucketSizeOption.setArgName("Size of a bucket for a resource");
Option maxPartitionsPerNodeOption =
OptionBuilder.withLongOpt(maxPartitionsPerNode)
.withDescription("Specify max partitions per node, used with addResourceGroup command")
.create();
maxPartitionsPerNodeOption.setArgs(1);
maxPartitionsPerNodeOption.setRequired(false);
maxPartitionsPerNodeOption.setArgName("Max partitions per node for a resource");
Option resourceKeyOption =
OptionBuilder.withLongOpt(resourceKeyPrefix)
.withDescription("Specify resource key prefix, used with rebalance command").create();
resourceKeyOption.setArgs(1);
resourceKeyOption.setRequired(false);
resourceKeyOption.setArgName("Resource key prefix");
Option instanceGroupTagOption =
OptionBuilder.withLongOpt(instanceGroupTag)
.withDescription("Specify instance group tag, used with rebalance command").create();
instanceGroupTagOption.setArgs(1);
instanceGroupTagOption.setRequired(false);
instanceGroupTagOption.setArgName("Instance group tag");
Option addStateModelDefOption =
OptionBuilder.withLongOpt(addStateModelDef)
.withDescription("Add a State model to a cluster").create();
addStateModelDefOption.setArgs(2);
addStateModelDefOption.setRequired(false);
addStateModelDefOption.setArgName("clusterName <filename>");
Option addIdealStateOption =
OptionBuilder.withLongOpt(addIdealState).withDescription("Add a State model to a cluster")
.create();
addIdealStateOption.setArgs(3);
addIdealStateOption.setRequired(false);
addIdealStateOption.setArgName("clusterName resourceName <filename>");
Option dropInstanceOption =
OptionBuilder.withLongOpt(dropInstance)
.withDescription("Drop an existing Instance from a cluster").create();
dropInstanceOption.setArgs(2);
dropInstanceOption.setRequired(false);
dropInstanceOption.setArgName("clusterName InstanceId");
Option swapInstanceOption =
OptionBuilder.withLongOpt(swapInstance)
.withDescription("Swap an old instance from a cluster with a new instance").create();
swapInstanceOption.setArgs(3);
swapInstanceOption.setRequired(false);
swapInstanceOption.setArgName("clusterName oldInstance newInstance");
Option dropResourceOption =
OptionBuilder.withLongOpt(dropResource)
.withDescription("Drop an existing resource from a cluster").create();
dropResourceOption.setArgs(2);
dropResourceOption.setRequired(false);
dropResourceOption.setArgName("clusterName resourceName");
Option enableResourceOption =
OptionBuilder.withLongOpt(enableResource).withDescription("Enable/disable a resource")
.hasArgs(3).isRequired(false)
.withArgName("clusterName resourceName true/false <-tag resourceTag>")
.create();
Option rebalanceOption =
OptionBuilder.withLongOpt(rebalance).withDescription("Rebalance a resource in a cluster")
.create();
rebalanceOption.setArgs(3);
rebalanceOption.setRequired(false);
rebalanceOption.setArgName("clusterName resourceName replicas");
Option instanceInfoOption =
OptionBuilder.withLongOpt(listInstanceInfo)
.withDescription("Query info of a Instance in a cluster").create();
instanceInfoOption.setArgs(2);
instanceInfoOption.setRequired(false);
instanceInfoOption.setArgName("clusterName InstanceName");
Option clusterInfoOption =
OptionBuilder.withLongOpt(listClusterInfo).withDescription("Query info of a cluster")
.create();
clusterInfoOption.setArgs(1);
clusterInfoOption.setRequired(false);
clusterInfoOption.setArgName("clusterName");
Option resourceInfoOption =
OptionBuilder.withLongOpt(listResourceInfo).withDescription("Query info of a resource")
.create();
resourceInfoOption.setArgs(2);
resourceInfoOption.setRequired(false);
resourceInfoOption.setArgName("clusterName resourceName");
Option addResourcePropertyOption =
OptionBuilder.withLongOpt(addResourceProperty).withDescription("Add a resource property")
.create();
addResourcePropertyOption.setArgs(4);
addResourcePropertyOption.setRequired(false);
addResourcePropertyOption.setArgName("clusterName resourceName propertyName propertyValue");
Option removeResourcePropertyOption =
OptionBuilder.withLongOpt(removeResourceProperty)
.withDescription("Remove a resource property").create();
removeResourcePropertyOption.setArgs(3);
removeResourcePropertyOption.setRequired(false);
removeResourcePropertyOption.setArgName("clusterName resourceName propertyName");
Option partitionInfoOption =
OptionBuilder.withLongOpt(listPartitionInfo).withDescription("Query info of a partition")
.create();
partitionInfoOption.setArgs(3);
partitionInfoOption.setRequired(false);
partitionInfoOption.setArgName("clusterName resourceName partitionName");
Option enableInstanceOption =
OptionBuilder.withLongOpt(enableInstance).withDescription("Enable/disable an instance")
.create();
enableInstanceOption.setArgs(3);
enableInstanceOption.setRequired(false);
enableInstanceOption.setArgName("clusterName instanceName true/false");
Option enablePartitionOption =
OptionBuilder.hasArgs().withLongOpt(enablePartition)
.withDescription("Enable/disable partitions").create();
enablePartitionOption.setRequired(false);
enablePartitionOption
.setArgName("true/false clusterName instanceName resourceName partitionName1...");
Option enableClusterOption =
OptionBuilder.withLongOpt(enableCluster)
.withDescription("pause/resume the controller of a cluster").create();
enableClusterOption.setArgs(2);
enableClusterOption.setRequired(false);
enableClusterOption.setArgName("clusterName true/false");
Option resetPartitionOption =
OptionBuilder.withLongOpt(resetPartition)
.withDescription("Reset a partition in error state").create();
resetPartitionOption.setArgs(4);
resetPartitionOption.setRequired(false);
resetPartitionOption.setArgName("clusterName instanceName resourceName partitionName");
Option resetInstanceOption =
OptionBuilder.withLongOpt(resetInstance)
.withDescription("Reset all partitions in error state for an instance").create();
resetInstanceOption.setArgs(2);
resetInstanceOption.setRequired(false);
resetInstanceOption.setArgName("clusterName instanceName");
Option resetResourceOption =
OptionBuilder.withLongOpt(resetResource)
.withDescription("Reset all partitions in error state for a resource").create();
resetResourceOption.setArgs(2);
resetResourceOption.setRequired(false);
resetResourceOption.setArgName("clusterName resourceName");
Option listStateModelsOption =
OptionBuilder.withLongOpt(listStateModels)
.withDescription("Query info of state models in a cluster").create();
listStateModelsOption.setArgs(1);
listStateModelsOption.setRequired(false);
listStateModelsOption.setArgName("clusterName");
Option listStateModelOption =
OptionBuilder.withLongOpt(listStateModel)
.withDescription("Query info of a state model in a cluster").create();
listStateModelOption.setArgs(2);
listStateModelOption.setRequired(false);
listStateModelOption.setArgName("clusterName stateModelName");
Option addInstanceTagOption =
OptionBuilder.withLongOpt(addInstanceTag).withDescription("Add a tag to instance").create();
addInstanceTagOption.setArgs(3);
addInstanceTagOption.setRequired(false);
addInstanceTagOption.setArgName("clusterName instanceName tag");
Option removeInstanceTagOption =
OptionBuilder.withLongOpt(removeInstanceTag).withDescription("Remove tag from instance")
.create();
removeInstanceTagOption.setArgs(3);
removeInstanceTagOption.setRequired(false);
removeInstanceTagOption.setArgName("clusterName instanceName tag");
// TODO need deal with resource-names containing ","
// set/get/remove configs options
Option setConfOption =
OptionBuilder
.hasArgs(3)
.isRequired(false)
.withArgName(
"ConfigScope(e.g. RESOURCE) ConfigScopeArgs(e.g. myCluster,testDB) KeyValueMap(e.g. k1=v1,k2=v2)")
.withLongOpt(setConfig).withDescription("Set configs").create();
Option getConfOption =
OptionBuilder
.hasArgs(3)
.isRequired(false)
.withArgName(
"ConfigScope(e.g. RESOURCE) ConfigScopeArgs(e.g. myCluster,testDB) Keys(e.g. k1,k2)")
.withLongOpt(getConfig).withDescription("Get configs").create();
Option removeConfOption =
OptionBuilder
.hasArgs(3)
.isRequired(false)
.withArgName(
"ConfigScope(e.g. RESOURCE) ConfigScopeArgs(e.g. myCluster,testDB) Keys(e.g. k1,k2)")
.withLongOpt(removeConfig).withDescription("Remove configs").create();
// set/get/remove constraints options
Option setConstraintOption =
OptionBuilder
.hasArgs(4)
.isRequired(false)
.withArgName(
"clusterName ConstraintType(e.g. MESSAGE_CONSTRAINT) ConstraintId KeyValueMap(e.g. k1=v1,k2=v2)")
.withLongOpt(setConstraint)
.withDescription("Set a constraint associated with a give id. create if not exist")
.create();
Option getConstraintsOption =
OptionBuilder.hasArgs(2).isRequired(false)
.withArgName("clusterName ConstraintType(e.g. MESSAGE_CONSTRAINT)")
.withLongOpt(getConstraints)
.withDescription("Get constraints associated with given type").create();
Option removeConstraintOption =
OptionBuilder.hasArgs(3).isRequired(false)
.withArgName("clusterName ConstraintType(e.g. MESSAGE_CONSTRAINT) ConstraintId")
.withLongOpt(removeConstraint)
.withDescription("Remove a constraint associated with given id").create();
Option setCloudConfigOption = OptionBuilder.withLongOpt(setCloudConfig).withDescription(
"Set the Cloud Configuration of the cluster. Example:\n sh helix-admin.sh --zkSvr ZookeeperServerAddress --setCloudConfig ClusterName '{\"simpleFields\" : {\"CLOUD_ENABLED\" : \"true\",\"CLOUD_PROVIDER\": \"AZURE\"}}'")
.create();
setCloudConfigOption.setArgs(2);
setCloudConfigOption.setRequired(false);
setCloudConfigOption.setArgName("clusterName CloudConfigurationManifest");
Option removeCloudConfigOption = OptionBuilder.withLongOpt(removeCloudConfig)
.withDescription("Remove the Cloud Configuration of the cluster").create();
removeCloudConfigOption.setArgs(1);
removeCloudConfigOption.setRequired(false);
removeCloudConfigOption.setArgName("clusterName");
OptionGroup group = new OptionGroup();
group.setRequired(true);
group.addOption(rebalanceOption);
group.addOption(addResourceOption);
group.addOption(resourceModeOption);
group.addOption(resourceTagOption);
group.addOption(resourceBucketSizeOption);
group.addOption(maxPartitionsPerNodeOption);
group.addOption(expandResourceOption);
group.addOption(expandClusterOption);
group.addOption(resourceKeyOption);
group.addOption(addClusterOption);
group.addOption(activateClusterOption);
group.addOption(deleteClusterOption);
group.addOption(addInstanceOption);
group.addOption(listInstancesOption);
group.addOption(listResourceOption);
group.addOption(listClustersOption);
group.addOption(addIdealStateOption);
group.addOption(dropInstanceOption);
group.addOption(swapInstanceOption);
group.addOption(dropResourceOption);
group.addOption(enableResourceOption);
group.addOption(instanceInfoOption);
group.addOption(clusterInfoOption);
group.addOption(resourceInfoOption);
group.addOption(partitionInfoOption);
group.addOption(enableInstanceOption);
group.addOption(enablePartitionOption);
group.addOption(enableClusterOption);
group.addOption(resetPartitionOption);
group.addOption(resetInstanceOption);
group.addOption(resetResourceOption);
group.addOption(addStateModelDefOption);
group.addOption(listStateModelsOption);
group.addOption(listStateModelOption);
group.addOption(addResourcePropertyOption);
group.addOption(removeResourcePropertyOption);
// set/get/remove config options
group.addOption(setConfOption);
group.addOption(getConfOption);
group.addOption(removeConfOption);
// set/get/remove constraint options
group.addOption(setConstraintOption);
group.addOption(getConstraintsOption);
group.addOption(removeConstraintOption);
// set/remove cloud configs
group.addOption(setCloudConfigOption);
group.addOption(removeCloudConfigOption);
group.addOption(addInstanceTagOption);
group.addOption(removeInstanceTagOption);
group.addOption(instanceGroupTagOption);
Options options = new Options();
options.addOption(helpOption);
options.addOption(zkServerOption);
options.addOptionGroup(group);
return options;
}
// TODO: remove this. has moved to ZkHelixAdmin
  private static byte[] readFile(String filePath) throws IOException {
    File file = new File(filePath);
    int size = (int) file.length();
    byte[] bytes = new byte[size];
    DataInputStream dis = new DataInputStream(new FileInputStream(file));
    try {
      int read = 0;
      int numRead = 0;
      while (read < bytes.length && (numRead = dis.read(bytes, read, bytes.length - read)) >= 0) {
        read = read + numRead;
      }
      return bytes;
    } finally {
      // close the stream so the file handle is not leaked
      dis.close();
    }
  }
public static int processCommandLineArgs(String[] cliArgs) throws Exception {
CommandLineParser cliParser = new GnuParser();
Options cliOptions = constructCommandLineOptions();
CommandLine cmd = null;
try {
cmd = cliParser.parse(cliOptions, cliArgs);
} catch (ParseException pe) {
System.err.println("CommandLineClient: failed to parse command-line options: "
+ pe.toString());
printUsage(cliOptions);
System.exit(1);
}
ClusterSetup setupTool = new ClusterSetup(cmd.getOptionValue(zkServerAddress));
if (cmd.hasOption(addCluster)) {
String clusterName = cmd.getOptionValue(addCluster);
setupTool.addCluster(clusterName, false);
return 0;
}
if (cmd.hasOption(activateCluster)) {
String clusterName = cmd.getOptionValues(activateCluster)[0];
String grandCluster = cmd.getOptionValues(activateCluster)[1];
boolean enable = Boolean.parseBoolean(cmd.getOptionValues(activateCluster)[2]);
setupTool.activateCluster(clusterName, grandCluster, enable);
return 0;
}
if (cmd.hasOption(dropCluster)) {
String clusterName = cmd.getOptionValue(dropCluster);
setupTool.deleteCluster(clusterName);
return 0;
}
if (cmd.hasOption(addInstance)) {
String clusterName = cmd.getOptionValues(addInstance)[0];
String instanceAddressInfo = cmd.getOptionValues(addInstance)[1];
String[] instanceAddresses = instanceAddressInfo.split(";");
setupTool.addInstancesToCluster(clusterName, instanceAddresses);
return 0;
}
if (cmd.hasOption(addResource)) {
String clusterName = cmd.getOptionValues(addResource)[0];
String resourceName = cmd.getOptionValues(addResource)[1];
int partitions = Integer.parseInt(cmd.getOptionValues(addResource)[2]);
String stateModelRef = cmd.getOptionValues(addResource)[3];
String modeValue = RebalanceMode.SEMI_AUTO.toString();
if (cmd.hasOption(mode)) {
modeValue = cmd.getOptionValues(mode)[0];
}
int bucketSizeVal = 0;
if (cmd.hasOption(bucketSize)) {
bucketSizeVal = Integer.parseInt(cmd.getOptionValues(bucketSize)[0]);
}
int maxPartitionsPerNodeVal = -1;
if (cmd.hasOption(maxPartitionsPerNode)) {
maxPartitionsPerNodeVal = Integer.parseInt(cmd.getOptionValues(maxPartitionsPerNode)[0]);
}
setupTool.addResourceToCluster(clusterName, resourceName, partitions, stateModelRef,
modeValue, bucketSizeVal, maxPartitionsPerNodeVal);
return 0;
}
if (cmd.hasOption(rebalance)) {
String clusterName = cmd.getOptionValues(rebalance)[0];
String resourceName = cmd.getOptionValues(rebalance)[1];
int replicas = Integer.parseInt(cmd.getOptionValues(rebalance)[2]);
String keyPrefixVal = "";
String instanceGroupTagVal = "";
if (cmd.hasOption(resourceKeyPrefix)) {
keyPrefixVal = cmd.getOptionValue(resourceKeyPrefix);
}
if (cmd.hasOption(instanceGroupTag)) {
instanceGroupTagVal = cmd.getOptionValue(instanceGroupTag);
}
setupTool.rebalanceCluster(clusterName, resourceName, replicas, keyPrefixVal,
instanceGroupTagVal);
return 0;
}
if (cmd.hasOption(expandCluster)) {
String clusterName = cmd.getOptionValues(expandCluster)[0];
setupTool.expandCluster(clusterName);
return 0;
}
if (cmd.hasOption(expandResource)) {
String clusterName = cmd.getOptionValues(expandResource)[0];
String resourceName = cmd.getOptionValues(expandResource)[1];
setupTool.expandResource(clusterName, resourceName);
return 0;
}
if (cmd.hasOption(dropInstance)) {
String clusterName = cmd.getOptionValues(dropInstance)[0];
String instanceAddressInfo = cmd.getOptionValues(dropInstance)[1];
String[] instanceAddresses = instanceAddressInfo.split(";");
setupTool.dropInstancesFromCluster(clusterName, instanceAddresses);
return 0;
}
if (cmd.hasOption(listClusters)) {
List<String> clusters = setupTool.getClusterManagementTool().getClusters();
System.out.println("Existing clusters:");
for (String cluster : clusters) {
System.out.println(cluster);
}
return 0;
}
if (cmd.hasOption(listResources)) {
String clusterName = cmd.getOptionValue(listResources);
List<String> resourceNames = null;
if (cmd.hasOption(tag)) {
String tagValue = cmd.getOptionValues(tag)[0];
resourceNames = setupTool.getClusterManagementTool()
.getResourcesInClusterWithTag(clusterName, tagValue);
System.out.println(
"Existing resources in cluster " + clusterName + " with tag " + tagValue + " :");
} else {
resourceNames = setupTool.getClusterManagementTool().getResourcesInCluster(clusterName);
System.out.println("Existing resources in cluster " + clusterName + ":");
}
for (String resourceName : resourceNames) {
System.out.println(resourceName);
}
return 0;
} else if (cmd.hasOption(listClusterInfo)) {
String clusterName = cmd.getOptionValue(listClusterInfo);
List<String> resourceNames =
setupTool.getClusterManagementTool().getResourcesInCluster(clusterName);
List<String> instances =
setupTool.getClusterManagementTool().getInstancesInCluster(clusterName);
System.out.println("Existing resources in cluster " + clusterName + ":");
for (String resourceName : resourceNames) {
System.out.println(resourceName);
}
System.out.println("Instances in cluster " + clusterName + ":");
      for (String instanceName : instances) {
        System.out.println(instanceName);
      }
return 0;
} else if (cmd.hasOption(listInstances)) {
String clusterName = cmd.getOptionValue(listInstances);
List<String> instances;
if (cmd.hasOption(tag)) {
String instanceTag = cmd.getOptionValues(tag)[0];
instances = setupTool.getClusterManagementTool()
.getInstancesInClusterWithTag(clusterName, instanceTag);
} else {
instances =
setupTool.getClusterManagementTool().getInstancesInCluster(clusterName);
}
System.out.println("Instances in cluster " + clusterName + ":");
for (String instanceName : instances) {
System.out.println(instanceName);
}
return 0;
} else if (cmd.hasOption(listInstanceInfo)) {
String clusterName = cmd.getOptionValues(listInstanceInfo)[0];
String instanceName = cmd.getOptionValues(listInstanceInfo)[1];
InstanceConfig config =
setupTool.getClusterManagementTool().getInstanceConfig(clusterName, instanceName);
String result = new String(config.serialize(new ZNRecordSerializer()));
System.out.println("InstanceConfig: " + result);
return 0;
} else if (cmd.hasOption(listResourceInfo)) {
// print out partition number, resource name and replication number
// Also the ideal states and current states
String clusterName = cmd.getOptionValues(listResourceInfo)[0];
String resourceName = cmd.getOptionValues(listResourceInfo)[1];
IdealState idealState =
setupTool.getClusterManagementTool().getResourceIdealState(clusterName, resourceName);
ExternalView externalView =
setupTool.getClusterManagementTool().getResourceExternalView(clusterName, resourceName);
if (idealState != null) {
System.out.println("IdealState for " + resourceName + ":");
System.out.println(new String(idealState.serialize(new ZNRecordSerializer())));
} else {
System.out.println("No idealState for " + resourceName);
}
System.out.println();
if (externalView != null) {
System.out.println("ExternalView for " + resourceName + ":");
System.out.println(new String(externalView.serialize(new ZNRecordSerializer())));
} else {
System.out.println("No externalView for " + resourceName);
}
return 0;
} else if (cmd.hasOption(listPartitionInfo)) {
// print out where the partition master / slaves locates
String clusterName = cmd.getOptionValues(listPartitionInfo)[0];
String resourceName = cmd.getOptionValues(listPartitionInfo)[1];
String partitionName = cmd.getOptionValues(listPartitionInfo)[2];
IdealState idealState =
setupTool.getClusterManagementTool().getResourceIdealState(clusterName, resourceName);
ExternalView externalView =
setupTool.getClusterManagementTool().getResourceExternalView(clusterName, resourceName);
if (idealState != null) {
ZNRecord partInfo = new ZNRecord(resourceName + "/" + partitionName);
ZNRecord idealStateRec = idealState.getRecord();
partInfo.setSimpleFields(idealStateRec.getSimpleFields());
if (idealStateRec.getMapField(partitionName) != null) {
partInfo.setMapField(partitionName, idealStateRec.getMapField(partitionName));
}
if (idealStateRec.getListField(partitionName) != null) {
partInfo.setListField(partitionName, idealStateRec.getListField(partitionName));
}
System.out.println("IdealState for " + resourceName + "/" + partitionName + ":");
System.out.println(new String(new ZNRecordSerializer().serialize(partInfo)));
} else {
System.out.println("No idealState for " + resourceName + "/" + partitionName);
}
System.out.println();
if (externalView != null) {
ZNRecord partInfo = new ZNRecord(resourceName + "/" + partitionName);
ZNRecord extViewRec = externalView.getRecord();
partInfo.setSimpleFields(extViewRec.getSimpleFields());
if (extViewRec.getMapField(partitionName) != null) {
partInfo.setMapField(partitionName, extViewRec.getMapField(partitionName));
}
if (extViewRec.getListField(partitionName) != null) {
partInfo.setListField(partitionName, extViewRec.getListField(partitionName));
}
System.out.println("ExternalView for " + resourceName + "/" + partitionName + ":");
System.out.println(new String(new ZNRecordSerializer().serialize(partInfo)));
} else {
System.out.println("No externalView for " + resourceName + "/" + partitionName);
}
return 0;
} else if (cmd.hasOption(enableInstance)) {
String clusterName = cmd.getOptionValues(enableInstance)[0];
String instanceName = cmd.getOptionValues(enableInstance)[1];
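      // Helix instance ids use the host_port form, so normalize host:port input.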
if (instanceName.contains(":")) {
instanceName = instanceName.replaceAll(":", "_");
}
boolean enabled = Boolean.parseBoolean(cmd.getOptionValues(enableInstance)[2].toLowerCase());
setupTool.getClusterManagementTool().enableInstance(clusterName, instanceName, enabled);
return 0;
} else if (cmd.hasOption(enableResource)) {
String clusterName = cmd.getOptionValues(enableResource)[0];
String resourceName = cmd.getOptionValues(enableResource)[1];
boolean enabled = Boolean.parseBoolean(cmd.getOptionValues(enableResource)[2].toLowerCase());
if (cmd.hasOption(tag)) {
String resourceTag = cmd.getOptionValues(tag)[0];
setupTool.enableResource(clusterName, resourceName, resourceTag, enabled);
} else {
setupTool.getClusterManagementTool().enableResource(clusterName, resourceName, enabled);
}
} else if (cmd.hasOption(enablePartition)) {
String[] args = cmd.getOptionValues(enablePartition);
boolean enabled = Boolean.parseBoolean(args[0].toLowerCase());
String clusterName = args[1];
String instanceName = args[2];
String resourceName = args[3];
List<String> partitionNames = Arrays.asList(Arrays.copyOfRange(args, 4, args.length));
setupTool.getClusterManagementTool().enablePartition(enabled, clusterName, instanceName,
resourceName, partitionNames);
return 0;
} else if (cmd.hasOption(resetPartition)) {
String[] args = cmd.getOptionValues(resetPartition);
String clusterName = args[0];
String instanceName = args[1];
String resourceName = args[2];
List<String> partitionNames = Arrays.asList(Arrays.copyOfRange(args, 3, args.length));
setupTool.getClusterManagementTool().resetPartition(clusterName, instanceName, resourceName,
partitionNames);
return 0;
} else if (cmd.hasOption(resetInstance)) {
String[] args = cmd.getOptionValues(resetInstance);
String clusterName = args[0];
List<String> instanceNames = Arrays.asList(Arrays.copyOfRange(args, 1, args.length));
setupTool.getClusterManagementTool().resetInstance(clusterName, instanceNames);
return 0;
} else if (cmd.hasOption(resetResource)) {
String[] args = cmd.getOptionValues(resetResource);
String clusterName = args[0];
List<String> resourceNames = Arrays.asList(Arrays.copyOfRange(args, 1, args.length));
setupTool.getClusterManagementTool().resetResource(clusterName, resourceNames);
return 0;
} else if (cmd.hasOption(enableCluster)) {
String[] params = cmd.getOptionValues(enableCluster);
String clusterName = params[0];
boolean enabled = Boolean.parseBoolean(params[1].toLowerCase());
setupTool.getClusterManagementTool().enableCluster(clusterName, enabled);
return 0;
} else if (cmd.hasOption(listStateModels)) {
String clusterName = cmd.getOptionValues(listStateModels)[0];
List<String> stateModels =
setupTool.getClusterManagementTool().getStateModelDefs(clusterName);
System.out.println("Existing state models:");
for (String stateModel : stateModels) {
System.out.println(stateModel);
}
return 0;
} else if (cmd.hasOption(listStateModel)) {
String clusterName = cmd.getOptionValues(listStateModel)[0];
String stateModel = cmd.getOptionValues(listStateModel)[1];
StateModelDefinition stateModelDef =
setupTool.getClusterManagementTool().getStateModelDef(clusterName, stateModel);
String result = new String(new ZNRecordSerializer().serialize(stateModelDef.getRecord()));
System.out.println("StateModelDefinition: " + result);
return 0;
} else if (cmd.hasOption(addStateModelDef)) {
String clusterName = cmd.getOptionValues(addStateModelDef)[0];
String stateModelFile = cmd.getOptionValues(addStateModelDef)[1];
ZNRecord stateModelRecord =
(ZNRecord) (new ZNRecordSerializer().deserialize(readFile(stateModelFile)));
if (stateModelRecord.getId() == null || stateModelRecord.getId().length() == 0) {
throw new IllegalArgumentException("ZNRecord for state model definition must have an id");
}
setupTool.getClusterManagementTool().addStateModelDef(clusterName, stateModelRecord.getId(),
new StateModelDefinition(stateModelRecord));
return 0;
} else if (cmd.hasOption(addIdealState)) {
String clusterName = cmd.getOptionValues(addIdealState)[0];
String resourceName = cmd.getOptionValues(addIdealState)[1];
String idealStateFile = cmd.getOptionValues(addIdealState)[2];
setupTool.addIdealState(clusterName, resourceName, idealStateFile);
return 0;
} else if (cmd.hasOption(dropResource)) {
String clusterName = cmd.getOptionValues(dropResource)[0];
String resourceName = cmd.getOptionValues(dropResource)[1];
setupTool.getClusterManagementTool().dropResource(clusterName, resourceName);
} else if (cmd.hasOption(swapInstance)) {
String clusterName = cmd.getOptionValues(swapInstance)[0];
String oldInstanceName = cmd.getOptionValues(swapInstance)[1];
String newInstanceName = cmd.getOptionValues(swapInstance)[2];
setupTool.swapInstance(clusterName, oldInstanceName, newInstanceName);
}
// set/get/remove config options
    else if (cmd.hasOption(setConfig)) {
      String[] values = cmd.getOptionValues(setConfig);
      ConfigScopeProperty type = ConfigScopeProperty.valueOf(values[0]);
      String scopeArgs = values[1];
      String keyValueMap = values[2];
      setupTool.setConfig(type, scopeArgs, keyValueMap);
    } else if (cmd.hasOption(getConfig)) {
      String[] values = cmd.getOptionValues(getConfig);
      ConfigScopeProperty type = ConfigScopeProperty.valueOf(values[0]);
      String scopeArgs = values[1];
      String keys = values[2];
      setupTool.getConfig(type, scopeArgs, keys);
    } else if (cmd.hasOption(removeConfig)) {
      String[] values = cmd.getOptionValues(removeConfig);
      ConfigScopeProperty type = ConfigScopeProperty.valueOf(values[0]);
      String scopeArgs = values[1];
      String keys = values[2];
      setupTool.removeConfig(type, scopeArgs, keys);
    }
// set/get/remove constraint options
    else if (cmd.hasOption(setConstraint)) {
      String[] values = cmd.getOptionValues(setConstraint);
      String clusterName = values[0];
      String constraintType = values[1];
      String constraintId = values[2];
      String constraintAttributesMap = values[3];
      setupTool.setConstraint(clusterName, constraintType, constraintId, constraintAttributesMap);
    } else if (cmd.hasOption(getConstraints)) {
      String[] values = cmd.getOptionValues(getConstraints);
      String clusterName = values[0];
      String constraintType = values[1];
      setupTool.getConstraints(clusterName, constraintType);
    } else if (cmd.hasOption(removeConstraint)) {
      String[] values = cmd.getOptionValues(removeConstraint);
      String clusterName = values[0];
      String constraintType = values[1];
      String constraintId = values[2];
      setupTool.removeConstraint(clusterName, constraintType, constraintId);
} else if (cmd.hasOption(addInstanceTag)) {
String clusterName = cmd.getOptionValues(addInstanceTag)[0];
String instanceName = cmd.getOptionValues(addInstanceTag)[1];
String tag = cmd.getOptionValues(addInstanceTag)[2];
setupTool.getClusterManagementTool().addInstanceTag(clusterName, instanceName, tag);
} else if (cmd.hasOption(removeInstanceTag)) {
String clusterName = cmd.getOptionValues(removeInstanceTag)[0];
String instanceName = cmd.getOptionValues(removeInstanceTag)[1];
String tag = cmd.getOptionValues(removeInstanceTag)[2];
setupTool.getClusterManagementTool().removeInstanceTag(clusterName, instanceName, tag);
}
// help option
else if (cmd.hasOption(help)) {
printUsage(cliOptions);
return 0;
} else if (cmd.hasOption(addResourceProperty)) {
String clusterName = cmd.getOptionValues(addResourceProperty)[0];
String resourceName = cmd.getOptionValues(addResourceProperty)[1];
String propertyKey = cmd.getOptionValues(addResourceProperty)[2];
String propertyVal = cmd.getOptionValues(addResourceProperty)[3];
setupTool.addResourceProperty(clusterName, resourceName, propertyKey, propertyVal);
return 0;
} else if (cmd.hasOption(removeResourceProperty)) {
String clusterName = cmd.getOptionValues(removeResourceProperty)[0];
String resourceName = cmd.getOptionValues(removeResourceProperty)[1];
String propertyKey = cmd.getOptionValues(removeResourceProperty)[2];
setupTool.removeResourceProperty(clusterName, resourceName, propertyKey);
return 0;
} else if (cmd.hasOption(setCloudConfig)) {
String clusterName = cmd.getOptionValues(setCloudConfig)[0];
String cloudConfigManifest = cmd.getOptionValues(setCloudConfig)[1];
setupTool.setCloudConfig(clusterName, cloudConfigManifest);
return 0;
} else if (cmd.hasOption(removeCloudConfig)) {
String clusterName = cmd.getOptionValues(removeCloudConfig)[0];
setupTool.removeCloudConfig(clusterName);
return 0;
}
return 0;
}
// TODO: remove this. has moved to ZkHelixAdmin
public void addIdealState(String clusterName, String resourceName, String idealStateFile)
throws IOException {
ZNRecord idealStateRecord =
(ZNRecord) (new ZNRecordSerializer().deserialize(readFile(idealStateFile)));
if (idealStateRecord.getId() == null || !idealStateRecord.getId().equals(resourceName)) {
throw new IllegalArgumentException("ideal state must have same id as resource name");
}
_admin.setResourceIdealState(clusterName, resourceName, new IdealState(idealStateRecord));
}
public void addResourceProperty(String clusterName, String resourceName, String propertyKey,
String propertyVal) {
IdealState idealState = _admin.getResourceIdealState(clusterName, resourceName);
if (idealState == null) {
throw new HelixException("Resource: " + resourceName + " has NOT been added yet");
}
idealState.getRecord().setSimpleField(propertyKey, propertyVal);
_admin.setResourceIdealState(clusterName, resourceName, idealState);
}
public void removeResourceProperty(String clusterName, String resourceName, String propertyKey) {
IdealState idealState = _admin.getResourceIdealState(clusterName, resourceName);
if (idealState == null) {
throw new HelixException("Resource: " + resourceName + " has NOT been added yet");
}
idealState.getRecord().getSimpleFields().remove(propertyKey);
_admin.setResourceIdealState(clusterName, resourceName, idealState);
}
/**
* @param args
* @throws Exception
*/
public static void main(String[] args) throws Exception {
if (args.length == 1 && args[0].equals("setup-test-cluster")) {
      System.out.println(
          "By default, setting up TestCluster with 6 instances and 10 partitions; each partition will have 3 replicas");
new ClusterSetup("localhost:2181").setupTestCluster("TestCluster");
System.exit(0);
}
int ret = processCommandLineArgs(args);
System.exit(ret);
}
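  // Illustrative CLI invocations (a sketch: cluster/resource names are made up, and
  // the long option strings are assumed to match the option constants used above):
  //   sh helix-admin.sh --zkSvr localhost:2181 --addCluster MyCluster
  //   sh helix-admin.sh --zkSvr localhost:2181 --addResource MyCluster MyDB 8 MasterSlave
  //   sh helix-admin.sh --zkSvr localhost:2181 --rebalance MyCluster MyDB 3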
public static class Builder extends GenericZkHelixApiBuilder<Builder> {
public Builder() {
}
public ClusterSetup build() {
validate();
return new ClusterSetup(
createZkClient(_realmMode, _realmAwareZkConnectionConfig, _realmAwareZkClientConfig,
_zkAddress), false);
}
}
}
| 9,953 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ClusterLiveNodesVerifier.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.List;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
/**
 * Please use the corresponding class in tools.ClusterVerifiers instead.
 */
@Deprecated
public class ClusterLiveNodesVerifier extends ClusterVerifier {
final List<String> _expectSortedLiveNodes; // always sorted
public ClusterLiveNodesVerifier(HelixZkClient zkclient, String clusterName,
List<String> expectLiveNodes) {
super(zkclient, clusterName);
_expectSortedLiveNodes = expectLiveNodes;
Collections.sort(_expectSortedLiveNodes);
}
@Override
public boolean verify() throws Exception {
List<String> actualLiveNodes = _accessor.getChildNames(_keyBuilder.liveInstances());
Collections.sort(actualLiveNodes);
return _expectSortedLiveNodes.equals(actualLiveNodes);
}
}
| 9,954 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/RUSHrHash.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Random;
import java.util.zip.CRC32;
public class RUSHrHash {
  /**
   * Holds the number of replicas to create for an object.
   */
protected int replicationDegree = 1;
  /**
   * An array of hash maps where each hash map holds info on the sub-cluster
   * that corresponds to its array index (i.e. array element 0 holds data for
   * sub-cluster 0). This property is populated at construction time only.
   */
protected HashMap[] clusters;
/**
* an array of hash maps where each element holds data for a sub cluster
*/
protected HashMap[] clusterConfig;
  /**
   * Total number of sub-clusters in our data configuration. This property is
   * populated at construction time only.
   */
protected int totalClusters = 0;
  /**
   * The total number of nodes across all of the sub-clusters. This property is
   * populated at construction time only.
   */
protected int totalNodes = 0;
  /**
   * The weighted total number of nodes across all of the sub-clusters (each
   * sub-cluster's node count times its weight). This property is populated at
   * construction time only.
   */
protected int totalNodesW = 0;
/**
* an array of HashMaps where each HashMap holds the data for a single node
*/
protected HashMap[] nodes = null;
  /**
   * Value used to help seed the random number generator.
   */
protected final int SEED_PARAM = 1560;
/**
* random number generator
*/
Random ran = new Random();
/**
* maximum value we can have from the ran generator
*/
float ranMax = (float) Math.pow(2.0, 16.0);
  /**
   * The constructor analyzes the passed config to obtain the fundamental values
   * and data structures for locating a node. Each of those values is described
   * in detail above with each property. Briefly:
   * this.clusters, this.totalClusters, this.totalNodes.
   * The values above are derived from the config HashMap passed to the locator.
   * @param conf the data config
   * @throws Exception
   */
public RUSHrHash(HashMap<String, Object> conf) throws Exception {
clusterConfig = (HashMap[]) conf.get("subClusters");
replicationDegree = (Integer) conf.get("replicationDegree");
HashMap[] subClusters = (HashMap[]) conf.get("subClusters");
totalClusters = subClusters.length;
clusters = new HashMap[totalClusters];
// check the confg for all of the params
// throw a exception if they are not there
if (totalClusters <= 0) {
throw new Exception(
"data config to the RUSHr locator does not contain a valid clusters property");
}
int nodeCt = 0;
HashMap[] nodeData = null;
ArrayList<HashMap> tempNodes = new ArrayList<HashMap>();
HashMap<String, Object> subCluster = null, clusterData = null;
Integer clusterDataList[] = null;
for (int i = 0; i < totalClusters; i++) {
subCluster = subClusters[i];
nodeData = (HashMap[]) subCluster.get("nodes");
nodeCt = nodeData.length;
clusterDataList = new Integer[nodeCt];
for (int n = 0; n < nodeCt; n++) {
tempNodes.add(nodeData[n]);
clusterDataList[n] = n;
}
totalNodes += nodeCt;
totalNodesW += nodeCt * (Integer) subCluster.get("weight");
clusterData = new HashMap<String, Object>();
clusterData.put("count", nodeCt);
clusterData.put("list", clusterDataList);
clusters[i] = clusterData;
}
nodes = new HashMap[totalNodes];
tempNodes.toArray(nodes);
}
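  /**
   * Illustrative usage sketch (not part of the original class): builds a config
   * with two equally weighted sub-clusters of three nodes each and locates the
   * replica set for a key. The "host" field on each node map is a hypothetical
   * attribute; the locator returns the node maps untouched, so any fields work.
   */
  public static void main(String[] args) throws Exception {
    HashMap[] subClusters = new HashMap[2];
    for (int c = 0; c < 2; c++) {
      HashMap[] nodeList = new HashMap[3];
      for (int n = 0; n < 3; n++) {
        HashMap<String, String> node = new HashMap<String, String>();
        node.put("host", "host-" + c + "-" + n); // hypothetical node attribute
        nodeList[n] = node;
      }
      HashMap<String, Object> subCluster = new HashMap<String, Object>();
      subCluster.put("nodes", nodeList);
      subCluster.put("weight", 1);
      subClusters[c] = subCluster;
    }
    HashMap<String, Object> conf = new HashMap<String, Object>();
    conf.put("subClusters", subClusters);
    conf.put("replicationDegree", 2);
    RUSHrHash locator = new RUSHrHash(conf);
    // The same key always maps deterministically to replicationDegree node maps.
    for (HashMap node : locator.findNode("myObjectKey")) {
      System.out.println(node.get("host"));
    }
  }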
  /**
   * This function is an implementation of the RUSHr algorithm as described by R J
   * Honicky and Ethan Miller.
   * @param objKey a long used as the prng seed for placement
   * @return the list of node data maps chosen for the object's replicas
   * @throws Exception
   */
public ArrayList<HashMap> findNode(long objKey) throws Exception {
HashMap[] c = this.clusters;
int sumRemainingNodes = this.totalNodes;
int sumRemainingNodesW = this.totalNodesW;
int repDeg = this.replicationDegree;
int totClu = this.totalClusters;
int totNod = this.totalNodes;
HashMap[] clusConfig = this.clusterConfig;
// throw an exception if the data is no good
if ((totNod <= 0) || (totClu <= 0)) {
throw new Exception("the total nodes or total clusters is negative or 0. bad joo joos!");
}
// get the starting cluster
int currentCluster = totClu - 1;
    /**
     * This loop is an implementation of the RUSHr algorithm for fast placement
     * and location of objects in a distributed storage system, where:
     * j = current cluster, m = disks in current cluster, n = remaining nodes.
     */
ArrayList<HashMap> nodeData = new ArrayList<HashMap>();
while (true) {
// prevent an infinite loop, in case there is a bug
if (currentCluster < 0) {
        throw new Exception(
            "the cluster index became negative while looking for id: " + objKey
                + ". This should never happen for any key and indicates a bug.");
}
HashMap clusterData = clusConfig[currentCluster];
Integer weight = (Integer) clusterData.get("weight");
Integer disksInCurrentCluster = (Integer) c[currentCluster].get("count");
sumRemainingNodes -= disksInCurrentCluster;
Integer disksInCurrentClusterW = disksInCurrentCluster * weight;
sumRemainingNodesW -= disksInCurrentClusterW;
// set the seed to our set id
long seed = objKey + currentCluster;
ran.setSeed(seed);
int t = (repDeg - sumRemainingNodes) > 0 ? (repDeg - sumRemainingNodes) : 0;
int u =
t
+ drawWHG(repDeg - t, disksInCurrentClusterW - t, disksInCurrentClusterW
+ sumRemainingNodesW - t, weight);
if (u > 0) {
if (u > disksInCurrentCluster) {
u = disksInCurrentCluster;
}
ran.setSeed(objKey + currentCluster + SEED_PARAM);
choose(u, currentCluster, sumRemainingNodes, nodeData);
reset(u, currentCluster);
repDeg -= u;
}
if (repDeg == 0) {
break;
}
currentCluster--;
}
return nodeData;
}
  /**
   * This function is an implementation of the RUSH algorithm as described by R J
   * Honicky and Ethan Miller. The string identifier is hashed (CRC32) into the
   * integer prng seed used for placement.
   * @param objKey a string identifier for the object to locate
   * @return the list of node data maps chosen for the object's replicas
   * @throws Exception
   */
public ArrayList<HashMap> findNode(String objKey) throws Exception {
// turn a string identifier into an integer for the random seed
CRC32 crc32 = new CRC32();
byte[] bytes = objKey.getBytes();
crc32.update(bytes);
long crc32Value = crc32.getValue();
long objKeyLong = (crc32Value >> 16) & 0x7fff;
return findNode(objKeyLong);
}
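  /**
   * Restores the cluster's node permutation list to the identity ordering,
   * undoing the tail swaps made by choose() so the next lookup starts clean.
   */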
public void reset(int nodesToRetrieve, int currentCluster) {
Integer[] list = (Integer[]) clusters[currentCluster].get("list");
Integer count = (Integer) clusters[currentCluster].get("count");
int listIdx;
int val;
for (int nodeIdx = 0; nodeIdx < nodesToRetrieve; nodeIdx++) {
listIdx = count - nodesToRetrieve + nodeIdx;
val = list[listIdx];
if (val < (count - nodesToRetrieve)) {
list[val] = val;
}
list[listIdx] = listIdx;
}
}
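  /**
   * Randomly selects nodesToRetrieve distinct nodes from the current cluster
   * using partial Fisher-Yates swaps on the permutation list, appending each
   * chosen node's data map to nodeData.
   */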
public void choose(int nodesToRetrieve, int currentCluster, int remainingNodes,
ArrayList<HashMap> nodeData) {
Integer[] list = (Integer[]) clusters[currentCluster].get("list");
Integer count = (Integer) clusters[currentCluster].get("count");
int maxIdx;
int randNode;
int chosen;
for (int nodeIdx = 0; nodeIdx < nodesToRetrieve; nodeIdx++) {
maxIdx = count - nodeIdx - 1;
randNode = ran.nextInt(maxIdx + 1);
// swap
chosen = list[randNode];
list[randNode] = list[maxIdx];
list[maxIdx] = chosen;
// add the remaining nodes so we can find the node data when we are done
nodeData.add(nodes[remainingNodes + chosen]);
}
}
  /**
   * @param objKey a string identifier for the object to locate
   * @return the list of node data maps chosen for the object's replicas
   * @throws Exception
   */
public ArrayList<HashMap> findNodes(String objKey) throws Exception {
return findNode(objKey);
}
public int getReplicationDegree() {
return replicationDegree;
}
public int getTotalNodes() {
return totalNodes;
}
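  /**
   * Approximates a weighted hypergeometric draw: runs one trial per replica,
   * each succeeding with probability disksInCurrentCluster / totalDisks and
   * decrementing the weighted counts, then returns the number of successes,
   * i.e. how many replicas land in the current cluster.
   */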
public int drawWHG(int replicas, int disksInCurrentCluster, int totalDisks, int weight) {
int found = 0;
float z;
float prob;
int ranInt;
for (int i = 0; i < replicas; i++) {
if (totalDisks != 0) {
ranInt = ran.nextInt((int) (ranMax + 1));
z = ((float) ranInt / ranMax);
prob = ((float) disksInCurrentCluster / (float) totalDisks);
if (z <= prob) {
found++;
disksInCurrentCluster -= weight;
}
totalDisks -= weight;
}
}
return found;
}
}
| 9,955 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/TestExecutor.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import org.apache.helix.HelixManager;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory;
import org.apache.helix.store.PropertyJsonComparator;
import org.apache.helix.store.PropertyJsonSerializer;
import org.apache.helix.store.PropertyStoreException;
import org.apache.helix.tools.TestCommand.CommandType;
import org.apache.helix.zookeeper.zkclient.exception.ZkBadVersionException;
import org.apache.helix.zookeeper.zkclient.exception.ZkNodeExistsException;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A test is structured logically as a list of commands. A command has three parts:
 * COMMAND | TRIGGER | ARGS. COMMAND can be: modify, verify, start, stop.
 * TRIGGER is optional and consists of start-time, timeout, and expect-value, which means
 * the COMMAND is triggered between [start-time, start-time + timeout] and fires when the
 * value in question equals expect-value.
 * The ARGS format depends on COMMAND. If COMMAND is modify/verify, the arg is of the form:
 * <znode-path, property-type (SIMPLE, LIST, or MAP), operation(+, -, ==, !=), key,
 * update-value> in which key is k1 for a SIMPLE field, k1|index for a LIST field, and
 * k1|k2 for a MAP field. If COMMAND is start/stop, the arg is a thread handler.
 */
public class TestExecutor {
  /**
   * SIMPLE: simple field change; LIST: list field change; MAP: map field change;
   * ZNODE: entire znode change.
   */
public enum ZnodePropertyType {
SIMPLE,
LIST,
MAP,
ZNODE
}
private enum ZnodeModValueType {
INVALID,
SINGLE_VALUE,
LIST_VALUE,
MAP_VALUE,
ZNODE_VALUE
}
private static Logger logger = LoggerFactory.getLogger(TestExecutor.class);
  private static final long SLEEP_TIME = 500; // in ms
private final static PropertyJsonComparator<String> STRING_COMPARATOR =
new PropertyJsonComparator<String>(String.class);
private final static PropertyJsonSerializer<ZNRecord> ZNRECORD_SERIALIZER =
new PropertyJsonSerializer<ZNRecord>(ZNRecord.class);
private static ZnodeModValueType getValueType(ZnodePropertyType type, String key) {
ZnodeModValueType valueType = ZnodeModValueType.INVALID;
switch (type) {
case SIMPLE:
if (key == null) {
logger.warn("invalid key for simple field: key is null");
} else {
        String[] keyParts = key.split("/");
if (keyParts.length != 1) {
logger.warn("invalid key for simple field: " + key + ", expect 1 part: key1 (no slash)");
} else {
valueType = ZnodeModValueType.SINGLE_VALUE;
}
}
break;
case LIST:
      if (key == null) {
        logger.warn("invalid key for list field: key is null");
      } else {
        String[] keyParts = key.split("/");
if (keyParts.length < 1 || keyParts.length > 2) {
logger.warn("invalid key for list field: " + key
+ ", expect 1 or 2 parts: key1 or key1/index)");
} else if (keyParts.length == 1) {
valueType = ZnodeModValueType.LIST_VALUE;
} else {
try {
int index = Integer.parseInt(keyParts[1]);
if (index < 0) {
logger.warn("invalid key for list field: " + key + ", index < 0");
} else {
valueType = ZnodeModValueType.SINGLE_VALUE;
}
} catch (NumberFormatException e) {
logger.warn("invalid key for list field: " + key + ", part-2 is NOT an integer");
}
}
}
break;
case MAP:
      if (key == null) {
        logger.warn("invalid key for map field: key is null");
      } else {
        String[] keyParts = key.split("/");
if (keyParts.length < 1 || keyParts.length > 2) {
logger.warn("invalid key for map field: " + key
+ ", expect 1 or 2 parts: key1 or key1/key2)");
} else if (keyParts.length == 1) {
valueType = ZnodeModValueType.MAP_VALUE;
} else {
valueType = ZnodeModValueType.SINGLE_VALUE;
}
}
break;
case ZNODE:
valueType = ZnodeModValueType.ZNODE_VALUE;
break;
default:
break;
}
return valueType;
}
private static String getSingleValue(ZNRecord record, ZnodePropertyType type, String key) {
if (record == null || key == null) {
return null;
}
String value = null;
    String[] keyParts = key.split("/");
switch (type) {
case SIMPLE:
value = record.getSimpleField(key);
break;
case LIST:
List<String> list = record.getListField(keyParts[0]);
if (list == null) {
logger.warn("invalid key for list field: " + key + ", map for key part-1 doesn't exist");
return null;
}
int idx = Integer.parseInt(keyParts[1]);
value = list.get(idx);
break;
case MAP:
Map<String, String> map = record.getMapField(keyParts[0]);
if (map == null) {
logger.warn("invalid key for map field: " + key + ", map for key part-1 doesn't exist");
return null;
}
value = map.get(keyParts[1]);
break;
default:
break;
}
return value;
}
private static List<String> getListValue(ZNRecord record, String key) {
if (record == null) {
return null;
}
return record.getListField(key);
}
private static Map<String, String> getMapValue(ZNRecord record, String key) {
return record.getMapField(key);
}
// comparator's for single/list/map-value
private static boolean compareSingleValue(String actual, String expect, String key, ZNRecord diff) {
boolean ret = (STRING_COMPARATOR.compare(actual, expect) == 0);
if (diff != null) {
diff.setSimpleField(key + "/expect", expect);
diff.setSimpleField(key + "/actual", actual);
}
return ret;
}
private static boolean compareListValue(List<String> actualList, List<String> expectList,
String key, ZNRecord diff) {
boolean ret = true;
if (actualList == null && expectList == null) {
ret = true;
} else if (actualList == null && expectList != null) {
ret = false;
if (diff != null) {
diff.setListField(key + "/expect", expectList);
}
} else if (actualList != null && expectList == null) {
ret = false;
if (diff != null) {
diff.setListField(key + "/actual", actualList);
}
} else {
Iterator<String> itrActual = actualList.iterator();
Iterator<String> itrExpect = expectList.iterator();
if (diff != null && diff.getListField(key + "/expect") == null) {
diff.setListField(key + "/expect", new ArrayList<String>());
}
if (diff != null && diff.getListField(key + "/actual") == null) {
diff.setListField(key + "/actual", new ArrayList<String>());
}
while (itrActual.hasNext() && itrExpect.hasNext()) {
String actual = itrActual.next();
String expect = itrExpect.next();
if (STRING_COMPARATOR.compare(actual, expect) != 0) {
ret = false;
if (diff != null) {
diff.getListField(key + "/expect").add(expect);
diff.getListField(key + "/actual").add(actual);
}
}
}
while (itrActual.hasNext()) {
String actual = itrActual.next();
if (diff != null) {
diff.getListField(key + "/actual").add(actual);
}
}
while (itrExpect.hasNext()) {
String expect = itrExpect.next();
if (diff != null) {
diff.getListField(key + "/expect").add(expect);
}
}
}
return ret;
}
private static void setMapField(ZNRecord record, String key1, String key2, String value) {
if (record.getMapField(key1) == null) {
record.setMapField(key1, new TreeMap<String, String>());
}
record.getMapField(key1).put(key2, value);
}
private static boolean compareMapValue(Map<String, String> actualMap,
Map<String, String> expectMap, String mapKey, ZNRecord diff) {
boolean ret = true;
if (actualMap == null && expectMap == null) {
ret = true;
} else if (actualMap == null && expectMap != null) {
ret = false;
if (diff != null) {
diff.setMapField(mapKey + "/expect", expectMap);
}
} else if (actualMap != null && expectMap == null) {
ret = false;
if (diff != null) {
diff.setMapField(mapKey + "/actual", actualMap);
}
} else {
for (String key : actualMap.keySet()) {
String actual = actualMap.get(key);
if (!expectMap.containsKey(key)) {
ret = false;
if (diff != null) {
setMapField(diff, mapKey + "/actual", key, actual);
}
} else {
String expect = expectMap.get(key);
if (STRING_COMPARATOR.compare(actual, expect) != 0) {
ret = false;
if (diff != null) {
setMapField(diff, mapKey + "/actual", key, actual);
setMapField(diff, mapKey + "/expect", key, expect);
}
}
}
}
for (String key : expectMap.keySet()) {
String expect = expectMap.get(key);
if (!actualMap.containsKey(key)) {
ret = false;
if (diff != null) {
setMapField(diff, mapKey + "/expect", key, expect);
}
} else {
String actual = actualMap.get(key);
if (STRING_COMPARATOR.compare(actual, expect) != 0) {
ret = false;
if (diff != null) {
setMapField(diff, mapKey + "/actual", key, actual);
setMapField(diff, mapKey + "/expect", key, expect);
}
}
}
}
}
return ret;
}
private static void setZNRecord(ZNRecord diff, ZNRecord record, String keySuffix) {
if (diff == null || record == null) {
return;
}
for (String key : record.getSimpleFields().keySet()) {
diff.setSimpleField(key + "/" + keySuffix, record.getSimpleField(key));
}
for (String key : record.getListFields().keySet()) {
diff.setListField(key + "/" + keySuffix, record.getListField(key));
}
for (String key : record.getMapFields().keySet()) {
diff.setMapField(key + "/" + keySuffix, record.getMapField(key));
}
}
private static boolean compareZnodeValue(ZNRecord actual, ZNRecord expect, ZNRecord diff) {
boolean ret = true;
if (actual == null && expect == null) {
ret = true;
} else if (actual == null && expect != null) {
ret = false;
if (diff != null) {
setZNRecord(diff, expect, "expect");
}
} else if (actual != null && expect == null) {
ret = false;
if (diff != null) {
setZNRecord(diff, actual, "actual");
}
} else {
for (String key : actual.getSimpleFields().keySet()) {
if (compareSingleValue(actual.getSimpleField(key), expect.getSimpleField(key), key, diff) == false) {
ret = false;
}
}
for (String key : expect.getMapFields().keySet()) {
if (!actual.getMapFields().containsKey(key)) {
if (diff != null) {
ret = false;
diff.setMapField(key + "/expect", expect.getMapField(key));
}
} else {
if (compareMapValue(actual.getMapField(key), expect.getMapField(key), key, diff) == false) {
ret = false;
}
}
}
for (String key : actual.getMapFields().keySet()) {
if (!expect.getMapFields().containsKey(key)) {
if (diff != null) {
ret = false;
diff.setMapField(key + "/actual", actual.getMapField(key));
}
} else {
if (compareMapValue(actual.getMapField(key), expect.getMapField(key), key, diff) == false) {
ret = false;
}
}
}
}
return ret;
}
private static void resetZNRecord(ZNRecord record) {
if (record != null) {
record.getSimpleFields().clear();
record.getListFields().clear();
record.getMapFields().clear();
}
}
private static boolean isValueExpected(ZNRecord current, ZnodePropertyType type, String key,
ZnodeValue expect, ZNRecord diff) {
    // a null expect value means no particular value is expected
if (expect == null) {
return true;
}
boolean result = false;
resetZNRecord(diff);
ZnodeModValueType valueType = getValueType(type, key);
switch (valueType) {
case SINGLE_VALUE:
String singleValue = getSingleValue(current, type, key);
result = compareSingleValue(singleValue, expect._singleValue, key, diff);
break;
case LIST_VALUE:
List<String> listValue = getListValue(current, key);
result = compareListValue(listValue, expect._listValue, key, diff);
break;
case MAP_VALUE:
Map<String, String> mapValue = getMapValue(current, key);
result = compareMapValue(mapValue, expect._mapValue, key, diff);
break;
case ZNODE_VALUE:
result = compareZnodeValue(current, expect._znodeValue, diff);
break;
case INVALID:
break;
default:
break;
}
return result;
}
private static void setSingleValue(ZNRecord record, ZnodePropertyType type, String key,
String value) {
    String[] keyParts = key.split("/");
switch (type) {
case SIMPLE:
record.setSimpleField(key, value);
break;
case LIST:
List<String> list = record.getListField(keyParts[0]);
if (list == null) {
logger.warn("invalid key for list field: " + key + ", value for key part-1 doesn't exist");
return;
}
int idx = Integer.parseInt(keyParts[1]);
list.remove(idx);
list.add(idx, value);
break;
case MAP:
Map<String, String> map = record.getMapField(keyParts[0]);
if (map == null) {
logger.warn("invalid key for map field: " + key + ", value for key part-1 doesn't exist");
return;
}
map.put(keyParts[1], value);
break;
default:
break;
}
}
private static void setListValue(ZNRecord record, String key, List<String> value) {
record.setListField(key, value);
}
private static void setMapValue(ZNRecord record, String key, Map<String, String> value) {
record.setMapField(key, value);
}
private static void removeSingleValue(ZNRecord record, ZnodePropertyType type, String key) {
if (record == null) {
return;
}
    String[] keyParts = key.split("/");
switch (type) {
case SIMPLE:
record.getSimpleFields().remove(key);
break;
case LIST:
List<String> list = record.getListField(keyParts[0]);
if (list == null) {
logger.warn("invalid key for list field: " + key + ", value for key part-1 doesn't exist");
return;
}
int idx = Integer.parseInt(keyParts[1]);
list.remove(idx);
break;
case MAP:
Map<String, String> map = record.getMapField(keyParts[0]);
if (map == null) {
logger.warn("invalid key for map field: " + key + ", value for key part-1 doesn't exist");
return;
}
map.remove(keyParts[1]);
break;
default:
break;
}
}
  private static void removeListValue(ZNRecord record, String key) {
    if (record != null && record.getListFields() != null) {
      record.getListFields().remove(key);
    }
  }
private static void removeMapValue(ZNRecord record, String key) {
record.getMapFields().remove(key);
}
private static boolean executeVerifier(ZNRecord actual, TestCommand command, ZNRecord diff) {
final ZnodeOpArg arg = command._znodeOpArg;
final ZnodeValue expectValue = command._trigger._expectValue;
boolean result = isValueExpected(actual, arg._propertyType, arg._key, expectValue, diff);
String operation = arg._operation;
if (operation.equals("!=")) {
result = !result;
} else if (!operation.equals("==")) {
logger.warn("fail to execute (unsupport operation=" + operation + "):" + operation);
result = false;
}
return result;
}
private static boolean compareAndSetZnode(ZnodeValue expect, ZnodeOpArg arg, HelixZkClient zkClient,
ZNRecord diff) {
String path = arg._znodePath;
ZnodePropertyType type = arg._propertyType;
String key = arg._key;
boolean success = true;
// retry 3 times in case there are write conflicts
long backoffTime = 20; // ms
for (int i = 0; i < 3; i++) {
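      // Optimistic concurrency: read the znode with its version stat, apply the
      // change locally, then write back conditioned on that version; a
      // ZkBadVersionException means a concurrent writer won, so back off and retry.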
try {
Stat stat = new Stat();
ZNRecord record = zkClient.<ZNRecord> readDataAndStat(path, stat, true);
if (isValueExpected(record, type, key, expect, diff)) {
if (arg._operation.compareTo("+") == 0) {
if (record == null) {
record = new ZNRecord("default");
}
ZnodeModValueType valueType = getValueType(arg._propertyType, arg._key);
switch (valueType) {
case SINGLE_VALUE:
setSingleValue(record, arg._propertyType, arg._key, arg._updateValue._singleValue);
break;
case LIST_VALUE:
setListValue(record, arg._key, arg._updateValue._listValue);
break;
case MAP_VALUE:
setMapValue(record, arg._key, arg._updateValue._mapValue);
break;
case ZNODE_VALUE:
// deep copy
record =
ZNRECORD_SERIALIZER.deserialize(ZNRECORD_SERIALIZER
.serialize(arg._updateValue._znodeValue));
break;
case INVALID:
break;
default:
break;
}
} else if (arg._operation.compareTo("-") == 0) {
ZnodeModValueType valueType = getValueType(arg._propertyType, arg._key);
switch (valueType) {
case SINGLE_VALUE:
removeSingleValue(record, arg._propertyType, arg._key);
break;
case LIST_VALUE:
removeListValue(record, arg._key);
break;
case MAP_VALUE:
removeMapValue(record, arg._key);
break;
case ZNODE_VALUE:
record = null;
break;
case INVALID:
break;
default:
break;
}
          } else {
            logger.warn("failed to execute (unsupported operation): " + arg._operation);
            success = false;
          }
          if (success) {
if (record == null) {
zkClient.delete(path);
} else {
try {
zkClient.createPersistent(path, true);
} catch (ZkNodeExistsException e) {
// OK
}
zkClient.writeData(path, record, stat.getVersion());
}
return true;
} else {
return false;
}
}
} catch (ZkBadVersionException e) {
// e.printStackTrace();
} catch (PropertyStoreException e) {
// e.printStackTrace();
}
try {
Thread.sleep(backoffTime);
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
backoffTime *= 2;
}
return false;
}
private static class ExecuteCommand implements Runnable {
private final TestCommand _command;
private final long _startTime;
private final HelixZkClient _zkClient;
private final CountDownLatch _countDown;
private final Map<TestCommand, Boolean> _testResults;
public ExecuteCommand(long startTime, TestCommand command, CountDownLatch countDown,
HelixZkClient zkClient, Map<TestCommand, Boolean> testResults) {
_startTime = startTime;
_command = command;
_countDown = countDown;
_zkClient = zkClient;
_testResults = testResults;
}
@Override
public void run() {
boolean result = false;
long now = System.currentTimeMillis();
final long timeout = now + _command._trigger._timeout;
ZNRecord diff = new ZNRecord("diff");
try {
if (now < _startTime) {
Thread.sleep(_startTime - now);
}
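        // Poll until the command succeeds or the trigger window [start, start + timeout] elapses.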
do {
if (_command._commandType == CommandType.MODIFY) {
ZnodeOpArg arg = _command._znodeOpArg;
final ZnodeValue expectValue = _command._trigger._expectValue;
result = compareAndSetZnode(expectValue, arg, _zkClient, diff);
// logger.error("result:" + result + ", " + _command);
if (result == true) {
_command._finishTimestamp = System.currentTimeMillis();
_testResults.put(_command, true);
break;
} else {
// logger.error("result:" + result + ", diff:" + diff);
}
} else if (_command._commandType == CommandType.VERIFY) {
ZnodeOpArg arg = _command._znodeOpArg;
final String znodePath = arg._znodePath;
ZNRecord record = _zkClient.<ZNRecord> readData(znodePath, true);
result = executeVerifier(record, _command, diff);
// logger.error("result:" + result + ", " + _command.toString());
if (result == true) {
_command._finishTimestamp = System.currentTimeMillis();
_testResults.put(_command, true);
break;
} else {
// logger.error("result:" + result + ", diff:" + diff);
}
} else if (_command._commandType == CommandType.START) {
// TODO add data trigger for START command
Thread thread = _command._nodeOpArg._thread;
thread.start();
result = true;
_command._finishTimestamp = System.currentTimeMillis();
logger.info("result:" + result + ", " + _command.toString());
_testResults.put(_command, true);
break;
} else if (_command._commandType == CommandType.STOP) {
// TODO add data trigger for STOP command
HelixManager manager = _command._nodeOpArg._manager;
manager.disconnect();
Thread thread = _command._nodeOpArg._thread;
thread.interrupt();
// System.err.println("stop " +
// _command._nodeOpArg._manager.getInstanceName());
result = true;
_command._finishTimestamp = System.currentTimeMillis();
logger.info("result:" + result + ", " + _command.toString());
_testResults.put(_command, true);
break;
} else {
throw new IllegalArgumentException("Unsupport command type (was "
+ _command._commandType + ")");
}
Thread.sleep(SLEEP_TIME);
now = System.currentTimeMillis();
} while (now <= timeout);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
} finally {
if (result == false) {
_command._finishTimestamp = System.currentTimeMillis();
logger.error("result:" + result + ", diff: " + diff);
}
_countDown.countDown();
if (_countDown.getCount() == 0) {
if (_zkClient != null && !_zkClient.isClosed()) {
_zkClient.close();
}
}
}
}
}
private static Map<TestCommand, Boolean> executeTestHelper(List<TestCommand> commandList,
String zkAddr, CountDownLatch countDown) {
final Map<TestCommand, Boolean> testResults = new ConcurrentHashMap<TestCommand, Boolean>();
HelixZkClient.ZkClientConfig clientConfig = new HelixZkClient.ZkClientConfig();
clientConfig.setZkSerializer(new ZNRecordSerializer());
HelixZkClient zkClient = SharedZkClientFactory
.getInstance().buildZkClient(new HelixZkClient.ZkConnectionConfig(zkAddr), clientConfig);
// sort on trigger's start time, stable sort
Collections.sort(commandList, new Comparator<TestCommand>() {
@Override
public int compare(TestCommand o1, TestCommand o2) {
return (int) (o1._trigger._startTime - o2._trigger._startTime);
}
});
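    // Each command runs on its own thread; the shared zkClient is closed by
    // whichever command finishes last (see the countDown handling in ExecuteCommand).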
for (TestCommand command : commandList) {
      testResults.put(command, Boolean.FALSE);
TestTrigger trigger = command._trigger;
command._startTimestamp = System.currentTimeMillis() + trigger._startTime;
new Thread(new ExecuteCommand(command._startTimestamp, command, countDown, zkClient,
testResults)).start();
}
return testResults;
}
public static void executeTestAsync(List<TestCommand> commandList, String zkAddr)
throws InterruptedException {
CountDownLatch countDown = new CountDownLatch(commandList.size());
executeTestHelper(commandList, zkAddr, countDown);
}
public static Map<TestCommand, Boolean> executeTest(List<TestCommand> commandList, String zkAddr)
throws InterruptedException {
final CountDownLatch countDown = new CountDownLatch(commandList.size());
Map<TestCommand, Boolean> testResults = executeTestHelper(commandList, zkAddr, countDown);
// TODO add timeout
countDown.await();
return testResults;
}
}
| 9,956 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/MessagePoster.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.UUID;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory;
import org.apache.helix.model.LiveInstance.LiveInstanceProperty;
import org.apache.helix.model.Message;
import org.apache.helix.model.Message.MessageState;
import org.apache.helix.model.Message.MessageType;
public class MessagePoster {
public void post(String zkServer, Message message, String clusterName, String instanceName) {
HelixZkClient client = SharedZkClientFactory.getInstance().buildZkClient(new HelixZkClient.ZkConnectionConfig(
zkServer));
try {
client.setZkSerializer(new ZNRecordSerializer());
String path = PropertyPathBuilder.instanceMessage(clusterName, instanceName, message.getId());
client.delete(path);
ZNRecord record = client.readData(PropertyPathBuilder.liveInstance(clusterName, instanceName));
message.setTgtSessionId(record.getSimpleField(LiveInstanceProperty.SESSION_ID.toString()));
message.setTgtName(record.getId());
// System.out.println(message);
client.createPersistent(path, message.getRecord());
} finally {
client.close();
}
}
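  // Usage sketch: posting a custom message programmatically (hypothetical ZK address,
  // cluster and instance names):
  //   Message msg = new Message(MessageType.STATE_TRANSITION, UUID.randomUUID().toString());
  //   new MessagePoster().post("localhost:2181", msg, "test-cluster", "localhost_12918");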
public void postFaultInjectionMessage(String zkServer, String clusterName, String instanceName,
String payloadString, String partition) {
Message message = new Message("FaultInjection", UUID.randomUUID().toString());
if (payloadString != null) {
message.getRecord().setSimpleField("faultType", payloadString);
}
if (partition != null) {
message.setPartitionName(partition);
}
post(zkServer, message, clusterName, instanceName);
}
public void postTestMessage(String zkServer, String clusterName, String instanceName) {
String msgSrc = "cm-instance-0";
String msgId = "TestMessageId-2";
Message message = new Message(MessageType.STATE_TRANSITION, msgId);
message.setMsgId(msgId);
message.setSrcName(msgSrc);
message.setTgtName(instanceName);
message.setMsgState(MessageState.NEW);
message.setFromState("Slave");
message.setToState("Master");
message.setPartitionName("EspressoDB.partition-0." + instanceName);
post(zkServer, message, clusterName, instanceName);
}
public static void main(String[] args) {
if (args.length < 4 || args.length > 6) {
System.err.println("Usage: java " + MessagePoster.class.getName()
+ " zkServer cluster instance msgType [payloadString] [partition]");
System.err.println("msgType can be one of test, fault");
System.err.println("payloadString is sent along with the fault msgType");
System.exit(1);
}
String zkServer = args[0];
String cluster = args[1];
String instance = args[2];
String msgType = args[3];
String payloadString = (args.length >= 5 ? args[4] : null);
String partition = (args.length == 6 ? args[5] : null);
MessagePoster messagePoster = new MessagePoster();
if (msgType.equals("test")) {
messagePoster.postTestMessage(zkServer, cluster, instance);
} else if (msgType.equals("fault")) {
messagePoster
.postFaultInjectionMessage(zkServer, cluster, instance, payloadString, partition);
System.out.println("Posted " + msgType);
} else {
System.err.println("Message was not posted. Unknown msgType:" + msgType);
}
}
}
| 9,957 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifier.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.api.listeners.PreFetch;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.zkclient.IZkChildListener;
import org.apache.helix.zookeeper.zkclient.IZkDataListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Please use implementations of HelixClusterVerifier (BestPossibleExternalViewVerifier,
 * StrictMatchExternalViewVerifier, etc., in tools.ClusterVerifiers).
*/
@Deprecated
public abstract class ClusterVerifier implements IZkChildListener, IZkDataListener {
private static Logger LOG = LoggerFactory.getLogger(ClusterVerifier.class);
protected final HelixZkClient _zkclient;
protected final String _clusterName;
protected final HelixDataAccessor _accessor;
protected final PropertyKey.Builder _keyBuilder;
private CountDownLatch _countdown;
static class ClusterVerifyTrigger {
final PropertyKey _triggerKey;
final boolean _triggerOnChildDataChange;
public ClusterVerifyTrigger(PropertyKey triggerKey, boolean triggerOnChildDataChange) {
_triggerKey = triggerKey;
_triggerOnChildDataChange = triggerOnChildDataChange;
}
}
public ClusterVerifier(HelixZkClient zkclient, String clusterName) {
_zkclient = zkclient;
_clusterName = clusterName;
_accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkclient));
_keyBuilder = _accessor.keyBuilder();
}
public boolean verifyByCallback(long timeout, List<ClusterVerifyTrigger> triggers) {
_countdown = new CountDownLatch(1);
for (ClusterVerifyTrigger trigger : triggers) {
String path = trigger._triggerKey.getPath();
_zkclient.subscribeChildChanges(path, this);
if (trigger._triggerOnChildDataChange) {
        List<String> children = _zkclient.getChildren(path);
        for (String child : children) {
String childPath = String.format("%s/%s", path, child);
_zkclient.subscribeDataChanges(childPath, this);
}
}
}
boolean success = false;
try {
success = verify();
if (!success) {
success = _countdown.await(timeout, TimeUnit.MILLISECONDS);
if (!success) {
// make a final try if timeout
success = verify();
}
}
} catch (Exception e) {
LOG.error("Exception in verifier", e);
}
// clean up
_zkclient.unsubscribeAll();
return success;
}
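  // Usage sketch (from a concrete subclass; the trigger key is illustrative): re-verify
  // whenever external views change, including data changes on each child znode:
  //   List<ClusterVerifyTrigger> triggers = new ArrayList<ClusterVerifyTrigger>();
  //   triggers.add(new ClusterVerifyTrigger(_keyBuilder.externalViews(), true));
  //   boolean converged = verifyByCallback(30 * 1000L, triggers);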
@Override
@PreFetch(enabled = false)
public void handleDataChange(String dataPath, Object data) throws Exception {
boolean success = verify();
if (success) {
_countdown.countDown();
}
}
@Override
public void handleDataDeleted(String dataPath) throws Exception {
_zkclient.unsubscribeDataChanges(dataPath, this);
}
@Override
public void handleChildChange(String parentPath, List<String> currentChilds) throws Exception {
for (String child : currentChilds) {
String childPath = String.format("%s/%s", parentPath, child);
_zkclient.subscribeDataChanges(childPath, this);
}
boolean success = verify();
if (success) {
_countdown.countDown();
}
}
public boolean verifyByPolling(long timeout) {
try {
long start = System.currentTimeMillis();
boolean success;
do {
success = verify();
if (success) {
return true;
}
TimeUnit.MILLISECONDS.sleep(500);
} while ((System.currentTimeMillis() - start) <= timeout);
} catch (Exception e) {
LOG.error("Exception in verifier", e);
}
return false;
}
/**
* verify
* @return
* @throws Exception
*/
public abstract boolean verify() throws Exception;
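  // A minimal sketch of a concrete verifier (hypothetical check; real implementations
  // compare external views against expected states, as ClusterExternalViewVerifier does):
  //   class LiveInstanceCountVerifier extends ClusterVerifier {
  //     private final int _expected;
  //     LiveInstanceCountVerifier(HelixZkClient zkclient, String clusterName, int expected) {
  //       super(zkclient, clusterName);
  //       _expected = expected;
  //     }
  //     @Override
  //     public boolean verify() {
  //       return _accessor.getChildNames(_keyBuilder.liveInstances()).size() == _expected;
  //     }
  //   }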
}
| 9,958 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorByShuffling.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.model.IdealState.IdealStateProperty;
/*
* Ideal state calculator for the cluster manager V1. The ideal state is
 * calculated by randomly assigning master partitions to storage nodes.
*
 * Note that the following code is a naive strategy and is for cluster manager V1 only. We will
 * use other algorithms to calculate the ideal state in future milestones.
*
*
* */
public class IdealStateCalculatorByShuffling {
/*
 * Given the number of nodes, partitions and replicas, calculate the
 * ideal state in the following manner. For the master partition assignment:
 * 1. construct an ArrayList partitionList with partitionList[i] = i;
 * 2. shuffle the partition list;
 * 3. scan the shuffled list and assign partitionList[i] to node (i % nodes).
 * For the slave partitions, simply place them on the nodes that follow the
 * node holding the master partition.
 * The result of the method is a ZNRecord, which contains a list of maps; each
 * map is from the name of nodes to either "MASTER" or "SLAVE".
*/
public static ZNRecord calculateIdealState(List<String> instanceNames, int partitions,
int replicas, String resourceName, long randomSeed) {
return calculateIdealState(instanceNames, partitions, replicas, resourceName, randomSeed,
"MASTER", "SLAVE");
}
public static ZNRecord calculateIdealState(List<String> instanceNames, int partitions,
int replicas, String resourceName, long randomSeed, String masterValue, String slaveValue) {
if (instanceNames.size() <= replicas) {
throw new IllegalArgumentException("Replicas must be less than number of storage nodes");
}
Collections.sort(instanceNames);
ZNRecord result = new ZNRecord(resourceName);
List<Integer> partitionList = new ArrayList<Integer>(partitions);
for (int i = 0; i < partitions; i++) {
      partitionList.add(i);
}
Random rand = new Random(randomSeed);
// Shuffle the partition list
Collections.shuffle(partitionList, rand);
for (int i = 0; i < partitionList.size(); i++) {
int partitionId = partitionList.get(i);
Map<String, String> partitionAssignment = new TreeMap<String, String>();
int masterNode = i % instanceNames.size();
// the first in the list is the node that contains the master
partitionAssignment.put(instanceNames.get(masterNode), masterValue);
      // for the jth replica, we put it on the ((masterNode + j * partitions) % nodes)-th
      // node, probing forward if that node already holds a replica of this partition
for (int j = 1; j <= replicas; j++) {
int index = (masterNode + j * partitionList.size()) % instanceNames.size();
while (partitionAssignment.keySet().contains(instanceNames.get(index))) {
index = (index + 1) % instanceNames.size();
}
partitionAssignment.put(instanceNames.get(index), slaveValue);
}
String partitionName = resourceName + "_" + partitionId;
result.setMapField(partitionName, partitionAssignment);
}
result.setSimpleField(IdealStateProperty.NUM_PARTITIONS.toString(), String.valueOf(partitions));
return result;
}
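  // Illustrative output shape (hypothetical names): with 5 instances, 8 partitions and
  // replicas=2, each partition's map field holds one master and two slaves, e.g.
  //   "TestDB_3" -> { "node_2": "MASTER", "node_3": "SLAVE", "node_4": "SLAVE" }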
public static ZNRecord calculateIdealState(List<String> instanceNames, int partitions,
int replicas, String resourceName) {
long randomSeed = 888997632;
// seed is a constant, so that the shuffle always give same result
return calculateIdealState(instanceNames, partitions, replicas, resourceName, randomSeed);
}
}
| 9,959 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ClusterExternalViewVerifier.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.Stage;
import org.apache.helix.controller.pipeline.StageContext;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
import org.apache.helix.controller.stages.BestPossibleStateOutput;
import org.apache.helix.controller.stages.ClusterEvent;
import org.apache.helix.controller.stages.ClusterEventType;
import org.apache.helix.controller.stages.CurrentStateComputationStage;
import org.apache.helix.controller.stages.ResourceComputationStage;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.Partition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* given zk, cluster, and a list of expected live-instances
* check whether cluster's external-view reaches best-possible states
*/
/**
* This class is deprecated, please use BestPossibleExternalViewVerifier in tools.ClusterVerifiers instead.
*/
@Deprecated
public class ClusterExternalViewVerifier extends ClusterVerifier {
private static Logger LOG = LoggerFactory.getLogger(ClusterExternalViewVerifier.class);
final List<String> _expectSortedLiveNodes; // always sorted
public ClusterExternalViewVerifier(HelixZkClient zkclient, String clusterName,
List<String> expectLiveNodes) {
super(zkclient, clusterName);
_expectSortedLiveNodes = expectLiveNodes;
Collections.sort(_expectSortedLiveNodes);
}
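  // Usage sketch (hypothetical ZK client, cluster and instance names):
  //   ClusterExternalViewVerifier verifier = new ClusterExternalViewVerifier(
  //       zkclient, "test-cluster", Arrays.asList("localhost_12918", "localhost_12919"));
  //   boolean converged = verifier.verifyByPolling(30 * 1000L);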
boolean verifyLiveNodes(List<String> actualLiveNodes) {
Collections.sort(actualLiveNodes);
return _expectSortedLiveNodes.equals(actualLiveNodes);
}
/**
* @param externalView
* @param bestPossibleState map of partition to map of instance to state
* @return
*/
boolean verifyExternalView(ExternalView externalView,
Map<Partition, Map<String, String>> bestPossibleState) {
Map<String, Map<String, String>> bestPossibleStateMap =
convertBestPossibleState(bestPossibleState);
// trimBestPossibleState(bestPossibleStateMap);
Map<String, Map<String, String>> externalViewMap = externalView.getRecord().getMapFields();
return externalViewMap.equals(bestPossibleStateMap);
}
static void runStage(ClusterEvent event, Stage stage) throws Exception {
StageContext context = new StageContext();
stage.init(context);
stage.preProcess();
stage.process(event);
stage.postProcess();
}
BestPossibleStateOutput calculateBestPossibleState(ResourceControllerDataProvider cache)
throws Exception {
ClusterEvent event = new ClusterEvent(ClusterEventType.StateVerifier);
event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
List<Stage> stages = new ArrayList<Stage>();
stages.add(new ResourceComputationStage());
stages.add(new CurrentStateComputationStage());
stages.add(new BestPossibleStateCalcStage());
for (Stage stage : stages) {
runStage(event, stage);
}
return event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
}
/**
* remove empty map and DROPPED state from best possible state
* @param bestPossibleState
*/
// static void trimBestPossibleState(Map<String, Map<String, String>> bestPossibleState) {
// Iterator<Entry<String, Map<String, String>>> iter = bestPossibleState.entrySet().iterator();
// while (iter.hasNext()) {
// Map.Entry<String, Map<String, String>> entry = iter.next();
// Map<String, String> instanceStateMap = entry.getValue();
// if (instanceStateMap.isEmpty()) {
// iter.remove();
// } else {
// // remove instances with DROPPED state
// Iterator<Map.Entry<String, String>> insIter = instanceStateMap.entrySet().iterator();
// while (insIter.hasNext()) {
// Map.Entry<String, String> insEntry = insIter.next();
// String state = insEntry.getValue();
// if (state.equalsIgnoreCase(HelixDefinedState.DROPPED.toString())) {
// insIter.remove();
// }
// }
// }
// }
// }
static Map<String, Map<String, String>> convertBestPossibleState(
Map<Partition, Map<String, String>> bestPossibleState) {
Map<String, Map<String, String>> result = new HashMap<String, Map<String, String>>();
for (Partition partition : bestPossibleState.keySet()) {
result.put(partition.getPartitionName(), bestPossibleState.get(partition));
}
return result;
}
@Override
public boolean verify() throws Exception {
ResourceControllerDataProvider cache = new ResourceControllerDataProvider();
cache.refresh(_accessor);
List<String> liveInstances = new ArrayList<String>();
liveInstances.addAll(cache.getLiveInstances().keySet());
boolean success = verifyLiveNodes(liveInstances);
if (!success) {
LOG.info("liveNodes not match, expect: " + _expectSortedLiveNodes + ", actual: "
+ liveInstances);
return false;
}
    BestPossibleStateOutput bestPossibleStates = calculateBestPossibleState(cache);
    Map<String, ExternalView> externalViews =
        _accessor.getChildValuesMap(_keyBuilder.externalViews(), true);
    // TODO all ideal-states should be included in external-views
    for (String resourceName : externalViews.keySet()) {
      ExternalView externalView = externalViews.get(resourceName);
      Map<Partition, Map<String, String>> bestPossibleState =
          bestPossibleStates.getResourceMap(resourceName);
      success = verifyExternalView(externalView, bestPossibleState);
if (!success) {
LOG.info("external-view for resource: " + resourceName + " not match");
return false;
}
}
return true;
}
}
| 9,960 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ClusterStateVerifier.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.Sets;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixDefinedState;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.PropertyPathBuilder;
import org.apache.helix.api.listeners.PreFetch;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.pipeline.Stage;
import org.apache.helix.controller.pipeline.StageContext;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
import org.apache.helix.controller.stages.BestPossibleStateOutput;
import org.apache.helix.controller.stages.ClusterEvent;
import org.apache.helix.controller.stages.ClusterEventType;
import org.apache.helix.controller.stages.CurrentStateComputationStage;
import org.apache.helix.controller.stages.ResourceComputationStage;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.apache.helix.task.TaskConstants;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
import org.apache.helix.zookeeper.zkclient.IZkChildListener;
import org.apache.helix.zookeeper.zkclient.IZkDataListener;
import org.apache.helix.zookeeper.zkclient.exception.ZkNodeExistsException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class is deprecated. Please use dedicated verifier classes, such as
 * BestPossibleExternalViewVerifier, in tools.ClusterVerifiers.
*/
@Deprecated
public class ClusterStateVerifier {
public static String cluster = "cluster";
public static String zkServerAddress = "zkSvr";
public static String help = "help";
public static String timeout = "timeout";
public static String period = "period";
public static String resources = "resources";
private static Logger LOG = LoggerFactory.getLogger(ClusterStateVerifier.class);
public interface Verifier {
boolean verify();
}
public interface ZkVerifier extends Verifier {
ZkClient getZkClient();
String getClusterName();
}
  /** Use BestPossibleExternalViewVerifier instead */
@Deprecated
  static class ExtViewVerifierZkListener implements IZkChildListener, IZkDataListener {
final CountDownLatch _countDown;
final HelixZkClient _zkClient;
final Verifier _verifier;
    public ExtViewVerifierZkListener(CountDownLatch countDown, HelixZkClient zkClient,
        ZkVerifier verifier) {
_countDown = countDown;
_zkClient = zkClient;
_verifier = verifier;
}
@Override
@PreFetch(enabled = false)
public void handleDataChange(String dataPath, Object data) throws Exception {
boolean result = _verifier.verify();
      if (result) {
_countDown.countDown();
}
}
@Override
public void handleDataDeleted(String dataPath) throws Exception {
      // no-op: a deleted external-view znode is picked up via the child-change callback
}
@Override
public void handleChildChange(String parentPath, List<String> currentChilds) throws Exception {
for (String child : currentChilds) {
String childPath = parentPath.equals("/") ? parentPath + child : parentPath + "/" + child;
_zkClient.subscribeDataChanges(childPath, this);
}
boolean result = _verifier.verify();
      if (result) {
_countDown.countDown();
}
}
}
private static HelixZkClient validateAndGetClient(String zkAddr, String clusterName) {
if (zkAddr == null || clusterName == null) {
throw new IllegalArgumentException("requires zkAddr|clusterName");
}
HelixZkClient.ZkClientConfig clientConfig = new HelixZkClient.ZkClientConfig();
clientConfig.setZkSerializer(new ZNRecordSerializer());
return DedicatedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(zkAddr), clientConfig);
}
@Deprecated
public static class BestPossAndExtViewZkVerifier implements ZkVerifier {
private final String clusterName;
private final Map<String, Map<String, String>> errStates;
private final HelixZkClient zkClient;
private final Set<String> resources;
public BestPossAndExtViewZkVerifier(String zkAddr, String clusterName) {
this(zkAddr, clusterName, null);
}
public BestPossAndExtViewZkVerifier(String zkAddr, String clusterName,
Map<String, Map<String, String>> errStates) {
this(zkAddr, clusterName, errStates, null);
}
public BestPossAndExtViewZkVerifier(String zkAddr, String clusterName,
Map<String, Map<String, String>> errStates, Set<String> resources) {
this(validateAndGetClient(zkAddr, clusterName), clusterName, errStates, resources);
}
private BestPossAndExtViewZkVerifier(HelixZkClient zkClient, String clusterName,
Map<String, Map<String, String>> errStates, Set<String> resources) {
if (zkClient == null || clusterName == null) {
throw new IllegalArgumentException("requires zkClient|clusterName");
}
this.clusterName = clusterName;
this.errStates = errStates;
this.zkClient = zkClient;
this.resources = resources;
}
public void close() {
if (zkClient != null) {
zkClient.close();
}
}
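    // Usage sketch (hypothetical names): errStates maps resource -> (partition -> instance)
    // for replicas expected to be in ERROR state, so they are not flagged as mismatches:
    //   Map<String, Map<String, String>> errStates = new HashMap<String, Map<String, String>>();
    //   errStates.put("TestDB", Collections.singletonMap("TestDB_0", "localhost_12918"));
    //   ZkVerifier v = new BestPossAndExtViewZkVerifier("localhost:2181", "test-cluster", errStates);
    //   boolean ok = ClusterStateVerifier.verifyByZkCallback(v, 30 * 1000L);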
@Override
public boolean verify() {
try {
HelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
return verifyBestPossAndExtView(accessor, errStates, clusterName, resources);
} catch (Exception e) {
LOG.error("exception in verification", e);
}
return false;
}
private boolean verifyBestPossAndExtView(HelixDataAccessor accessor,
Map<String, Map<String, String>> errStates, String clusterName, Set<String> resources) {
try {
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
// read cluster once and do verification
ResourceControllerDataProvider cache = new ResourceControllerDataProvider(clusterName);
cache.refresh(accessor);
Map<String, IdealState> idealStates = new HashMap<>(cache.getIdealStates());
// filter out all resources that use Task state model
Iterator<Map.Entry<String, IdealState>> it = idealStates.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<String, IdealState> pair = it.next();
if (pair.getValue().getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME)) {
it.remove();
}
}
Map<String, ExternalView> extViews =
accessor.getChildValuesMap(keyBuilder.externalViews(), true);
if (extViews == null) {
extViews = Collections.emptyMap();
}
// Filter resources if requested
if (resources != null && !resources.isEmpty()) {
idealStates.keySet().retainAll(resources);
extViews.keySet().retainAll(resources);
}
// if externalView is not empty and idealState doesn't exist
// add empty idealState for the resource
for (String resource : extViews.keySet()) {
if (!idealStates.containsKey(resource)) {
idealStates.put(resource, new IdealState(resource));
}
}
// calculate best possible state
BestPossibleStateOutput bestPossOutput = calcBestPossState(cache, resources);
Map<String, Map<Partition, Map<String, String>>> bestPossStateMap =
bestPossOutput.getStateMap();
// set error states
if (errStates != null) {
for (String resourceName : errStates.keySet()) {
Map<String, String> partErrStates = errStates.get(resourceName);
for (String partitionName : partErrStates.keySet()) {
String instanceName = partErrStates.get(partitionName);
if (!bestPossStateMap.containsKey(resourceName)) {
bestPossStateMap.put(resourceName, new HashMap<Partition, Map<String, String>>());
}
Partition partition = new Partition(partitionName);
if (!bestPossStateMap.get(resourceName).containsKey(partition)) {
bestPossStateMap.get(resourceName).put(partition, new HashMap<String, String>());
}
bestPossStateMap.get(resourceName).get(partition)
.put(instanceName, HelixDefinedState.ERROR.toString());
}
}
}
// System.out.println("stateMap: " + bestPossStateMap);
for (String resourceName : idealStates.keySet()) {
ExternalView extView = extViews.get(resourceName);
if (extView == null) {
IdealState is = idealStates.get(resourceName);
if (is.isExternalViewDisabled()) {
continue;
} else {
LOG.info("externalView for " + resourceName + " is not available");
return false;
}
}
// step 0: remove empty map and DROPPED state from best possible state
Map<Partition, Map<String, String>> bpStateMap =
bestPossOutput.getResourceMap(resourceName);
Iterator<Map.Entry<Partition, Map<String, String>>> iter = bpStateMap.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<Partition, Map<String, String>> entry = iter.next();
Map<String, String> instanceStateMap = entry.getValue();
if (instanceStateMap.isEmpty()) {
iter.remove();
} else {
// remove instances with DROPPED state
Iterator<Map.Entry<String, String>> insIter = instanceStateMap.entrySet().iterator();
while (insIter.hasNext()) {
Map.Entry<String, String> insEntry = insIter.next();
String state = insEntry.getValue();
if (state.equalsIgnoreCase(HelixDefinedState.DROPPED.toString())) {
insIter.remove();
}
}
}
}
// System.err.println("resource: " + resourceName + ", bpStateMap: " + bpStateMap);
// step 1: externalView and bestPossibleState has equal size
int extViewSize = extView.getRecord().getMapFields().size();
int bestPossStateSize = bestPossOutput.getResourceMap(resourceName).size();
if (extViewSize != bestPossStateSize) {
LOG.info("exterView size (" + extViewSize + ") is different from bestPossState size ("
+ bestPossStateSize + ") for resource: " + resourceName);
// System.err.println("exterView size (" + extViewSize
// + ") is different from bestPossState size (" + bestPossStateSize
// + ") for resource: " + resourceName);
// System.out.println("extView: " + extView.getRecord().getMapFields());
// System.out.println("bestPossState: " +
// bestPossOutput.getResourceMap(resourceName));
return false;
}
// step 2: every entry in external view is contained in best possible state
for (String partition : extView.getRecord().getMapFields().keySet()) {
Map<String, String> evInstanceStateMap = extView.getRecord().getMapField(partition);
Map<String, String> bpInstanceStateMap =
bestPossOutput.getInstanceStateMap(resourceName, new Partition(partition));
          boolean result = compareMap(evInstanceStateMap, bpInstanceStateMap);
          if (!result) {
            LOG.info("externalView is different from bestPossibleState for partition: " + partition);
// System.err.println("externalView is different from bestPossibleState for partition: "
// + partition + ", actual: " + evInstanceStateMap + ", bestPoss: " +
// bpInstanceStateMap);
return false;
}
}
}
return true;
} catch (Exception e) {
LOG.error("exception in verification", e);
return false;
}
}
/**
* calculate the best possible state note that DROPPED states are not checked since when
* kick off the BestPossibleStateCalcStage we are providing an empty current state map
*
* @param cache
* @return
* @throws Exception
*/
private BestPossibleStateOutput calcBestPossState(ResourceControllerDataProvider cache,
Set<String> resources) throws Exception {
ClusterEvent event = new ClusterEvent(ClusterEventType.StateVerifier);
event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
ResourceComputationStage rcState = new ResourceComputationStage();
CurrentStateComputationStage csStage = new CurrentStateComputationStage();
BestPossibleStateCalcStage bpStage = new BestPossibleStateCalcStage();
runStage(event, rcState);
// Filter resources if specified
if (resources != null) {
Map<String, Resource> resourceMap =
event.getAttribute(AttributeName.RESOURCES_TO_REBALANCE.name());
resourceMap.keySet().retainAll(resources);
}
runStage(event, csStage);
runStage(event, bpStage);
BestPossibleStateOutput output =
event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
// System.out.println("output:" + output);
return output;
}
private void runStage(ClusterEvent event, Stage stage) throws Exception {
StageContext context = new StageContext();
stage.init(context);
stage.preProcess();
stage.process(event);
stage.postProcess();
}
private <K, V> boolean compareMap(Map<K, V> map1, Map<K, V> map2) {
boolean isEqual = true;
if (map1 == null && map2 == null) {
// OK
} else if (map1 == null && map2 != null) {
if (!map2.isEmpty()) {
isEqual = false;
}
} else if (map1 != null && map2 == null) {
if (!map1.isEmpty()) {
isEqual = false;
}
} else {
// verify size
if (map1.size() != map2.size()) {
isEqual = false;
}
// verify each <key, value> in map1 is contained in map2
for (K key : map1.keySet()) {
if (!map1.get(key).equals(map2.get(key))) {
LOG.debug(
"different value for key: " + key + "(map1: " + map1.get(key) + ", map2: " + map2
.get(key) + ")");
isEqual = false;
break;
}
}
}
return isEqual;
}
@Override
public ZkClient getZkClient() {
return (ZkClient) zkClient;
}
@Override
public String getClusterName() {
return clusterName;
}
@Override
public String toString() {
String verifierName = getClass().getName();
verifierName = verifierName.substring(verifierName.lastIndexOf('.') + 1, verifierName.length());
return verifierName + "(" + clusterName + "@" + zkClient.getServers() + ")";
}
@Override
public void finalize() {
if (zkClient != null) {
zkClient.close();
}
}
}
@Deprecated
public static class MasterNbInExtViewVerifier implements ZkVerifier {
private final String clusterName;
private final HelixZkClient zkClient;
public MasterNbInExtViewVerifier(String zkAddr, String clusterName) {
this(validateAndGetClient(zkAddr, clusterName), clusterName);
}
private MasterNbInExtViewVerifier(HelixZkClient zkClient, String clusterName) {
if (zkClient == null || clusterName == null) {
throw new IllegalArgumentException("requires zkClient|clusterName");
}
this.clusterName = clusterName;
this.zkClient = zkClient;
}
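    // Usage sketch (hypothetical names): checks that every partition in every external
    // view has at least one instance in the MASTER state:
    //   ZkVerifier v = new MasterNbInExtViewVerifier("localhost:2181", "test-cluster");
    //   boolean ok = ClusterStateVerifier.verifyByZkCallback(v, 10 * 1000L);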
@Override
public boolean verify() {
try {
ZKHelixDataAccessor accessor =
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
return verifyMasterNbInExtView(accessor);
} catch (Exception e) {
LOG.error("exception in verification", e);
}
return false;
}
@Override
public ZkClient getZkClient() {
return (ZkClient) zkClient;
}
@Override
public String getClusterName() {
return clusterName;
}
private boolean verifyMasterNbInExtView(HelixDataAccessor accessor) {
Builder keyBuilder = accessor.keyBuilder();
Map<String, IdealState> idealStates =
accessor.getChildValuesMap(keyBuilder.idealStates(), true);
if (idealStates == null || idealStates.size() == 0) {
LOG.info("No resource idealState");
return true;
}
Map<String, ExternalView> extViews =
accessor.getChildValuesMap(keyBuilder.externalViews(), true);
if (extViews == null || extViews.size() < idealStates.size()) {
LOG.info("No externalViews | externalView.size() < idealState.size()");
return false;
}
for (String resource : extViews.keySet()) {
int partitions = idealStates.get(resource).getNumPartitions();
Map<String, Map<String, String>> instanceStateMap =
extViews.get(resource).getRecord().getMapFields();
if (instanceStateMap.size() < partitions) {
LOG.info("Number of externalViews (" + instanceStateMap.size() + ") < partitions ("
+ partitions + ")");
return false;
}
for (String partition : instanceStateMap.keySet()) {
boolean foundMaster = false;
for (String instance : instanceStateMap.get(partition).keySet()) {
if (instanceStateMap.get(partition).get(instance).equalsIgnoreCase("MASTER")) {
foundMaster = true;
break;
}
}
if (!foundMaster) {
LOG.info("No MASTER for partition: " + partition);
return false;
}
}
}
return true;
}
@Override
public void finalize() {
if (zkClient != null) {
zkClient.close();
}
}
}
public static boolean verifyByPolling(Verifier verifier) {
return verifyByPolling(verifier, 30 * 1000);
}
public static boolean verifyByPolling(Verifier verifier, long timeout) {
return verifyByPolling(verifier, timeout, 1000);
}
public static boolean verifyByPolling(Verifier verifier, long timeout, long period) {
long startTime = System.currentTimeMillis();
boolean result = false;
try {
      long curTime;
      do {
        Thread.sleep(period);
        result = verifier.verify();
        if (result) {
          break;
        }
        curTime = System.currentTimeMillis();
      } while (curTime <= startTime + timeout);
      return result;
    } catch (Exception e) {
      // interrupted while sleeping between polls; report the last verification result
      e.printStackTrace();
} finally {
long endTime = System.currentTimeMillis();
// debug
System.err.println(result + ": " + verifier + ": wait " + (endTime - startTime)
+ "ms to verify");
}
return false;
}
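  // Usage sketch: Verifier is a single-method interface, so any ad-hoc check can be
  // polled (hypothetical lambda and zkClient; requires Java 8+):
  //   boolean ok = ClusterStateVerifier.verifyByPolling(
  //       () -> zkClient.exists("/test-cluster/CONTROLLER/LEADER"), 15 * 1000L, 500L);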
public static boolean verifyByZkCallback(ZkVerifier verifier) {
return verifyByZkCallback(verifier, 30000);
}
/**
* This function should be always single threaded
*
* @param verifier
* @param timeout
* @return
*/
public static boolean verifyByZkCallback(ZkVerifier verifier, long timeout) {
long startTime = System.currentTimeMillis();
CountDownLatch countDown = new CountDownLatch(1);
HelixZkClient zkClient = verifier.getZkClient();
String clusterName = verifier.getClusterName();
// add an ephemeral node to /{clusterName}/CONFIGS/CLUSTER/verify
// so when analyze zk log, we know when a test ends
try {
zkClient.createEphemeral("/" + clusterName + "/CONFIGS/CLUSTER/verify");
} catch (ZkNodeExistsException ex) {
LOG.error("There is already a verification in progress", ex);
throw ex;
}
    ExtViewVerifierZkListener listener =
        new ExtViewVerifierZkListener(countDown, zkClient, verifier);
String extViewPath = PropertyPathBuilder.externalView(clusterName);
zkClient.subscribeChildChanges(extViewPath, listener);
for (String child : zkClient.getChildren(extViewPath)) {
String childPath = extViewPath.equals("/") ? extViewPath + child : extViewPath + "/" + child;
zkClient.subscribeDataChanges(childPath, listener);
}
// do initial verify
    boolean result = verifier.verify();
    if (!result) {
      try {
        result = countDown.await(timeout, TimeUnit.MILLISECONDS);
        if (!result) {
          // make a final try if timeout
          result = verifier.verify();
        }
      } catch (Exception e) {
        // interrupted while waiting for ZK callbacks; report the last verification result
        e.printStackTrace();
      }
}
// clean up
zkClient.unsubscribeChildChanges(extViewPath, listener);
for (String child : zkClient.getChildren(extViewPath)) {
String childPath = extViewPath.equals("/") ? extViewPath + child : extViewPath + "/" + child;
zkClient.unsubscribeDataChanges(childPath, listener);
}
long endTime = System.currentTimeMillis();
zkClient.delete("/" + clusterName + "/CONFIGS/CLUSTER/verify");
// debug
System.err.println(result + ": wait " + (endTime - startTime) + "ms, " + verifier);
return result;
}
@SuppressWarnings("static-access")
private static Options constructCommandLineOptions() {
Option helpOption =
OptionBuilder.withLongOpt(help).withDescription("Prints command-line options info")
.create();
Option zkServerOption =
OptionBuilder.withLongOpt(zkServerAddress).withDescription("Provide zookeeper address")
.create();
zkServerOption.setArgs(1);
zkServerOption.setRequired(true);
zkServerOption.setArgName("ZookeeperServerAddress(Required)");
Option clusterOption =
OptionBuilder.withLongOpt(cluster).withDescription("Provide cluster name").create();
clusterOption.setArgs(1);
clusterOption.setRequired(true);
clusterOption.setArgName("Cluster name (Required)");
Option timeoutOption =
OptionBuilder.withLongOpt(timeout).withDescription("Timeout value for verification")
.create();
timeoutOption.setArgs(1);
timeoutOption.setArgName("Timeout value (Optional), default=30s");
Option sleepIntervalOption =
OptionBuilder.withLongOpt(period).withDescription("Polling period for verification")
.create();
sleepIntervalOption.setArgs(1);
sleepIntervalOption.setArgName("Polling period value (Optional), default=1s");
Option resourcesOption =
OptionBuilder.withLongOpt(resources).withDescription("Specific set of resources to verify")
.create();
resourcesOption.setArgs(1);
resourcesOption.setArgName("Comma-separated resource names, default is all resources");
Options options = new Options();
options.addOption(helpOption);
options.addOption(zkServerOption);
options.addOption(clusterOption);
options.addOption(timeoutOption);
options.addOption(sleepIntervalOption);
options.addOption(resourcesOption);
return options;
}
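  // Example invocation (hypothetical ZK address, cluster and resource names):
  //   java org.apache.helix.tools.ClusterStateVerifier --zkSvr localhost:2181 \
  //       --cluster test-cluster --timeout 30000 --period 1000 --resources TestDB,MyDB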
public static void printUsage(Options cliOptions) {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.setWidth(1000);
helpFormatter.printHelp("java " + ClusterSetup.class.getName(), cliOptions);
}
public static CommandLine processCommandLineArgs(String[] cliArgs) {
CommandLineParser cliParser = new GnuParser();
Options cliOptions = constructCommandLineOptions();
// CommandLine cmd = null;
try {
return cliParser.parse(cliOptions, cliArgs);
} catch (ParseException pe) {
System.err.println("CommandLineClient: failed to parse command-line options: "
+ pe.toString());
printUsage(cliOptions);
System.exit(1);
}
return null;
}
public static boolean verifyState(String[] args) {
    // defaults below apply only when no command-line args are supplied
String clusterName = "storage-cluster";
String zkServer = "localhost:2181";
long timeoutValue = 0;
long periodValue = 1000;
Set<String> resourceSet = null;
if (args.length > 0) {
CommandLine cmd = processCommandLineArgs(args);
zkServer = cmd.getOptionValue(zkServerAddress);
clusterName = cmd.getOptionValue(cluster);
String timeoutStr = cmd.getOptionValue(timeout);
String periodStr = cmd.getOptionValue(period);
String resourceStr = cmd.getOptionValue(resources);
if (timeoutStr != null) {
try {
timeoutValue = Long.parseLong(timeoutStr);
} catch (Exception e) {
System.err.println("Exception in converting " + timeoutStr + " to long. Use default (0)");
}
}
if (periodStr != null) {
try {
periodValue = Long.parseLong(periodStr);
} catch (Exception e) {
System.err.println("Exception in converting " + periodStr
+ " to long. Use default (1000)");
}
}
// Allow specifying resources explicitly
if (resourceStr != null) {
String[] resources = resourceStr.split("[\\s,]");
resourceSet = Sets.newHashSet(resources);
}
}
// return verifyByPolling(new BestPossAndExtViewZkVerifier(zkServer, clusterName),
// timeoutValue,
// periodValue);
ZkVerifier verifier;
if (resourceSet == null) {
verifier = new BestPossAndExtViewZkVerifier(zkServer, clusterName);
} else {
verifier = new BestPossAndExtViewZkVerifier(zkServer, clusterName, null, resourceSet);
}
return verifyByZkCallback(verifier, timeoutValue);
}
public static void main(String[] args) {
boolean result = verifyState(args);
System.out.println(result ? "Successful" : "failed");
System.exit(1);
}
}
| 9,961 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ZnodeOpArg.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.tools.TestExecutor.ZnodePropertyType;
public class ZnodeOpArg {
public String _znodePath;
public ZnodePropertyType _propertyType;
/**
* "+" for update/create if not exist
* '-' for remove
* "==" for test equals
* "!=" for test not equal
*/
public String _operation;
public String _key;
public ZnodeValue _updateValue;
public ZnodeOpArg() {
}
/**
* verify simple/list/map field: no update value
* @param znodePath
* @param type
* @param op
* @param key
*/
public ZnodeOpArg(String znodePath, ZnodePropertyType type, String op, String key) {
this(znodePath, type, op, key, new ZnodeValue());
}
/**
* verify znode: no update value
* @param znodePath
* @param type
* @param op
*/
public ZnodeOpArg(String znodePath, ZnodePropertyType type, String op) {
this(znodePath, type, op, null, new ZnodeValue());
}
/**
* simple field change
* @param znodePath
* @param type
* @param op
* @param key
* @param update
*/
public ZnodeOpArg(String znodePath, ZnodePropertyType type, String op, String key, String update) {
this(znodePath, type, op, key, new ZnodeValue(update));
}
/**
* list field change
* @param znodePath
* @param type
* @param op
* @param key
* @param update
*/
public ZnodeOpArg(String znodePath, ZnodePropertyType type, String op, String key,
List<String> update) {
this(znodePath, type, op, key, new ZnodeValue(update));
}
/**
* map field change
* @param znodePath
* @param type
* @param op
* @param key
* @param update
*/
public ZnodeOpArg(String znodePath, ZnodePropertyType type, String op, String key,
Map<String, String> update) {
this(znodePath, type, op, key, new ZnodeValue(update));
}
/**
* znode change
* @param znodePath
* @param type
* @param op
* @param key
* @param update
*/
public ZnodeOpArg(String znodePath, ZnodePropertyType type, String op, ZNRecord update) {
this(znodePath, type, op, null, new ZnodeValue(update));
}
/**
* @param znodePath
* @param type
* @param op
* @param key
* @param update
*/
public ZnodeOpArg(String znodePath, ZnodePropertyType type, String op, String key,
ZnodeValue update) {
_znodePath = znodePath;
_propertyType = type;
_operation = op;
_key = key;
_updateValue = update;
}
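  // Usage sketch (hypothetical path/key): assert that a simple field equals a value,
  // or create/update it (see the operation legend documented on _operation above):
  //   ZnodeOpArg verify = new ZnodeOpArg("/testPath", ZnodePropertyType.SIMPLE, "==", "key1", "value1");
  //   ZnodeOpArg update = new ZnodeOpArg("/testPath", ZnodePropertyType.SIMPLE, "+", "key1", "value1");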
@Override
public String toString() {
String ret =
"={\"" + _znodePath + "\", " + _propertyType + "/" + _key + " " + _operation + " "
+ _updateValue + "}";
return ret;
}
// TODO temp test; remove it
/*
* public static void main(String[] args)
* {
* // null modification command
* ZnodeOpArg command = new ZnodeOpArg();
* System.out.println(command);
* // simple modification command
* command = new ZnodeOpArg("/testPath", ZnodePropertyType.SIMPLE, "+", "key1", "simpleValue1");
* System.out.println(command);
* // list modification command
* List<String> list = new ArrayList<String>();
* list.add("listValue1");
* list.add("listValue2");
* command = new ZnodeOpArg("/testPath", ZnodePropertyType.LIST, "+", "key1", list);
* System.out.println(command);
* // map modification command
* Map<String, String> map = new HashMap<String, String>();
* map.put("mapKey1", "mapValue1");
* map.put("mapKey2", "mapValue2");
* command = new ZnodeOpArg("/testPath", ZnodePropertyType.MAP, "+", "key1", map);
* System.out.println(command);
* // map modification command
* ZNRecord record = new ZNRecord("znrecord");
* record.setSimpleField("key1", "simpleValue1");
* record.setListField("key1", list);
* record.setMapField("key1", map);
* command = new ZnodeOpArg("/testPath", ZnodePropertyType.ZNODE, "+", record);
* System.out.println(command);
* }
*/
}
| 9,962 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorByRush.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.model.IdealState.IdealStateProperty;
public class IdealStateCalculatorByRush {
/**
* Build the config map for RUSH algorithm. The input of RUSH algorithm groups
* nodes into "cluster"s, and different clusters can be assigned with
* different weights.
* @param numClusters
* number of node clusters
* @param instancesPerCluster
   *          List of clusters, each containing a list of node name strings.
* @param replicationDegree
* the replication degree
* @param clusterWeights
* the weight for each node cluster
   * @return the config map structure for the RUSH algorithm.
*/
static HashMap<String, Object> buildRushConfig(int numClusters,
List<List<String>> instancesPerCluster, int replicationDegree, List<Integer> clusterWeights) {
HashMap<String, Object> config = new HashMap<String, Object>();
config.put("replicationDegree", replicationDegree);
HashMap[] clusterList = new HashMap[numClusters];
config.put("subClusters", clusterList);
HashMap[] nodes;
HashMap<String, String> node;
HashMap<String, Object> clusterData;
for (int n = 0; n < numClusters; n++) {
int numNodes = instancesPerCluster.get(n).size();
nodes = new HashMap[numNodes];
for (int i = 0; i < numNodes; i++) {
node = new HashMap<String, String>();
node.put("partition", instancesPerCluster.get(n).get(i));
nodes[i] = node;
}
clusterData = new HashMap<String, Object>();
clusterData.put("weight", clusterWeights.get(n));
clusterData.put("nodes", nodes);
clusterList[n] = clusterData;
}
return config;
}
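  // Illustrative shape of the resulting config map (hypothetical nodes and weights):
  //   { "replicationDegree": 3,
  //     "subClusters": [
  //       { "weight": 1, "nodes": [ { "partition": "localhost_12918" }, ... ] },
  //       { "weight": 2, "nodes": [ { "partition": "localhost_12920" }, ... ] } ] }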
/**
   * Calculate the ideal state for a list of instance clusters.
   * @param instanceClusters
   *          List of clusters, each containing a list of node name strings.
   * @param instanceClusterWeights
   *          the weight for each instance cluster
   * @param partitions
   *          the number of partitions of the database
   * @param replicas
   *          the replication degree
   * @param resourceName
   *          the name of the database
* @return The ZNRecord that contains the ideal state
*/
public static ZNRecord calculateIdealState(List<List<String>> instanceClusters,
List<Integer> instanceClusterWeights, int partitions, int replicas, String resourceName)
throws Exception {
ZNRecord result = new ZNRecord(resourceName);
int numberOfClusters = instanceClusters.size();
List<List<String>> nodesInClusters = instanceClusters;
List<Integer> clusterWeights = instanceClusterWeights;
HashMap<String, Object> rushConfig =
buildRushConfig(numberOfClusters, nodesInClusters, replicas + 1, clusterWeights);
RUSHrHash rushHash = new RUSHrHash(rushConfig);
Random r = new Random(0);
for (int i = 0; i < partitions; i++) {
int partitionId = i;
String partitionName = resourceName + ".partition-" + partitionId;
ArrayList<HashMap> partitionAssignmentResult = rushHash.findNode(i);
List<String> nodeNames = new ArrayList<String>();
for (HashMap<?, ?> p : partitionAssignmentResult) {
for (Object key : p.keySet()) {
if (p.get(key) instanceof String) {
nodeNames.add(p.get(key).toString());
}
}
}
Map<String, String> partitionAssignment = new TreeMap<String, String>();
for (int j = 0; j < nodeNames.size(); j++) {
partitionAssignment.put(nodeNames.get(j), "SLAVE");
}
int master = r.nextInt(nodeNames.size());
// master = nodeNames.size()/2;
partitionAssignment.put(nodeNames.get(master), "MASTER");
result.setMapField(partitionName, partitionAssignment);
}
result.setSimpleField(IdealStateProperty.NUM_PARTITIONS.toString(), String.valueOf(partitions));
return result;
}
public static ZNRecord calculateIdealState(List<String> instanceClusters,
int instanceClusterWeight, int partitions, int replicas, String resourceName)
throws Exception {
List<List<String>> instanceClustersList = new ArrayList<List<String>>();
instanceClustersList.add(instanceClusters);
List<Integer> instanceClusterWeightList = new ArrayList<Integer>();
instanceClusterWeightList.add(instanceClusterWeight);
return calculateIdealState(instanceClustersList, instanceClusterWeightList, partitions,
replicas, resourceName);
}
/**
* Helper function to see how many partitions are mapped to different
* instances in two ideal states
*/
public static void printDiff(ZNRecord record1, ZNRecord record2) {
int diffCount = 0;
int diffCountMaster = 0;
for (String key : record1.getMapFields().keySet()) {
Map<String, String> map1 = record1.getMapField(key);
Map<String, String> map2 = record2.getMapField(key);
for (String k : map1.keySet()) {
if (!map2.containsKey(k)) {
diffCount++;
} else if (!map1.get(k).equalsIgnoreCase(map2.get(k))) {
diffCountMaster++;
}
}
}
System.out.println("\ndiff count = " + diffCount);
System.out.println("\nmaster diff count:" + diffCountMaster);
}
/**
* Helper function to calculate and print the standard deviation of the
* partition assignment ideal state.
*/
public static void printIdealStateStats(ZNRecord record) {
Map<String, Integer> countsMap = new TreeMap<String, Integer>();
Map<String, Integer> masterCountsMap = new TreeMap<String, Integer>();
for (String key : record.getMapFields().keySet()) {
Map<String, String> map1 = record.getMapField(key);
for (String k : map1.keySet()) {
        // count every occurrence; putting 0 on first sight (as before) undercounted by one
        if (!countsMap.containsKey(k)) {
          countsMap.put(k, 1);
        } else {
          countsMap.put(k, countsMap.get(k) + 1);
        }
        // count MASTER assignments, including an instance's first appearance
        if (map1.get(k).equalsIgnoreCase("MASTER")) {
          masterCountsMap.put(k, masterCountsMap.containsKey(k) ? masterCountsMap.get(k) + 1 : 1);
        } else if (!masterCountsMap.containsKey(k)) {
          masterCountsMap.put(k, 0);
        }
}
}
double sum = 0;
int maxCount = 0;
int minCount = Integer.MAX_VALUE;
for (String k : countsMap.keySet()) {
int count = countsMap.get(k);
sum += count;
if (maxCount < count) {
maxCount = count;
}
if (minCount > count) {
minCount = count;
}
System.out.print(count + " ");
}
System.out.println("\nMax count: " + maxCount + " min count:" + minCount);
System.out.println("\n master:");
double sumMaster = 0;
int maxCountMaster = 0;
int minCountMaster = Integer.MAX_VALUE;
for (String k : masterCountsMap.keySet()) {
int count = masterCountsMap.get(k);
sumMaster += count;
if (maxCountMaster < count) {
maxCountMaster = count;
}
if (minCountMaster > count) {
minCountMaster = count;
}
System.out.print(count + " ");
}
System.out.println("\nMean master: " + 1.0 * sumMaster / countsMap.size());
System.out.println("Max master count: " + maxCountMaster + " min count:" + minCountMaster);
double mean = sum / (countsMap.size());
// calculate the deviation of the node distribution
double deviation = 0;
for (String k : countsMap.keySet()) {
double count = countsMap.get(k);
deviation += (count - mean) * (count - mean);
}
System.out.println("Mean: " + mean + " normal deviation:"
+ Math.sqrt(deviation / countsMap.size()) / mean);
// System.out.println("Max count: " + maxCount + " min count:" + minCount);
int steps = 10;
int stepLen = (maxCount - minCount) / steps;
if (stepLen == 0)
return;
List<Integer> histogram = new ArrayList<Integer>((maxCount - minCount) / stepLen + 1);
for (int i = 0; i < (maxCount - minCount) / stepLen + 1; i++) {
histogram.add(0);
}
for (String k : countsMap.keySet()) {
int count = countsMap.get(k);
int stepNo = (count - minCount) / stepLen;
histogram.set(stepNo, histogram.get(stepNo) + 1);
}
System.out.println("histogram:");
for (Integer x : histogram) {
System.out.print(x + " ");
}
}
public static void main(String args[]) throws Exception {
int partitions = 4096, replicas = 2;
String resourceName = "espressoDB1";
List<String> instanceNames = new ArrayList<String>();
List<List<String>> instanceCluster1 = new ArrayList<List<String>>();
for (int i = 0; i < 20; i++) {
instanceNames.add("local" + i + "host_123" + i);
}
instanceCluster1.add(instanceNames);
List<Integer> weights1 = new ArrayList<Integer>();
weights1.add(1);
ZNRecord result =
IdealStateCalculatorByRush.calculateIdealState(instanceCluster1, weights1, partitions,
replicas, resourceName);
printIdealStateStats(result);
List<String> instanceNames2 = new ArrayList<String>();
for (int i = 400; i < 405; i++) {
instanceNames2.add("localhost_123" + i);
}
instanceCluster1.add(instanceNames2);
weights1.add(1);
ZNRecord result2 =
IdealStateCalculatorByRush.calculateIdealState(instanceCluster1, weights1, partitions,
replicas, resourceName);
printDiff(result, result2);
printIdealStateStats(result2);
}
}
| 9,963 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorForEspressoRelay.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.helix.HelixException;
import org.apache.helix.model.IdealState;
public class IdealStateCalculatorForEspressoRelay {
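  /**
   * Groups the relay instances into replica-sized batches, assigns each batch a contiguous range
   * of partitions, then marks the first relay candidate of each partition with {@code firstValue}
   * and the remaining candidates with {@code restValue}.
   * A minimal usage sketch (the resource name, state values, state model, and the
   * partition/instance lists below are illustrative):
   * <pre>{@code
   * IdealState idealState = IdealStateCalculatorForEspressoRelay.calculateRelayIdealState(
   *     partitionList, instanceList, "relayResource", 3, "ONLINE", "OFFLINE", "OnlineOffline");
   * }</pre>
   */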
public static IdealState calculateRelayIdealState(List<String> partitions,
List<String> instances, String resultRecordName, int replica, String firstValue,
String restValue, String stateModelName) {
Collections.sort(partitions);
Collections.sort(instances);
    if (instances.size() % replica != 0) {
      throw new HelixException("Number of instances must be divisible by the replica count");
    }
IdealState result = new IdealState(resultRecordName);
result.setNumPartitions(partitions.size());
result.setReplicas("" + replica);
result.setStateModelDefRef(stateModelName);
int groups = instances.size() / replica;
int remainder = instances.size() % replica;
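    // remainder is always 0 here: the divisibility check above rejects any other case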
int remainder2 = partitions.size() % groups;
int storageNodeGroupSize = partitions.size() / groups;
for (int i = 0; i < groups; i++) {
int relayStart = 0, relayEnd = 0, storageNodeStart = 0, storageNodeEnd = 0;
if (i < remainder) {
relayStart = (replica + 1) * i;
relayEnd = (replica + 1) * (i + 1);
} else {
relayStart = (replica + 1) * remainder + replica * (i - remainder);
relayEnd = relayStart + replica;
}
// System.out.println("relay start :" + relayStart + " relayEnd:" + relayEnd);
if (i < remainder2) {
storageNodeStart = (storageNodeGroupSize + 1) * i;
storageNodeEnd = (storageNodeGroupSize + 1) * (i + 1);
} else {
storageNodeStart =
(storageNodeGroupSize + 1) * remainder2 + storageNodeGroupSize * (i - remainder2);
storageNodeEnd = storageNodeStart + storageNodeGroupSize;
}
// System.out.println("storageNodeStart :" + storageNodeStart + " storageNodeEnd:" +
// storageNodeEnd);
List<String> snBatch = partitions.subList(storageNodeStart, storageNodeEnd);
List<String> relayBatch = instances.subList(relayStart, relayEnd);
Map<String, List<String>> sublistFields =
calculateSubIdealState(snBatch, relayBatch, replica);
result.getRecord().getListFields().putAll(sublistFields);
}
for (String snName : result.getRecord().getListFields().keySet()) {
Map<String, String> mapField = new TreeMap<String, String>();
List<String> relayCandidates = result.getRecord().getListField(snName);
mapField.put(relayCandidates.get(0), firstValue);
for (int i = 1; i < relayCandidates.size(); i++) {
mapField.put(relayCandidates.get(i), restValue);
}
result.getRecord().getMapFields().put(snName, mapField);
}
return result;
}
private static Map<String, List<String>> calculateSubIdealState(List<String> snBatch,
List<String> relayBatch, int replica) {
Map<String, List<String>> result = new HashMap<String, List<String>>();
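    // Give each partition 'replica' relays, rotating the starting offset through the relay batch.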
for (int i = 0; i < snBatch.size(); i++) {
String snName = snBatch.get(i);
result.put(snName, new ArrayList<String>());
for (int j = 0; j < replica; j++) {
result.get(snName).add(relayBatch.get((j + i) % (relayBatch.size())));
}
}
return result;
}
}
| 9,964 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/TestTrigger.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.Map;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
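/**
 * Test helper describing when and what to verify: wait until {@code _startTime}, then expect
 * {@code _expectValue} to be observed within {@code _timeout} milliseconds (a null expect value
 * means there is no data trigger).
 */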
public class TestTrigger {
public long _startTime;
public long _timeout;
public ZnodeValue _expectValue;
/**
* no time or data trigger
*/
public TestTrigger() {
this(0, 0, (ZnodeValue) null);
}
  /**
   * time trigger with a start time, no data trigger
   * @param startTime
   */
public TestTrigger(long startTime) {
this(startTime, 0, (ZnodeValue) null);
}
/**
* simple field data trigger
* @param expect
*/
public TestTrigger(long startTime, long timeout, String expect) {
this(startTime, timeout, new ZnodeValue(expect));
}
/**
* list field data trigger
* @param expect
*/
public TestTrigger(long startTime, long timeout, List<String> expect) {
this(startTime, timeout, new ZnodeValue(expect));
}
/**
* map field data trigger
* @param expect
*/
public TestTrigger(long startTime, long timeout, Map<String, String> expect) {
this(startTime, timeout, new ZnodeValue(expect));
}
/**
* znode data trigger
* @param expect
*/
public TestTrigger(long startTime, long timeout, ZNRecord expect) {
this(startTime, timeout, new ZnodeValue(expect));
}
/**
* @param startTime
* @param timeout
* @param expect
*/
public TestTrigger(long startTime, long timeout, ZnodeValue expect) {
_startTime = startTime;
_timeout = timeout;
_expectValue = expect;
}
@Override
public String toString() {
String ret = "<" + _startTime + "~" + _timeout + "ms, " + _expectValue + ">";
return ret;
}
// TODO temp test; remove it
/*
* public static void main(String[] args)
* {
* TestTrigger trigger = new TestTrigger(0, 0, "simpleValue0");
* System.out.println("trigger=" + trigger);
* List<String> list = new ArrayList<String>();
* list.add("listValue1");
* list.add("listValue2");
* trigger = new TestTrigger(0, 0, list);
* System.out.println("trigger=" + trigger);
* Map<String, String> map = new HashMap<String, String>();
* map.put("mapKey3", "mapValue3");
* map.put("mapKey4", "mapValue4");
* trigger = new TestTrigger(0, 0, map);
* System.out.println("trigger=" + trigger);
* trigger = new TestTrigger();
* System.out.println("trigger=" + trigger);
* }
*/
}
| 9,965 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/StateModelConfigGenerator.java | package org.apache.helix.tools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.model.LeaderStandbySMD;
import org.apache.helix.model.MasterSlaveSMD;
import org.apache.helix.model.OnlineOfflineSMD;
import org.apache.helix.model.ScheduledTaskSMD;
import org.apache.helix.model.StorageSchemataSMD;
import org.apache.helix.model.TaskSMD;
// TODO refactor to use StateModelDefinition.Builder
@Deprecated
public class StateModelConfigGenerator {
public static void main(String[] args) {
ZNRecordSerializer serializer = new ZNRecordSerializer();
System.out.println(new String(serializer.serialize(generateConfigForMasterSlave())));
}
  /**
   * Semantics of a state's "count" field:
   *   -1 : don't care
   *   any numeric value > 0 : the count will be satisfied if possible, based on priority
   *   N : all nodes in the cluster will be assigned to this state, if possible
   *   R : all remaining nodes in the preference list will be assigned to this state;
   *       applies only to the last state
   */
@Deprecated
public static ZNRecord generateConfigForStorageSchemata() {
return StorageSchemataSMD.generateConfigForStorageSchemata();
}
@Deprecated
public static ZNRecord generateConfigForMasterSlave() {
return MasterSlaveSMD.generateConfigForMasterSlave();
}
@Deprecated
public static ZNRecord generateConfigForLeaderStandby() {
return LeaderStandbySMD.generateConfigForLeaderStandby();
}
@Deprecated
public static ZNRecord generateConfigForOnlineOffline() {
return OnlineOfflineSMD.generateConfigForOnlineOffline();
}
@Deprecated
public static ZNRecord generateConfigForScheduledTaskQueue() {
return ScheduledTaskSMD.generateConfigForScheduledTaskQueue();
}
@Deprecated
public static ZNRecord generateConfigForTaskStateModel() {
return TaskSMD.generateConfigForTaskStateModel();
}
}
| 9,966 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Helix tools classes
*
*/
package org.apache.helix.tools; | 9,967 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/HelixClusterVerifier.java | package org.apache.helix.tools.ClusterVerifiers;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
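/**
 * Generic contract for verifying that a Helix cluster has converged to an expected state.
 * Implementations typically block until the expected state is observed or the timeout expires;
 * callers should invoke {@link #close()} when done to release the underlying metadata store
 * connection.
 */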
public interface HelixClusterVerifier {
/**
* Verify the cluster.
   * The method will block for at most {@code timeout} milliseconds.
   * Return true if the verification succeeds, otherwise return false.
   *
   * @param timeout in milliseconds
   * @return true if the verification succeeds, false if not.
*/
boolean verify(long timeout);
/**
* Verify the cluster.
   * Return true if the verification succeeds, otherwise return false.
   *
   * @return true if the verification succeeds, false if not.
*/
boolean verify();
/**
* Close the underlying metadata store connection.
*/
default void close() {
System.out.println("Default close() was invoked! No operation was executed.");
}
}
| 9,968 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ClusterLiveNodesVerifier.java | package org.apache.helix.tools.ClusterVerifiers;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
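/**
 * Verifier that checks whether the set of live instances in the cluster matches an expected set.
 * A minimal usage sketch (the cluster name, ZK address, and instance names are illustrative):
 * <pre>{@code
 * Set<String> expected = new HashSet<>(Arrays.asList("localhost_12918", "localhost_12919"));
 * ClusterLiveNodesVerifier verifier =
 *     new ClusterLiveNodesVerifier.Builder("MyCluster", expected)
 *         .setZkAddress("localhost:2181")
 *         .build();
 * boolean ok = verifier.verifyByPolling();
 * verifier.close();
 * }</pre>
 */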
public class ClusterLiveNodesVerifier extends ZkHelixClusterVerifier {
final Set<String> _expectLiveNodes;
@Deprecated
public ClusterLiveNodesVerifier(RealmAwareZkClient zkclient, String clusterName,
List<String> expectLiveNodes) {
// usesExternalZkClient = true because ZkClient is given by the caller
// at close(), we will not close this ZkClient because it might be being used elsewhere
super(zkclient, clusterName, true, 0);
_expectLiveNodes = new HashSet<>(expectLiveNodes);
}
private ClusterLiveNodesVerifier(RealmAwareZkClient zkClient, String clusterName,
Set<String> expectLiveNodes, int waitPeriodTillVerify) {
// Initialize ClusterLiveNodesVerifier with usesExternalZkClient = false so that
// ClusterLiveNodesVerifier::close() would close ZkClient to prevent thread leakage
super(zkClient, clusterName, false, waitPeriodTillVerify);
_expectLiveNodes = expectLiveNodes == null ? new HashSet<>() : new HashSet<>(expectLiveNodes);
}
@Override
public boolean verifyByZkCallback(long timeout) {
waitTillVerify();
List<ClusterVerifyTrigger> triggers = new ArrayList<ClusterVerifyTrigger>();
triggers.add(new ClusterVerifyTrigger(_keyBuilder.liveInstances(), false, true, true));
return verifyByCallback(timeout, triggers);
}
@Override
protected boolean verifyState() throws Exception {
Set<String> actualLiveNodes =
new HashSet<String>(_accessor.getChildNames(_keyBuilder.liveInstances()));
return _expectLiveNodes.equals(actualLiveNodes);
}
@Override
public void finalize() {
close();
}
public static class Builder extends ZkHelixClusterVerifier.Builder<Builder> {
private final String _clusterName; // This is the ZK path sharding key
private final Set<String> _expectLiveNodes;
public Builder(String clusterName, Set<String> expectLiveNodes) {
_clusterName = clusterName;
_expectLiveNodes = expectLiveNodes;
}
public ClusterLiveNodesVerifier build() {
if (_clusterName == null || _clusterName.isEmpty()) {
throw new IllegalArgumentException("Cluster name is missing!");
}
validate();
return new ClusterLiveNodesVerifier(
createZkClient(RealmAwareZkClient.RealmMode.SINGLE_REALM, _realmAwareZkConnectionConfig,
_realmAwareZkClientConfig, _zkAddress), _clusterName, _expectLiveNodes, _waitPeriodTillVerify);
}
}
}
| 9,969 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/ZkHelixClusterVerifier.java | package org.apache.helix.tools.ClusterVerifiers;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey;
import org.apache.helix.SystemPropertyKeys;
import org.apache.helix.api.listeners.PreFetch;
import org.apache.helix.manager.zk.GenericZkHelixApiBuilder;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.msdcommon.exception.InvalidRoutingDataException;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.apache.helix.zookeeper.datamodel.serializer.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
import org.apache.helix.zookeeper.zkclient.IZkChildListener;
import org.apache.helix.zookeeper.zkclient.IZkDataListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
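/**
 * Base class for ZooKeeper-based cluster verifiers. Subclasses implement {@link #verifyState()};
 * verification can then be driven either by ZooKeeper callbacks ({@code verifyByZkCallback}) or
 * by periodic polling ({@code verifyByPolling}).
 */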
public abstract class ZkHelixClusterVerifier
implements IZkChildListener, IZkDataListener, HelixClusterVerifier, AutoCloseable {
private static Logger LOG = LoggerFactory.getLogger(ZkHelixClusterVerifier.class);
protected static int DEFAULT_TIMEOUT = 300 * 1000;
protected static int DEFAULT_PERIOD = 500;
protected final RealmAwareZkClient _zkClient;
// true if ZkHelixClusterVerifier was instantiated with a RealmAwareZkClient, false otherwise
// This is used for close() to determine how ZkHelixClusterVerifier should close the underlying
// ZkClient
private final boolean _usesExternalZkClient;
protected final String _clusterName;
protected final HelixDataAccessor _accessor;
protected final PropertyKey.Builder _keyBuilder;
private CountDownLatch _countdown;
protected final int _waitPeriodTillVerify;
private ExecutorService _verifyTaskThreadPool =
Executors.newSingleThreadExecutor(r -> new Thread(r, "ZkHelixClusterVerifier-verify_thread"));
protected static class ClusterVerifyTrigger {
final PropertyKey _triggerKey;
final boolean _triggerOnDataChange;
final boolean _triggerOnChildChange;
final boolean _triggerOnChildDataChange;
public ClusterVerifyTrigger(PropertyKey triggerKey, boolean triggerOnDataChange,
boolean triggerOnChildChange, boolean triggerOnChildDataChange) {
_triggerKey = triggerKey;
_triggerOnDataChange = triggerOnDataChange;
_triggerOnChildChange = triggerOnChildChange;
_triggerOnChildDataChange = triggerOnChildDataChange;
}
public boolean isTriggerOnDataChange() {
return _triggerOnDataChange;
}
public PropertyKey getTriggerKey() {
return _triggerKey;
}
public boolean isTriggerOnChildChange() {
return _triggerOnChildChange;
}
public boolean isTriggerOnChildDataChange() {
return _triggerOnChildDataChange;
}
}
protected ZkHelixClusterVerifier(RealmAwareZkClient zkClient, String clusterName,
boolean usesExternalZkClient, int waitPeriodTillVerify) {
if (zkClient == null || clusterName == null) {
throw new IllegalArgumentException("requires zkClient|clusterName");
}
_zkClient = zkClient;
_usesExternalZkClient = usesExternalZkClient;
_clusterName = clusterName;
_accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_zkClient));
_keyBuilder = _accessor.keyBuilder();
_waitPeriodTillVerify = waitPeriodTillVerify;
}
@Deprecated
public ZkHelixClusterVerifier(String zkAddr, String clusterName, int waitPeriodTillVerify) {
if (clusterName == null || clusterName.isEmpty()) {
throw new IllegalArgumentException("ZkHelixClusterVerifier: clusterName is null or empty!");
}
// If the multi ZK config is enabled, use DedicatedZkClient on multi-realm mode
if (Boolean.getBoolean(SystemPropertyKeys.MULTI_ZK_ENABLED) || zkAddr == null) {
LOG.info(
"ZkHelixClusterVerifier: zkAddr is null or multi-zk mode is enabled in System Properties."
+ " Instantiating in multi-zk mode!");
try {
RealmAwareZkClient.RealmAwareZkConnectionConfig.Builder connectionConfigBuilder =
new RealmAwareZkClient.RealmAwareZkConnectionConfig.Builder();
connectionConfigBuilder.setZkRealmShardingKey("/" + clusterName);
RealmAwareZkClient.RealmAwareZkClientConfig clientConfig =
new RealmAwareZkClient.RealmAwareZkClientConfig();
_zkClient = DedicatedZkClientFactory.getInstance()
.buildZkClient(connectionConfigBuilder.build(), clientConfig);
} catch (InvalidRoutingDataException | IllegalStateException e) {
// Note: IllegalStateException is for HttpRoutingDataReader if MSDS endpoint cannot be
// found
throw new HelixException("ZkHelixClusterVerifier: failed to create ZkClient!", e);
}
} else {
_zkClient = DedicatedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(zkAddr));
}
_usesExternalZkClient = false;
_zkClient.setZkSerializer(new ZNRecordSerializer());
_clusterName = clusterName;
_accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_zkClient));
_keyBuilder = _accessor.keyBuilder();
_waitPeriodTillVerify = waitPeriodTillVerify;
}
/**
* Verify the cluster.
   * The method will block for at most {@code timeout} milliseconds.
   * Return true if the verification succeeds, otherwise return false.
   * @param timeout in milliseconds
   * @return true if the verification succeeds, false if not.
*/
public boolean verify(long timeout) {
return verifyByZkCallback(timeout);
}
/**
* Verify the cluster.
   * The method will block for at most DEFAULT_TIMEOUT (300 seconds).
   * Return true if the verification succeeds, otherwise return false.
   * @return true if the verification succeeds, false if not.
*/
public boolean verify() {
return verify(DEFAULT_TIMEOUT);
}
/**
   * Verify the cluster by relying on ZooKeeper callbacks to re-trigger verification.
   * The method will block for at most {@code timeout} milliseconds.
   * @param timeout in milliseconds
   * @return true if the verification succeeds, false if not.
*/
public abstract boolean verifyByZkCallback(long timeout);
/**
   * Verify the cluster by relying on ZooKeeper callbacks to re-trigger verification.
   * The method will block for at most DEFAULT_TIMEOUT (300 seconds).
   * @return true if the verification succeeds, false if not.
*/
public boolean verifyByZkCallback() {
return verifyByZkCallback(DEFAULT_TIMEOUT);
}
protected void waitTillVerify() {
try {
if (_waitPeriodTillVerify != 0) {
Thread.sleep(_waitPeriodTillVerify);
}
    } catch (InterruptedException e) {
      LOG.error("Wait period before verification was interrupted", e);
      Thread.currentThread().interrupt();
    }
}
/**
   * Verify the cluster by periodically polling the cluster status.
   * The method will block for at most {@code timeout} milliseconds.
   * @param timeout in milliseconds
   * @param period polling interval in milliseconds
   * @return true if the verification succeeds within the timeout, false if not.
*/
public boolean verifyByPolling(long timeout, long period) {
waitTillVerify();
try {
long start = System.currentTimeMillis();
boolean success;
do {
success = verifyState();
if (success) {
return true;
}
TimeUnit.MILLISECONDS.sleep(period);
} while ((System.currentTimeMillis() - start) <= timeout);
LOG.error("verifier timeout out with timeout {}", timeout);
} catch (Exception e) {
LOG.error("Exception in verifier", e);
}
return false;
}
/**
   * Verify the cluster by periodically polling the cluster status.
   * The method will block for at most DEFAULT_TIMEOUT (300 seconds).
   * @return true if the verification succeeds, false if not.
*/
public boolean verifyByPolling() {
return verifyByPolling(DEFAULT_TIMEOUT, DEFAULT_PERIOD);
}
/**
* Implement close() for {@link AutoCloseable} and {@link HelixClusterVerifier}.
* Non-external resources should be closed in this method to prevent resource leak.
*/
@Override
public void close() {
if (_zkClient != null && !_usesExternalZkClient) {
_zkClient.close();
}
}
protected boolean verifyByCallback(long timeout, List<ClusterVerifyTrigger> triggers) {
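    // Verify once up front; if that fails, wait for trigger callbacks to re-run verifyState()
    // and count down the latch, making a final attempt if the timeout expires.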
_countdown = new CountDownLatch(1);
for (ClusterVerifyTrigger trigger : triggers) {
subscribeTrigger(trigger);
}
boolean success = false;
try {
success = verifyState();
if (!success) {
success = _countdown.await(timeout, TimeUnit.MILLISECONDS);
if (!success) {
// make a final try if timeout
success = verifyState();
if (!success) {
LOG.error("verifyByCallback failed due to timeout {}", timeout);
}
}
}
} catch (Exception e) {
LOG.error("Exception in verifier", e);
}
// clean up
_zkClient.unsubscribeAll();
_verifyTaskThreadPool.shutdownNow();
return success;
}
private void subscribeTrigger(ClusterVerifyTrigger trigger) {
String path = trigger.getTriggerKey().getPath();
if (trigger.isTriggerOnDataChange()) {
_zkClient.subscribeDataChanges(path, this);
}
if (trigger.isTriggerOnChildChange()) {
_zkClient.subscribeChildChanges(path, this);
}
if (trigger.isTriggerOnChildDataChange()) {
      List<String> children = _zkClient.getChildren(path);
      for (String child : children) {
String childPath = String.format("%s/%s", path, child);
_zkClient.subscribeDataChanges(childPath, this);
}
}
}
/**
* The method actually performs the required verifications.
* @return
* @throws Exception
*/
protected abstract boolean verifyState() throws Exception;
class VerifyStateCallbackTask implements Runnable {
@Override
public void run() {
try {
boolean success = verifyState();
if (success) {
_countdown.countDown();
}
} catch (Exception ex) {
LOG.info("verifyState() throws exception: " + ex);
}
}
}
@Override
@PreFetch(enabled = false)
public void handleDataChange(String dataPath, Object data) throws Exception {
if (!_verifyTaskThreadPool.isShutdown()) {
_verifyTaskThreadPool.submit(new VerifyStateCallbackTask());
}
}
@Override
public void handleDataDeleted(String dataPath) throws Exception {
_zkClient.unsubscribeDataChanges(dataPath, this);
if (!_verifyTaskThreadPool.isShutdown()) {
_verifyTaskThreadPool.submit(new VerifyStateCallbackTask());
}
}
@Override
public void handleChildChange(String parentPath, List<String> currentChilds) throws Exception {
for (String child : currentChilds) {
String childPath = String.format("%s/%s", parentPath, child);
_zkClient.subscribeDataChanges(childPath, this);
}
if (!_verifyTaskThreadPool.isShutdown()) {
_verifyTaskThreadPool.submit(new VerifyStateCallbackTask());
}
}
public String getClusterName() {
return _clusterName;
}
protected abstract static class Builder<B extends Builder<B>> extends GenericZkHelixApiBuilder<B> {
protected int _waitPeriodTillVerify;
public Builder() {
// Note: ZkHelixClusterVerifier is a single-realm API, so RealmMode is assumed to be
// SINGLE-REALM
setRealmMode(RealmAwareZkClient.RealmMode.SINGLE_REALM);
}
/**
* Use setZkAddress() instead. Deprecated but left here for backward-compatibility.
* @param zkAddress
* @return
*/
@Deprecated
public B setZkAddr(String zkAddress) {
return setZkAddress(zkAddress);
}
/**
     * The verify() methods in this class and its subclasses, such as
     * BestPossibleExternalViewVerifier, are intended to wait for the cluster to converge to a
     * stable state after changes are made. However, it takes some time for the controller to pick
     * up those changes. If verify() is called too early, before the controller has reacted, the
     * verifier may mistake the previous stable cluster state for the new (expected) stable state,
     * which can cause various issues. Supplying a wait period before starting to validate the
     * next expected state avoids this premature stable-state validation.
*/
public B setWaitTillVerify(int waitPeriod) {
_waitPeriodTillVerify = waitPeriod;
return (B) this;
}
public String getClusterName() {
if (_realmAwareZkConnectionConfig != null && (
_realmAwareZkConnectionConfig.getZkRealmShardingKey() != null
&& !_realmAwareZkConnectionConfig.getZkRealmShardingKey().isEmpty())) {
        // Strip the leading "/" from the sharding key to obtain the cluster name
return _realmAwareZkConnectionConfig.getZkRealmShardingKey().substring(1);
}
throw new HelixException(
"Failed to get the cluster name! Either RealmAwareZkConnectionConfig is null or its sharding key is null or empty!");
}
protected void validate() {
// Validate that either ZkAddress or ZkRealmShardingKey is set
if (_zkAddress == null || _zkAddress.isEmpty()) {
if (_realmAwareZkConnectionConfig == null
|| _realmAwareZkConnectionConfig.getZkRealmShardingKey() == null
|| _realmAwareZkConnectionConfig.getZkRealmShardingKey().isEmpty()) {
throw new IllegalArgumentException(
"ZkHelixClusterVerifier: one of either ZkAddress or ZkRealmShardingKey must be set! ZkAddress: "
+ _zkAddress + " RealmAwareZkConnectionConfig: " + _realmAwareZkConnectionConfig);
}
}
initializeConfigsIfNull();
}
/**
* Creates a RealmAwareZkClient for ZkHelixClusterVerifiers.
* Note that DedicatedZkClient is used whether it's multi-realm or single-realm.
* @return
*/
@Override
protected RealmAwareZkClient createZkClient(RealmAwareZkClient.RealmMode realmMode,
RealmAwareZkClient.RealmAwareZkConnectionConfig connectionConfig,
RealmAwareZkClient.RealmAwareZkClientConfig clientConfig, String zkAddress) {
if (Boolean.getBoolean(SystemPropertyKeys.MULTI_ZK_ENABLED) || zkAddress == null) {
try {
// First, try to create a RealmAwareZkClient that's a DedicatedZkClient
return DedicatedZkClientFactory.getInstance()
.buildZkClient(connectionConfig, clientConfig);
} catch (InvalidRoutingDataException | IllegalStateException e) {
throw new HelixException("ZkHelixClusterVerifier: failed to create ZkClient!", e);
}
} else {
return DedicatedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(zkAddress));
}
}
}
@Override
public void finalize() {
close();
}
}
| 9,970 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/StrictMatchExternalViewVerifier.java | package org.apache.helix.tools.ClusterVerifiers;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.rebalancer.AbstractRebalancer;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.Partition;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.task.TaskConstants;
import org.apache.helix.util.HelixUtil;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Verifier that checks whether the ExternalViews of the given resources (or all resources in the
 * cluster) exactly match their ideal mappings (in the IdealState).
 * To use this verifier on resources in Full-Auto mode, the BestPossible state must be persisted in the ClusterConfig.
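 * A minimal usage sketch (the cluster name and ZK address are illustrative):
 * <pre>{@code
 * StrictMatchExternalViewVerifier verifier =
 *     new StrictMatchExternalViewVerifier.Builder("MyCluster")
 *         .setZkAddress("localhost:2181")
 *         .setDeactivatedNodeAwareness(true)
 *         .build();
 * boolean matched = verifier.verifyByPolling();
 * verifier.close();
 * }</pre>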
*/
public class StrictMatchExternalViewVerifier extends ZkHelixClusterVerifier {
private static Logger LOG = LoggerFactory.getLogger(StrictMatchExternalViewVerifier.class);
private final Set<String> _resources;
private final Set<String> _expectLiveInstances;
private final boolean _isDeactivatedNodeAware;
@Deprecated
public StrictMatchExternalViewVerifier(String zkAddr, String clusterName, Set<String> resources,
Set<String> expectLiveInstances) {
this(zkAddr, clusterName, resources, expectLiveInstances, false, 0);
}
@Deprecated
public StrictMatchExternalViewVerifier(RealmAwareZkClient zkClient, String clusterName,
Set<String> resources, Set<String> expectLiveInstances) {
// usesExternalZkClient = true because ZkClient is given by the caller
// at close(), we will not close this ZkClient because it might be being used elsewhere
super(zkClient, clusterName, true, 0);
_resources = resources == null ? new HashSet<>() : new HashSet<>(resources);
_expectLiveInstances =
expectLiveInstances == null ? new HashSet<>() : new HashSet<>(expectLiveInstances);
_isDeactivatedNodeAware = false;
}
@Deprecated
private StrictMatchExternalViewVerifier(String zkAddr, String clusterName, Set<String> resources,
Set<String> expectLiveInstances, boolean isDeactivatedNodeAware, int waitTillVerify) {
super(zkAddr, clusterName, waitTillVerify);
_resources = resources;
_expectLiveInstances = expectLiveInstances;
_isDeactivatedNodeAware = isDeactivatedNodeAware;
}
private StrictMatchExternalViewVerifier(RealmAwareZkClient zkClient, String clusterName,
Set<String> resources, Set<String> expectLiveInstances, boolean isDeactivatedNodeAware,
int waitPeriodTillVerify, boolean usesExternalZkClient) {
    // Pass usesExternalZkClient through so that close() only closes an internally created
    // ZkClient (preventing thread leakage) and never a caller-supplied one
    super(zkClient, clusterName, usesExternalZkClient, waitPeriodTillVerify);
_resources = resources == null ? new HashSet<>() : new HashSet<>(resources);
_expectLiveInstances =
expectLiveInstances == null ? new HashSet<>() : new HashSet<>(expectLiveInstances);
_isDeactivatedNodeAware = isDeactivatedNodeAware;
}
public static class Builder extends ZkHelixClusterVerifier.Builder<Builder> {
private final String _clusterName; // This is the ZK path sharding key
private Set<String> _resources;
private Set<String> _expectLiveInstances;
private RealmAwareZkClient _zkClient;
// For backward compatibility, set the default isDeactivatedNodeAware to be false.
private boolean _isDeactivatedNodeAware = false;
private boolean _usesExternalZkClient = false; // false by default
public StrictMatchExternalViewVerifier build() {
if (_clusterName == null) {
throw new IllegalArgumentException("Cluster name is missing!");
}
if (_zkClient != null) {
return new StrictMatchExternalViewVerifier(_zkClient, _clusterName, _resources,
_expectLiveInstances, _isDeactivatedNodeAware, _waitPeriodTillVerify,
_usesExternalZkClient);
}
if (_realmAwareZkConnectionConfig == null || _realmAwareZkClientConfig == null) {
// For backward-compatibility
return new StrictMatchExternalViewVerifier(_zkAddress, _clusterName, _resources,
_expectLiveInstances, _isDeactivatedNodeAware, _waitPeriodTillVerify);
}
validate();
return new StrictMatchExternalViewVerifier(
createZkClient(RealmAwareZkClient.RealmMode.SINGLE_REALM, _realmAwareZkConnectionConfig,
_realmAwareZkClientConfig, _zkAddress), _clusterName, _resources,
_expectLiveInstances, _isDeactivatedNodeAware, _waitPeriodTillVerify,
_usesExternalZkClient);
}
public Builder(String clusterName) {
_clusterName = clusterName;
}
public String getClusterName() {
return _clusterName;
}
public Set<String> getResources() {
return _resources;
}
public Builder setResources(Set<String> resources) {
_resources = resources;
return this;
}
public Set<String> getExpectLiveInstances() {
return _expectLiveInstances;
}
public Builder setExpectLiveInstances(Set<String> expectLiveInstances) {
_expectLiveInstances = expectLiveInstances;
return this;
}
public String getZkAddress() {
return _zkAddress;
}
@Deprecated
public Builder setZkClient(RealmAwareZkClient zkClient) {
_zkClient = zkClient;
_usesExternalZkClient = true; // Set the flag since external ZkClient is used
return this;
}
public boolean getDeactivatedNodeAwareness() {
return _isDeactivatedNodeAware;
}
public Builder setDeactivatedNodeAwareness(boolean isDeactivatedNodeAware) {
_isDeactivatedNodeAware = isDeactivatedNodeAware;
return this;
}
protected void validate() {
super.validate();
if (!_clusterName.equals(_realmAwareZkConnectionConfig.getZkRealmShardingKey())) {
throw new IllegalArgumentException(
"StrictMatchExternalViewVerifier: Cluster name: " + _clusterName
+ " and ZK realm sharding key: " + _realmAwareZkConnectionConfig
.getZkRealmShardingKey() + " do not match!");
}
}
}
@Override
public boolean verify(long timeout) {
return verifyByZkCallback(timeout);
}
@Override
public boolean verifyByZkCallback(long timeout) {
waitTillVerify();
List<ClusterVerifyTrigger> triggers = new ArrayList<ClusterVerifyTrigger>();
// setup triggers
if (_resources != null && !_resources.isEmpty()) {
for (String resource : _resources) {
triggers
.add(new ClusterVerifyTrigger(_keyBuilder.idealStates(resource), true, false, false));
triggers
.add(new ClusterVerifyTrigger(_keyBuilder.externalView(resource), true, false, false));
}
} else {
triggers.add(new ClusterVerifyTrigger(_keyBuilder.idealStates(), false, true, true));
triggers.add(new ClusterVerifyTrigger(_keyBuilder.externalViews(), false, true, true));
}
return verifyByCallback(timeout, triggers);
}
@Override
protected boolean verifyState() {
try {
PropertyKey.Builder keyBuilder = _accessor.keyBuilder();
// read cluster once and do verification
ResourceControllerDataProvider cache = new ResourceControllerDataProvider();
cache.refresh(_accessor);
Map<String, IdealState> idealStates = new HashMap<>(cache.getIdealStates());
// filter out all resources that use Task state model
Iterator<Map.Entry<String, IdealState>> it = idealStates.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<String, IdealState> pair = it.next();
if (pair.getValue().getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME)) {
it.remove();
}
}
// verify live instances.
if (_expectLiveInstances != null && !_expectLiveInstances.isEmpty()) {
Set<String> actualLiveNodes = cache.getLiveInstances().keySet();
if (!_expectLiveInstances.equals(actualLiveNodes)) {
return false;
}
}
Map<String, ExternalView> extViews =
_accessor.getChildValuesMap(keyBuilder.externalViews(), true);
if (extViews == null) {
extViews = Collections.emptyMap();
}
// Filter resources if requested
if (_resources != null && !_resources.isEmpty()) {
idealStates.keySet().retainAll(_resources);
extViews.keySet().retainAll(_resources);
}
// if externalView is not empty and idealState doesn't exist
// add empty idealState for the resource
for (String resource : extViews.keySet()) {
if (!idealStates.containsKey(resource)) {
idealStates.put(resource, new IdealState(resource));
}
}
for (String resourceName : idealStates.keySet()) {
ExternalView extView = extViews.get(resourceName);
IdealState idealState = idealStates.get(resourceName);
if (extView == null) {
if (idealState.isExternalViewDisabled()) {
continue;
} else {
LOG.debug("externalView for " + resourceName + " is not available");
return false;
}
}
boolean result = verifyExternalView(cache, extView, idealState);
if (!result) {
return false;
}
}
return true;
} catch (Exception e) {
LOG.error("exception in verification", e);
return false;
}
}
private boolean verifyExternalView(ResourceControllerDataProvider dataCache, ExternalView externalView,
IdealState idealState) {
Map<String, Map<String, String>> mappingInExtview = externalView.getRecord().getMapFields();
Map<String, Map<String, String>> idealPartitionState;
switch (idealState.getRebalanceMode()) {
case FULL_AUTO:
ClusterConfig clusterConfig = new ConfigAccessor(_zkClient).getClusterConfig(dataCache.getClusterName());
if (!clusterConfig.isPersistBestPossibleAssignment() && !clusterConfig.isPersistIntermediateAssignment()) {
throw new HelixException(String.format("Full-Auto IdealState verifier requires "
+ "ClusterConfig.PERSIST_BEST_POSSIBLE_ASSIGNMENT or ClusterConfig.PERSIST_INTERMEDIATE_ASSIGNMENT "
+ "is enabled."));
}
for (String partition : idealState.getPartitionSet()) {
if (idealState.getPreferenceList(partition) == null || idealState.getPreferenceList(partition).isEmpty()) {
return false;
}
}
idealPartitionState = computeIdealPartitionState(dataCache, idealState);
break;
case SEMI_AUTO:
case USER_DEFINED:
idealPartitionState = computeIdealPartitionState(dataCache, idealState);
break;
case CUSTOMIZED:
idealPartitionState = idealState.getRecord().getMapFields();
break;
case TASK:
// ignore jobs
default:
return true;
}
return mappingInExtview.equals(idealPartitionState);
}
private Map<String, Map<String, String>> computeIdealPartitionState(
ResourceControllerDataProvider cache, IdealState idealState) {
String stateModelDefName = idealState.getStateModelDefRef();
StateModelDefinition stateModelDef = cache.getStateModelDef(stateModelDefName);
Map<String, Map<String, String>> idealPartitionState = new HashMap<>();
for (String partition : idealState.getPartitionSet()) {
List<String> preferenceList = AbstractRebalancer
.getPreferenceList(new Partition(partition), idealState, cache.getEnabledLiveInstances());
Map<String, String> idealMapping;
if (_isDeactivatedNodeAware) {
idealMapping = HelixUtil
.computeIdealMapping(preferenceList, stateModelDef, cache.getLiveInstances().keySet(),
cache.getDisabledInstancesForPartition(idealState.getResourceName(), partition));
} else {
idealMapping = HelixUtil
.computeIdealMapping(preferenceList, stateModelDef, cache.getEnabledLiveInstances(),
Collections.emptySet());
}
idealPartitionState.put(partition, idealMapping);
}
return idealPartitionState;
}
@Override
public String toString() {
String verifierName = getClass().getSimpleName();
return String
.format("%s(%s@%s@resources[%s])", verifierName, _clusterName, _zkClient.getServers(),
_resources != null ? Arrays.toString(_resources.toArray()) : "");
}
@Override
public void finalize() {
close();
}
}
| 9,971 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/ClusterVerifiers/BestPossibleExternalViewVerifier.java | package org.apache.helix.tools.ClusterVerifiers;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.HelixDefinedState;
import org.apache.helix.HelixRebalanceException;
import org.apache.helix.PropertyKey;
import org.apache.helix.controller.common.PartitionStateMap;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.rebalancer.waged.ReadOnlyWagedRebalancer;
import org.apache.helix.controller.rebalancer.waged.RebalanceAlgorithm;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
import org.apache.helix.controller.stages.BestPossibleStateOutput;
import org.apache.helix.controller.stages.ClusterEvent;
import org.apache.helix.controller.stages.ClusterEventType;
import org.apache.helix.controller.stages.CurrentStateComputationStage;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.controller.stages.ResourceComputationStage;
import org.apache.helix.manager.zk.ZkBucketDataAccessor;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.Partition;
import org.apache.helix.model.Resource;
import org.apache.helix.model.ResourceAssignment;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.task.TaskConstants;
import org.apache.helix.util.RebalanceUtil;
import org.apache.helix.zookeeper.api.client.RealmAwareZkClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Verifier that checks whether the ExternalViews of the given resources (or all resources in the
 * cluster) match their best possible mapping states.
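 * A minimal usage sketch (the cluster name and ZK address are illustrative):
 * <pre>{@code
 * BestPossibleExternalViewVerifier verifier =
 *     new BestPossibleExternalViewVerifier.Builder("MyCluster")
 *         .setZkAddress("localhost:2181")
 *         .build();
 * boolean converged = verifier.verifyByPolling();
 * verifier.close();
 * }</pre>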
*/
public class BestPossibleExternalViewVerifier extends ZkHelixClusterVerifier {
private static Logger LOG = LoggerFactory.getLogger(BestPossibleExternalViewVerifier.class);
private final Map<String, Map<String, String>> _errStates;
private final Set<String> _resources;
private final Set<String> _expectLiveInstances;
private final ResourceControllerDataProvider _dataProvider;
/**
* Deprecated - please use the Builder to construct this class.
* @param zkAddr
* @param clusterName
* @param resources
* @param errStates
* @param expectLiveInstances
*/
@Deprecated
public BestPossibleExternalViewVerifier(String zkAddr, String clusterName, Set<String> resources,
Map<String, Map<String, String>> errStates, Set<String> expectLiveInstances) {
this(zkAddr, clusterName, resources, errStates, expectLiveInstances, 0);
}
@Deprecated
public BestPossibleExternalViewVerifier(String zkAddr, String clusterName, Set<String> resources,
Map<String, Map<String, String>> errStates, Set<String> expectLiveInstances, int waitTillVerify) {
super(zkAddr, clusterName, waitTillVerify);
_errStates = errStates;
_resources = resources;
_expectLiveInstances = expectLiveInstances;
_dataProvider = new ResourceControllerDataProvider();
// _zkClient should be closed with BestPossibleExternalViewVerifier
}
/**
* Deprecated - please use the Builder to construct this class.
* @param zkClient
* @param clusterName
* @param resources
* @param errStates
* @param expectLiveInstances
*/
@Deprecated
public BestPossibleExternalViewVerifier(RealmAwareZkClient zkClient, String clusterName,
Set<String> resources, Map<String, Map<String, String>> errStates,
Set<String> expectLiveInstances) {
this(zkClient, clusterName, errStates, resources, expectLiveInstances, 0, true);
}
@Deprecated
public BestPossibleExternalViewVerifier(RealmAwareZkClient zkClient, String clusterName,
Set<String> resources, Map<String, Map<String, String>> errStates,
Set<String> expectLiveInstances, int waitTillVerify) {
// usesExternalZkClient = true because ZkClient is given by the caller
// at close(), we will not close this ZkClient because it might be being used elsewhere
this(zkClient, clusterName, errStates, resources, expectLiveInstances, waitTillVerify, true);
}
private BestPossibleExternalViewVerifier(RealmAwareZkClient zkClient, String clusterName,
Map<String, Map<String, String>> errStates, Set<String> resources,
Set<String> expectLiveInstances, int waitPeriodTillVerify, boolean usesExternalZkClient) {
    // Pass usesExternalZkClient through so that close() only closes an internally created
    // ZkClient (preventing thread leakage) and never a caller-supplied one
super(zkClient, clusterName, usesExternalZkClient, waitPeriodTillVerify);
// Deep copy data from Builder
_errStates = new HashMap<>();
if (errStates != null) {
errStates.forEach((k, v) -> _errStates.put(k, new HashMap<>(v)));
}
_resources = resources == null ? new HashSet<>() : new HashSet<>(resources);
_expectLiveInstances =
expectLiveInstances == null ? new HashSet<>() : new HashSet<>(expectLiveInstances);
_dataProvider = new ResourceControllerDataProvider();
}
public static class Builder extends ZkHelixClusterVerifier.Builder<Builder> {
private final String _clusterName;
private Map<String, Map<String, String>> _errStates;
private Set<String> _resources;
private Set<String> _expectLiveInstances;
private RealmAwareZkClient _zkClient;
public Builder(String clusterName) {
_clusterName = clusterName;
}
public BestPossibleExternalViewVerifier build() {
if (_clusterName == null) {
throw new IllegalArgumentException("Cluster name is missing!");
}
// _usesExternalZkClient == true
if (_zkClient != null) {
return new BestPossibleExternalViewVerifier(_zkClient, _clusterName, _errStates, _resources,
_expectLiveInstances, _waitPeriodTillVerify, true);
}
// _usesExternalZkClient == false
if (_realmAwareZkConnectionConfig == null || _realmAwareZkClientConfig == null) {
// For backward-compatibility
return new BestPossibleExternalViewVerifier(_zkAddress, _clusterName, _resources,
_errStates, _expectLiveInstances, _waitPeriodTillVerify);
}
validate();
return new BestPossibleExternalViewVerifier(
createZkClient(RealmAwareZkClient.RealmMode.SINGLE_REALM, _realmAwareZkConnectionConfig,
_realmAwareZkClientConfig, _zkAddress), _clusterName, _errStates, _resources,
_expectLiveInstances, _waitPeriodTillVerify, false);
}
public String getClusterName() {
return _clusterName;
}
public Map<String, Map<String, String>> getErrStates() {
return _errStates;
}
public Builder setErrStates(Map<String, Map<String, String>> errStates) {
_errStates = errStates;
return this;
}
public Set<String> getResources() {
return _resources;
}
public Builder setResources(Set<String> resources) {
_resources = resources;
return this;
}
public Set<String> getExpectLiveInstances() {
return _expectLiveInstances;
}
public Builder setExpectLiveInstances(Set<String> expectLiveInstances) {
_expectLiveInstances = expectLiveInstances;
return this;
}
public String getZkAddr() {
return _zkAddress;
}
public Builder setZkClient(RealmAwareZkClient zkClient) {
_zkClient = zkClient;
return this;
}
}
@Override
public boolean verify(long timeout) {
return verifyByZkCallback(timeout);
}
@Override
public boolean verifyByZkCallback(long timeout) {
waitTillVerify();
List<ClusterVerifyTrigger> triggers = new ArrayList<ClusterVerifyTrigger>();
// setup triggers
if (_resources != null && !_resources.isEmpty()) {
for (String resource : _resources) {
triggers
.add(new ClusterVerifyTrigger(_keyBuilder.idealStates(resource), true, false, false));
triggers
.add(new ClusterVerifyTrigger(_keyBuilder.externalView(resource), true, false, false));
}
} else {
triggers.add(new ClusterVerifyTrigger(_keyBuilder.idealStates(), false, true, true));
triggers.add(new ClusterVerifyTrigger(_keyBuilder.externalViews(), false, true, true));
}
return verifyByCallback(timeout, triggers);
}
@Override
protected synchronized boolean verifyState() {
try {
PropertyKey.Builder keyBuilder = _accessor.keyBuilder();
_dataProvider.requireFullRefresh();
_dataProvider.refresh(_accessor);
_dataProvider.setClusterEventId("ClusterStateVerifier");
Map<String, IdealState> idealStates = new HashMap<>(_dataProvider.getIdealStates());
// filter out all resources that use Task state model
idealStates.entrySet()
.removeIf(pair -> pair.getValue().getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME));
// verify live instances.
if (_expectLiveInstances != null && !_expectLiveInstances.isEmpty()) {
Set<String> actualLiveNodes = _dataProvider.getLiveInstances().keySet();
if (!_expectLiveInstances.equals(actualLiveNodes)) {
LOG.warn("Live instances are not as expected. Actual live nodes: " + actualLiveNodes
.toString());
return false;
}
}
Map<String, ExternalView> extViews =
_accessor.getChildValuesMap(keyBuilder.externalViews(), true);
if (extViews == null) {
extViews = Collections.emptyMap();
}
// Filter resources if requested
if (_resources != null && !_resources.isEmpty()) {
idealStates.keySet().retainAll(_resources);
extViews.keySet().retainAll(_resources);
}
// if externalView is not empty and idealState doesn't exist
// add empty idealState for the resource
for (String resource : extViews.keySet()) {
if (!idealStates.containsKey(resource)) {
ExternalView ev = extViews.get(resource);
IdealState is = new IdealState(resource);
is.getRecord().setSimpleFields(ev.getRecord().getSimpleFields());
idealStates.put(resource, is);
}
}
// calculate best possible state
BestPossibleStateOutput bestPossOutput = calcBestPossState(_dataProvider, _resources);
Map<String, Map<Partition, Map<String, String>>> bestPossStateMap =
bestPossOutput.getStateMap();
// set error states
if (_errStates != null) {
for (String resourceName : _errStates.keySet()) {
Map<String, String> partErrStates = _errStates.get(resourceName);
for (String partitionName : partErrStates.keySet()) {
String instanceName = partErrStates.get(partitionName);
if (!bestPossStateMap.containsKey(resourceName)) {
bestPossStateMap.put(resourceName, new HashMap<Partition, Map<String, String>>());
}
Partition partition = new Partition(partitionName);
if (!bestPossStateMap.get(resourceName).containsKey(partition)) {
bestPossStateMap.get(resourceName).put(partition, new HashMap<String, String>());
}
bestPossStateMap.get(resourceName).get(partition)
.put(instanceName, HelixDefinedState.ERROR.toString());
}
}
}
for (String resourceName : idealStates.keySet()) {
IdealState is = idealStates.get(resourceName);
ExternalView extView = extViews.get(resourceName);
if (extView == null) {
if (is.isExternalViewDisabled()) {
continue;
}
LOG.warn("externalView for " + resourceName
+ " is not available, check if best possible state is available.");
extView = new ExternalView(resourceName);
}
// step 0: remove empty map and DROPPED state from best possible state
PartitionStateMap bpStateMap =
bestPossOutput.getPartitionStateMap(resourceName);
StateModelDefinition stateModelDef = _dataProvider.getStateModelDef(is.getStateModelDefRef());
if (stateModelDef == null) {
        LOG.error("State model definition " + is.getStateModelDefRef() + " for resource "
            + is.getResourceName() + " not found!");
return false;
}
boolean result = verifyExternalView(extView, bpStateMap, stateModelDef);
if (!result) {
if (LOG.isDebugEnabled()) {
LOG.debug("verifyExternalView fails for " + resourceName + "! ExternalView: " + extView
+ " BestPossibleState: " + bpStateMap);
} else {
LOG.warn("verifyExternalView fails for " + resourceName
+ "! ExternalView does not match BestPossibleState");
}
return false;
}
}
return true;
} catch (Exception e) {
LOG.error("exception in verification", e);
return false;
}
}
private boolean verifyExternalView(ExternalView externalView,
PartitionStateMap bestPossibleState, StateModelDefinition stateModelDef) {
Set<String> ignoreStates = new HashSet<>(
Arrays.asList(stateModelDef.getInitialState(), HelixDefinedState.DROPPED.toString()));
Map<String, Map<String, String>> bestPossibleStateMap =
convertBestPossibleState(bestPossibleState);
removeEntryWithIgnoredStates(bestPossibleStateMap.entrySet().iterator(), ignoreStates);
Map<String, Map<String, String>> externalViewMap = externalView.getRecord().getMapFields();
removeEntryWithIgnoredStates(externalViewMap.entrySet().iterator(), ignoreStates);
return externalViewMap.equals(bestPossibleStateMap);
}
private void removeEntryWithIgnoredStates(
Iterator<Map.Entry<String, Map<String, String>>> partitionInstanceStateMapIter,
Set<String> ignoredStates) {
while (partitionInstanceStateMapIter.hasNext()) {
Map.Entry<String, Map<String, String>> entry = partitionInstanceStateMapIter.next();
Map<String, String> instanceStateMap = entry.getValue();
// remove instances with DROPPED and OFFLINE state
Iterator<Map.Entry<String, String>> insIter = instanceStateMap.entrySet().iterator();
while (insIter.hasNext()) {
String state = insIter.next().getValue();
if (ignoredStates.contains(state)) {
insIter.remove();
}
}
if (instanceStateMap.isEmpty()) {
partitionInstanceStateMapIter.remove();
}
}
}
private Map<String, Map<String, String>> convertBestPossibleState(
PartitionStateMap bestPossibleState) {
Map<String, Map<String, String>> result = new HashMap<String, Map<String, String>>();
for (Partition partition : bestPossibleState.getStateMap().keySet()) {
result.put(partition.getPartitionName(), bestPossibleState.getPartitionMap(partition));
}
return result;
}
/**
   * Calculate the best possible state. Note that DROPPED states are not checked, since we provide
   * an empty current state map when kicking off the BestPossibleStateCalcStage.
*
* @param cache
* @param resources
* @return
* @throws Exception
*/
private BestPossibleStateOutput calcBestPossState(ResourceControllerDataProvider cache, Set<String> resources)
throws Exception {
ClusterEvent event = new ClusterEvent(_clusterName, ClusterEventType.StateVerifier);
event.addAttribute(AttributeName.ControllerDataProvider.name(), cache);
RebalanceUtil.runStage(event, new ResourceComputationStage());
if (resources != null && !resources.isEmpty()) {
// Filtering out all non-required resources
final Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name());
resourceMap.keySet().retainAll(resources);
event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
final Map<String, Resource> resourceMapToRebalance =
event.getAttribute(AttributeName.RESOURCES_TO_REBALANCE.name());
resourceMapToRebalance.keySet().retainAll(resources);
event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceMapToRebalance);
}
RebalanceUtil.runStage(event, new CurrentStateComputationStage());
// Note the readOnlyWagedRebalancer is just for one time usage
try (ZkBucketDataAccessor zkBucketDataAccessor = new ZkBucketDataAccessor(_zkClient);
DryrunWagedRebalancer dryrunWagedRebalancer = new DryrunWagedRebalancer(zkBucketDataAccessor,
cache.getClusterName(), cache.getClusterConfig().getGlobalRebalancePreference())) {
event.addAttribute(AttributeName.STATEFUL_REBALANCER.name(), dryrunWagedRebalancer);
RebalanceUtil.runStage(event, new BestPossibleStateCalcStage());
}
return event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
}
@Override
public String toString() {
String verifierName = getClass().getSimpleName();
return verifierName + "(" + _clusterName + "@" + _zkClient + "@resources["
+ (_resources != null ? Arrays.toString(_resources.toArray()) : "") + "])";
}
// TODO: to clean up, finalize is deprecated in Java 9
@Override
public void finalize() throws Throwable {
close();
super.finalize();
}
private static class DryrunWagedRebalancer extends ReadOnlyWagedRebalancer implements AutoCloseable {
public DryrunWagedRebalancer(ZkBucketDataAccessor zkBucketDataAccessor, String clusterName,
Map<ClusterConfig.GlobalRebalancePreferenceKey, Integer> preferences) {
super(zkBucketDataAccessor, clusterName, preferences);
}
@Override
protected Map<String, ResourceAssignment> computeBestPossibleAssignment(
ResourceControllerDataProvider clusterData, Map<String, Resource> resourceMap,
Set<String> activeNodes, CurrentStateOutput currentStateOutput,
RebalanceAlgorithm algorithm) throws HelixRebalanceException {
return getBestPossibleAssignment(getAssignmentMetadataStore(), currentStateOutput,
resourceMap.keySet());
}
}
}
| 9,972 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/ZkGrep.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.BufferedReader;
import java.io.File;
import java.io.FileFilter;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.zip.GZIPInputStream;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* utility for grep zk transaction/snapshot logs
* - to grep a pattern by t1 use:
* zkgrep --zkCfg zkCfg --by t1 --pattern patterns...
* - to grep a pattern between t1 and t2 use:
* zkgrep --zkCfg zkCfg --between t1 t2 --pattern patterns...
* for example, to find fail-over latency between t1 and t2, use:
* 1) zkgrep --zkCfg zkCfg --by t1 --pattern "/{cluster}/LIVEINSTANCES/" | grep {fail-node}
* 2) zkgrep --zkCfg zkCfg --between t1 t2 --pattern "closeSession" | grep {fail-node session-id}
* 3) zkgrep --zkCfg zkCfg --between t1 t2 --pattern "/{cluster}" | grep "CURRENTSTATES" |
* grep "setData" | tail -1
* fail-over latency = timestamp difference between 2) and 3)
*/
public class ZkGrep {
private static Logger LOG = LoggerFactory.getLogger(ZkGrep.class);
private static final String zkCfg = "zkCfg";
private static final String pattern = "pattern";
private static final String by = "by";
private static final String between = "between";
public static final String log = "log";
public static final String snapshot = "snapshot";
private static final String gzSuffix = ".gz";
@SuppressWarnings("static-access")
private static Options constructCommandLineOptions() {
Option zkCfgOption =
OptionBuilder.hasArgs(1).isRequired(false).withLongOpt(zkCfg).withArgName("zoo.cfg")
.withDescription("provide zoo.cfg").create();
Option patternOption =
OptionBuilder.hasArgs().isRequired(true).withLongOpt(pattern)
.withArgName("grep-patterns...").withDescription("provide patterns (required)")
.create();
Option betweenOption =
OptionBuilder.hasArgs(2).isRequired(false).withLongOpt(between)
.withArgName("t1 t2 (timestamp in ms or yyMMdd_hhmmss_SSS)")
.withDescription("grep between t1 and t2").create();
Option byOption =
OptionBuilder.hasArgs(1).isRequired(false).withLongOpt(by)
.withArgName("t (timestamp in ms or yyMMdd_hhmmss_SSS)").withDescription("grep by t")
.create();
OptionGroup group = new OptionGroup();
group.setRequired(true);
group.addOption(betweenOption);
group.addOption(byOption);
Options options = new Options();
options.addOption(zkCfgOption);
options.addOption(patternOption);
options.addOptionGroup(group);
return options;
}
/**
* get zk transaction log dir and zk snapshot log dir
* @param zkCfgFile
* @return String[0]: zk-transaction-log-dir, String[1]: zk-snapshot-dir
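* <p>Illustrative example (paths hypothetical): for a zoo.cfg containing
* "dataDir=/var/zookeeper" and "dataLogDir=/var/zk-txnlog", this returns
* ["/var/zk-txnlog/version-2", "/var/zookeeper/version-2"]; if dataLogDir is
* absent, both entries fall back under dataDir.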
*/
static String[] getZkDataDirs(String zkCfgFile) {
String[] zkDirs = new String[2];
FileInputStream fis = null;
BufferedReader br = null;
try {
fis = new FileInputStream(zkCfgFile);
br = new BufferedReader(new InputStreamReader(fis));
String line;
while ((line = br.readLine()) != null) {
String key = "dataDir=";
if (line.startsWith(key)) {
zkDirs[1] = zkDirs[0] = line.substring(key.length()) + "/version-2";
}
key = "dataLogDir=";
if (line.startsWith(key)) {
zkDirs[0] = line.substring(key.length()) + "/version-2";
}
}
} catch (Exception e) {
LOG.error("exception in read file: " + zkCfgFile, e);
} finally {
try {
if (br != null) {
br.close();
}
if (fis != null) {
fis.close();
}
} catch (Exception e) {
LOG.error("exception in closing file: " + zkCfgFile, e);
}
}
return zkDirs;
}
// debug
static void printFiles(File[] files) {
System.out.println("START print");
for (int i = 0; i < files.length; i++) {
File file = files[i];
System.out.println(file.getName() + ", " + file.lastModified());
}
System.out.println("END print");
}
/**
* get files under a directory whose names contain pattern, ordered by last modified time
* @param dirPath
* @param pattern
* @return matching files sorted by ascending last-modified time
*/
static File[] getSortedFiles(String dirPath, final String pattern) {
File dir = new File(dirPath);
File[] files = dir.listFiles(new FileFilter() {
@Override
public boolean accept(File file) {
return file.isFile() && (file.getName().indexOf(pattern) != -1);
}
});
Arrays.sort(files, new Comparator<File>() {
@Override
public int compare(File o1, File o2) {
return Long.compare(o1.lastModified(), o2.lastModified());
}
});
return files;
}
/**
* get value for an attribute in a parsed zk log; e.g.
* "time:1384984016778 session:0x14257d1d17e0004 cxid:0x5 zxid:0x46899 type:error err:-101"
* given "time" return "1384984016778"
* @param line
* @param attribute
* @return value
*/
static String getAttributeValue(String line, String attribute) {
if (line == null) {
return null;
}
if (!attribute.endsWith(":")) {
attribute = attribute + ":";
}
String[] parts = line.split("\\s");
if (parts != null && parts.length > 0) {
for (int i = 0; i < parts.length; i++) {
if (parts[i].startsWith(attribute)) {
String val = parts[i].substring(attribute.length());
return val;
}
}
}
return null;
}
static long getTimestamp(String line) {
String timestamp = getAttributeValue(line, "time");
return Long.parseLong(timestamp);
}
/**
* parse a time string either in timestamp form or "yyMMdd_hhmmss_SSS" form
* @param time
* @return timestamp or -1 on error
*/
static long parseTimeString(String time) {
try {
return Long.parseLong(time);
} catch (NumberFormatException e) {
try {
SimpleDateFormat formatter = new SimpleDateFormat("yyMMdd_hhmmss_SSS");
Date date = formatter.parse(time);
return date.getTime();
} catch (java.text.ParseException ex) {
LOG.error("fail to parse time string: " + time, e);
}
}
return -1;
}
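// Illustrative usage (values hypothetical): both calls below return epoch milliseconds,
// the first parsed directly and the second via the yyMMdd_hhmmss_SSS format in the
// default time zone:
// parseTimeString("1384984016778");
// parseTimeString("131120_034656_778");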
public static void grepZkLog(File zkLog, long start, long end, String... patterns) {
FileInputStream fis = null;
BufferedReader br = null;
try {
fis = new FileInputStream(zkLog);
br = new BufferedReader(new InputStreamReader(fis));
String line;
while ((line = br.readLine()) != null) {
try {
long timestamp = getTimestamp(line);
if (timestamp > end) {
break;
}
if (timestamp < start) {
continue;
}
boolean match = true;
for (String pattern : patterns) {
if (line.indexOf(pattern) == -1) {
match = false;
break;
}
}
if (match) {
System.out.println(line);
}
} catch (NumberFormatException e) {
// ignore
}
}
} catch (Exception e) {
LOG.error("exception in grep zk-log: " + zkLog, e);
} finally {
try {
if (br != null) {
br.close();
}
if (fis != null) {
fis.close();
}
} catch (Exception e) {
LOG.error("exception in closing zk-log: " + zkLog, e);
}
}
}
public static void grepZkLogDir(List<File> parsedZkLogs, long start, long end, String... patterns) {
for (File file : parsedZkLogs) {
grepZkLog(file, start, end, patterns);
}
}
public static void grepZkSnapshot(File zkSnapshot, String... patterns) {
FileInputStream fis = null;
BufferedReader br = null;
try {
fis = new FileInputStream(zkSnapshot);
br = new BufferedReader(new InputStreamReader(fis));
String line;
while ((line = br.readLine()) != null) {
try {
boolean match = true;
for (String pattern : patterns) {
if (line.indexOf(pattern) == -1) {
match = false;
break;
}
}
if (match) {
System.out.println(line);
}
} catch (NumberFormatException e) {
// ignore
}
}
} catch (Exception e) {
LOG.error("exception in grep zk-snapshot: " + zkSnapshot, e);
} finally {
try {
if (br != null) {
br.close();
}
if (fis != null) {
fis.close();
}
} catch (Exception e) {
LOG.error("exception in closing zk-snapshot: " + zkSnapshot, e);
}
}
}
/**
* guess zoo.cfg dir
* @return absolute path to zoo.cfg
*/
static String guessZkCfgDir() {
// TODO impl this
return null;
}
public static void printUsage(Options cliOptions) {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.setWidth(1000);
helpFormatter.printHelp("java " + ZkGrep.class.getName(), cliOptions);
}
/**
* parse zk-transaction-logs between start and end, if not already parsed
* @param zkLogDir
* @param start
* @param end
* @return list of parsed zklogs between start and end, in order of last modified timestamp
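* <p>Parsed output is cached under ${user.home}/zklog-parsed and reused when the cached
* file is strictly newer than the source log; gzipped logs are gunzipped before parsing.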
*/
static List<File> parseZkLogs(String zkLogDir, long start, long end) {
File zkParsedDir = new File(String.format("%s/zklog-parsed", System.getProperty("user.home")));
File[] zkLogs = getSortedFiles(zkLogDir, log);
// printFiles(zkLogs);
List<File> parsedZkLogs = new ArrayList<File>();
boolean stop = false;
for (File zkLog : zkLogs) {
if (stop) {
break;
}
if (zkLog.lastModified() < start) {
continue;
}
if (zkLog.lastModified() > end) {
stop = true;
}
try {
File parsedZkLog = new File(zkParsedDir, stripGzSuffix(zkLog.getName()) + ".parsed");
if (!parsedZkLog.exists() || parsedZkLog.lastModified() <= zkLog.lastModified()) {
if (zkLog.getName().endsWith(gzSuffix)) {
// copy and gunzip it
FileUtils.copyFileToDirectory(zkLog, zkParsedDir);
File zkLogGz = new File(zkParsedDir, zkLog.getName());
File tmpZkLog = gunzip(zkLogGz);
// parse gunzip file
ZKLogFormatter
.main(new String[] { log, tmpZkLog.getAbsolutePath(), parsedZkLog.getAbsolutePath()
});
// delete it
zkLogGz.delete();
tmpZkLog.delete();
} else {
// parse it directly
ZKLogFormatter.main(new String[] {
log, zkLog.getAbsolutePath(), parsedZkLog.getAbsolutePath()
});
}
}
parsedZkLogs.add(parsedZkLog);
} catch (Exception e) {
LOG.error("fail to parse zkLog: " + zkLog, e);
}
}
return parsedZkLogs;
}
/**
* Strip off a .gz suffix if any
* @param filename
* @return the filename without a trailing .gz suffix
*/
static String stripGzSuffix(String filename) {
if (filename.endsWith(gzSuffix)) {
return filename.substring(0, filename.length() - gzSuffix.length());
}
return filename;
}
/**
* Gunzip a file
* @param zipFile
* @return the uncompressed output file, or null on error
*/
static File gunzip(File zipFile) {
File outputFile = new File(stripGzSuffix(zipFile.getAbsolutePath()));
byte[] buffer = new byte[1024];
// try-with-resources closes both streams even if the copy fails midway
try (GZIPInputStream gzis = new GZIPInputStream(new FileInputStream(zipFile));
FileOutputStream out = new FileOutputStream(outputFile)) {
int len;
while ((len = gzis.read(buffer)) > 0) {
out.write(buffer, 0, len);
}
return outputFile;
} catch (IOException e) {
LOG.error("fail to gunzip file: " + zipFile, e);
}
return null;
}
/**
* parse the last zk-snapshot taken before by-time, if not already parsed
* @param zkSnapshotDir
* @param byTime
* @return File array whose first element is the last zk-snapshot taken before by-time and
* whose second element is its parsed file
*/
static File[] parseZkSnapshot(String zkSnapshotDir, long byTime) {
File[] retFiles = new File[2];
File zkParsedDir = new File(String.format("%s/zklog-parsed", System.getProperty("user.home")));
File[] zkSnapshots = getSortedFiles(zkSnapshotDir, snapshot);
// printFiles(zkSnapshots);
File lastZkSnapshot = null;
for (int i = 0; i < zkSnapshots.length; i++) {
File zkSnapshot = zkSnapshots[i];
if (zkSnapshot.lastModified() >= byTime) {
break;
}
lastZkSnapshot = zkSnapshot;
retFiles[0] = lastZkSnapshot;
}
if (lastZkSnapshot == null) {
LOG.error("no zk-snapshot found before time: " + byTime);
return null;
}
try {
File parsedZkSnapshot =
new File(zkParsedDir, stripGzSuffix(lastZkSnapshot.getName()) + ".parsed");
if (!parsedZkSnapshot.exists()
|| parsedZkSnapshot.lastModified() <= lastZkSnapshot.lastModified()) {
if (lastZkSnapshot.getName().endsWith(gzSuffix)) {
// copy and gunzip it
FileUtils.copyFileToDirectory(lastZkSnapshot, zkParsedDir);
File lastZkSnapshotGz = new File(zkParsedDir, lastZkSnapshot.getName());
File tmpLastZkSnapshot = gunzip(lastZkSnapshotGz);
// parse gunzip file
ZKLogFormatter.main(new String[] {
snapshot, tmpLastZkSnapshot.getAbsolutePath(), parsedZkSnapshot.getAbsolutePath()
});
// delete it
lastZkSnapshotGz.delete();
tmpLastZkSnapshot.delete();
} else {
// parse it directly
ZKLogFormatter.main(new String[] {
snapshot, lastZkSnapshot.getAbsolutePath(), parsedZkSnapshot.getAbsolutePath()
});
}
}
retFiles[1] = parsedZkSnapshot;
return retFiles;
} catch (Exception e) {
LOG.error("fail to parse zkSnapshot: " + lastZkSnapshot, e);
}
return null;
}
public static void processCommandLineArgs(String[] cliArgs) {
CommandLineParser cliParser = new GnuParser();
Options cliOptions = constructCommandLineOptions();
CommandLine cmd = null;
try {
cmd = cliParser.parse(cliOptions, cliArgs);
} catch (ParseException pe) {
System.err.println("CommandLineClient: failed to parse command-line options: " + pe);
printUsage(cliOptions);
System.exit(1);
}
String zkCfgDirValue = null;
String zkCfgFile = null;
if (cmd.hasOption(zkCfg)) {
zkCfgDirValue = cmd.getOptionValue(zkCfg);
}
if (zkCfgDirValue == null) {
zkCfgDirValue = guessZkCfgDir();
}
if (zkCfgDirValue == null) {
LOG.error("couldn't figure out path to zkCfg file");
System.exit(1);
}
// get zoo.cfg path from cfg-dir
zkCfgFile = zkCfgDirValue;
if (!zkCfgFile.endsWith(".cfg")) {
// append with default zoo.cfg
zkCfgFile = zkCfgFile + "/zoo.cfg";
}
if (!new File(zkCfgFile).exists()) {
LOG.error("zoo.cfg file doen't exist: " + zkCfgFile);
System.exit(1);
}
String[] patterns = cmd.getOptionValues(pattern);
String[] zkDataDirs = getZkDataDirs(zkCfgFile);
// parse zk data files
if (zkDataDirs == null || zkDataDirs[0] == null || zkDataDirs[1] == null) {
LOG.error("invalid zkCfgDir: " + zkCfgDirValue);
System.exit(1);
}
File zkParsedDir = new File(String.format("%s/zklog-parsed", System.getProperty("user.home")));
if (!zkParsedDir.exists()) {
LOG.info("creating zklog-parsed dir: " + zkParsedDir.getAbsolutePath());
zkParsedDir.mkdirs();
}
if (cmd.hasOption(between)) {
String[] timeStrings = cmd.getOptionValues(between);
long startTime = parseTimeString(timeStrings[0]);
if (startTime == -1) {
LOG.error("invalid start time string: " + timeStrings[0]
+ ", should be either timestamp or yyMMdd_hhmmss_SSS");
System.exit(1);
}
long endTime = parseTimeString(timeStrings[1]);
if (endTime == -1) {
LOG.error("invalid end time string: " + timeStrings[1]
+ ", should be either timestamp or yyMMdd_hhmmss_SSS");
System.exit(1);
}
if (startTime > endTime) {
LOG.warn("empty window: " + startTime + " - " + endTime);
System.exit(1);
}
// zkDataDirs[0] is the transaction log dir
List<File> parsedZkLogs = parseZkLogs(zkDataDirs[0], startTime, endTime);
grepZkLogDir(parsedZkLogs, startTime, endTime, patterns);
} else if (cmd.hasOption(by)) {
String timeString = cmd.getOptionValue(by);
long byTime = parseTimeString(timeString);
if (byTime == -1) {
LOG.error("invalid by time string: " + timeString
+ ", should be either timestamp or yyMMdd_hhmmss_SSS");
System.exit(1);
}
// zkDataDirs[1] is the snapshot dir
File[] lastZkSnapshot = parseZkSnapshot(zkDataDirs[1], byTime);
// lastZkSnapshot[1] is the parsed last snapshot by byTime
grepZkSnapshot(lastZkSnapshot[1], patterns);
// need to grep transaction logs between last-modified-time of snapshot and byTime also
// lastZkSnapshot[0] is the last snapshot by byTime
long startTime = lastZkSnapshot[0].lastModified();
// zkDataDirs[0] is the transaction log dir
List<File> parsedZkLogs = parseZkLogs(zkDataDirs[0], startTime, byTime);
grepZkLogDir(parsedZkLogs, startTime, byTime, patterns);
}
}
public static void main(String[] args) {
processCommandLineArgs(args);
}
}
| 9,973 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/IntegrationTestUtil.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.helix.PropertyKey;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.factory.DedicatedZkClientFactory;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.tools.ClusterExternalViewVerifier;
import org.apache.helix.tools.ClusterVerifiers.BestPossibleExternalViewVerifier;
import org.apache.helix.tools.ClusterVerifiers.ClusterLiveNodesVerifier;
import org.apache.helix.tools.ClusterVerifiers.ZkHelixClusterVerifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* collection of test utilities for integration tests
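* Example invocation (illustrative; cluster name hypothetical):
* java IntegrationTestUtil --zkSvr localhost:2181 --timeout 30000 --verifyClusterState myCluster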
*/
public class IntegrationTestUtil {
private static Logger LOG = LoggerFactory.getLogger(IntegrationTestUtil.class);
public static final long DEFAULT_TIMEOUT = 30 * 1000; // in milliseconds
public static final String help = "help";
public static final String zkSvr = "zkSvr";
public static final String timeout = "timeout";
public static final String verifyExternalView = "verifyExternalView";
public static final String verifyLiveNodes = "verifyLiveNodes";
public static final String readZNode = "readZNode";
public static final String readLeader = "readLeader";
public static final String verifyClusterState = "verifyClusterState";
final HelixZkClient _zkclient;
final ZNRecordSerializer _serializer;
final long _timeoutValue;
public IntegrationTestUtil(HelixZkClient zkclient, long timeoutValue) {
_zkclient = zkclient;
_timeoutValue = timeoutValue;
_serializer = new ZNRecordSerializer();
}
public void verifyExternalView(String[] args) {
if (args == null || args.length == 0) {
System.err.println("Illegal arguments for " + verifyExternalView);
return;
}
String clusterName = args[0];
List<String> liveNodes = new ArrayList<String>();
for (int i = 1; i < args.length; i++) {
liveNodes.add(args[i]);
}
ClusterExternalViewVerifier verifier =
new ClusterExternalViewVerifier(_zkclient, clusterName, liveNodes);
boolean success = verifier.verifyByPolling(_timeoutValue);
System.out.println(success ? "Successful" : "Failed");
if (!success) {
System.exit(1);
}
}
public void verifyClusterState(String[] args) {
if (args == null || args.length == 0) {
System.err.println("Illegal arguments for " + verifyExternalView);
return;
}
String clusterName = args[0];
ZkHelixClusterVerifier clusterVerifier =
new BestPossibleExternalViewVerifier.Builder(clusterName).setZkClient(_zkclient).build();
boolean success = clusterVerifier.verify(_timeoutValue);
System.out.println(success ? "Successful" : "Failed");
if (!success) {
System.exit(1);
}
}
public void verifyLiveNodes(String[] args) {
if (args == null || args.length == 0) {
System.err.println("Illegal arguments for " + verifyLiveNodes);
return;
}
String clusterName = args[0];
List<String> liveNodes = new ArrayList<String>();
for (int i = 1; i < args.length; i++) {
liveNodes.add(args[i]);
}
ClusterLiveNodesVerifier verifier =
new ClusterLiveNodesVerifier(_zkclient, clusterName, liveNodes);
boolean success = verifier.verify(_timeoutValue);
System.out.println(success ? "Successful" : "Failed");
if (!success) {
System.exit(1);
}
}
public void readZNode(String path) {
ZNRecord record = _zkclient.readData(path, true);
if (record == null) {
System.out.println("null");
} else {
System.out.println(new String(_serializer.serialize(record)));
}
}
@SuppressWarnings("static-access")
static Options constructCommandLineOptions() {
Option helpOption =
OptionBuilder.withLongOpt(help).withDescription("Prints command-line options information")
.create();
Option zkSvrOption =
OptionBuilder.hasArgs(1).isRequired(true).withArgName("zookeeperAddress")
.withLongOpt(zkSvr).withDescription("Provide zookeeper-address").create();
Option timeoutOption =
OptionBuilder.hasArgs(1).isRequired(true).withArgName("timeout")
.withLongOpt(timeout).withDescription("Provide timeout (in ms)").create();
Option verifyExternalViewOption =
OptionBuilder.hasArgs().isRequired(false).withArgName("clusterName node1 node2..")
.withLongOpt(verifyExternalView).withDescription("Verify external-view").create();
Option verifyClusterStateOption =
OptionBuilder.hasArgs().isRequired(false).withArgName("clusterName")
.withLongOpt(verifyClusterState).withDescription("Verify best possible cluster state").create();
Option verifyLiveNodesOption =
OptionBuilder.hasArgs().isRequired(false).withArgName("clusterName node1 node2..")
.withLongOpt(verifyLiveNodes).withDescription("Verify live-nodes").create();
Option readZNodeOption =
OptionBuilder.hasArgs(1).isRequired(false).withArgName("zkPath").withLongOpt(readZNode)
.withDescription("Read znode").create();
Option readLeaderOption =
OptionBuilder.hasArgs(1).isRequired(false).withArgName("clusterName")
.withLongOpt(readLeader).withDescription("Read cluster controller").create();
OptionGroup optGroup = new OptionGroup();
optGroup.setRequired(true);
optGroup.addOption(verifyExternalViewOption);
optGroup.addOption(verifyClusterStateOption);
optGroup.addOption(verifyLiveNodesOption);
optGroup.addOption(readZNodeOption);
optGroup.addOption(readLeaderOption);
Options options = new Options();
options.addOption(helpOption);
options.addOption(zkSvrOption);
options.addOption(timeoutOption);
options.addOptionGroup(optGroup);
return options;
}
static void printUsage(Options cliOptions) {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.setWidth(1000);
helpFormatter.printHelp("java " + ClusterExternalViewVerifier.class.getName(), cliOptions);
}
static void processCommandLineArgs(String[] cliArgs) {
CommandLineParser cliParser = new GnuParser();
Options cliOptions = constructCommandLineOptions();
CommandLine cmd = null;
try {
cmd = cliParser.parse(cliOptions, cliArgs);
} catch (ParseException pe) {
System.err.println("failed to parse command-line args: " + Arrays.asList(cliArgs)
+ ", exception: " + pe.toString());
printUsage(cliOptions);
System.exit(1);
}
HelixZkClient.ZkClientConfig clientConfig = new HelixZkClient.ZkClientConfig();
clientConfig.setZkSerializer(new ZNRecordSerializer());
HelixZkClient zkClient = DedicatedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(cmd.getOptionValue(zkSvr)), clientConfig);
long timeoutValue = DEFAULT_TIMEOUT;
if (cmd.hasOption(timeout)) {
String timeoutStr = cmd.getOptionValue(timeout);
try {
timeoutValue = Long.valueOf(timeoutStr);
} catch (NumberFormatException ex) {
System.err.println(
"Invalid timeout value " + timeoutStr + ". Using default value: " + timeoutValue);
}
}
IntegrationTestUtil util = new IntegrationTestUtil(zkClient, timeoutValue);
if (cmd != null) {
if (cmd.hasOption(verifyExternalView)) {
String[] args = cmd.getOptionValues(verifyExternalView);
util.verifyExternalView(args);
} else if (cmd.hasOption(verifyClusterState)) {
String[] args = cmd.getOptionValues(verifyClusterState);
util.verifyClusterState(args);
} else if (cmd.hasOption(verifyLiveNodes)) {
String[] args = cmd.getOptionValues(verifyLiveNodes);
util.verifyLiveNodes(args);
} else if (cmd.hasOption(readZNode)) {
String path = cmd.getOptionValue(readZNode);
util.readZNode(path);
} else if (cmd.hasOption(readLeader)) {
String clusterName = cmd.getOptionValue(readLeader);
PropertyKey.Builder keyBuilder = new PropertyKey.Builder(clusterName);
util.readZNode(keyBuilder.controllerLeader().getPath());
} else {
printUsage(cliOptions);
}
}
}
public static void main(String[] args) {
processCommandLineArgs(args);
}
}
| 9,974 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/ZKDumper.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.apache.helix.zookeeper.datamodel.serializer.ByteArraySerializer;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory;
import org.apache.helix.zookeeper.zkclient.serialize.ZkSerializer;
/**
* Dumps the ZooKeeper file structure onto disk
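* <p>Example invocations (illustrative; address and paths hypothetical):
* java ZKDumper --download --zkSvr localhost:2181 --zkpath /myCluster --fspath /tmp/zkdump
* java ZKDumper --upload --zkSvr localhost:2181 --zkpath /myCluster --fspath /tmp/zkdump/myCluster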
*/
@SuppressWarnings("static-access")
public class ZKDumper {
private HelixZkClient client;
private FilenameFilter filter;
static Options options;
private String suffix = "";
// disabled by default
private boolean removeSuffix = false;
public String getSuffix() {
return suffix;
}
public void setSuffix(String suffix) {
this.suffix = suffix;
}
public boolean isRemoveSuffix() {
return removeSuffix;
}
public void setRemoveSuffix(boolean removeSuffix) {
this.removeSuffix = removeSuffix;
}
static {
options = new Options();
OptionGroup optionGroup = new OptionGroup();
Option d =
OptionBuilder.withLongOpt("download").withDescription("Download from ZK to File System")
.create();
d.setArgs(0);
Option dSuffix =
OptionBuilder.withLongOpt("addSuffix")
.withDescription("add suffix to every file downloaded from ZK").create();
dSuffix.setArgs(1);
dSuffix.setRequired(false);
Option u =
OptionBuilder.withLongOpt("upload").withDescription("Upload from File System to ZK")
.create();
u.setArgs(0);
Option uSuffix =
OptionBuilder.withLongOpt("removeSuffix")
.withDescription("remove suffix from every file uploaded to ZK").create();
uSuffix.setArgs(0);
uSuffix.setRequired(false);
Option del =
OptionBuilder.withLongOpt("delete").withDescription("Delete given path from ZK").create();
optionGroup.setRequired(true);
optionGroup.addOption(del);
optionGroup.addOption(u);
optionGroup.addOption(d);
options.addOptionGroup(optionGroup);
options.addOption("zkSvr", true, "Zookeeper address");
options.addOption("zkpath", true, "Zookeeper path");
options.addOption("fspath", true, "Path on local Filesystem to dump");
options.addOption("h", "help", false, "Print this usage information");
options.addOption("v", "verbose", false, "Print out VERBOSE information");
options.addOption(dSuffix);
options.addOption(uSuffix);
}
public ZKDumper(String zkAddress) {
client = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(zkAddress));
ZkSerializer zkSerializer = new ByteArraySerializer();
client.setZkSerializer(zkSerializer);
filter = new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return !name.startsWith(".");
}
};
}
public static void main(String[] args) throws Exception {
if (args == null || args.length == 0) {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("java " + ZKDumper.class.getName(), options);
System.exit(1);
}
CommandLineParser parser = new PosixParser();
CommandLine cmd = parser.parse(options, args);
boolean download = cmd.hasOption("download");
boolean upload = cmd.hasOption("upload");
boolean del = cmd.hasOption("delete");
String zkAddress = cmd.getOptionValue("zkSvr");
String zkPath = cmd.getOptionValue("zkpath");
String fsPath = cmd.getOptionValue("fspath");
ZKDumper zkDump = new ZKDumper(zkAddress);
try {
if (download) {
if (cmd.hasOption("addSuffix")) {
zkDump.suffix = cmd.getOptionValue("addSuffix");
}
zkDump.download(zkPath, fsPath + zkPath);
}
if (upload) {
if (cmd.hasOption("removeSuffix")) {
zkDump.removeSuffix = true;
}
zkDump.upload(zkPath, fsPath);
}
if (del) {
zkDump.delete(zkPath);
}
} finally {
zkDump.close();
}
}
private void delete(String zkPath) {
client.deleteRecursively(zkPath);
}
public void upload(String zkPath, String fsPath) throws Exception {
File file = new File(fsPath);
System.out.println("Uploading " + file.getCanonicalPath() + " to " + zkPath);
zkPath = zkPath.replaceAll("[/]+", "/");
int index = -1;
if (removeSuffix && (index = file.getName().indexOf(".")) > -1) {
// use literal replace: the suffix (e.g. ".json") must not be interpreted as a regex
zkPath = zkPath.replace(file.getName().substring(index), "");
}
if (file.isDirectory()) {
File[] children = file.listFiles(filter);
client.createPersistent(zkPath, true);
if (children != null && children.length > 0) {
for (File child : children) {
upload(zkPath + "/" + child.getName(), fsPath + "/" + child.getName());
}
}
} else {
byte[] result = new byte[(int) file.length()];
// try-with-resources closes the stream even if reading fails
try (InputStream input = new BufferedInputStream(new FileInputStream(file))) {
int totalBytesRead = 0;
// read() fills 'result' and returns -1 at end-of-stream, 0, or the number of
// bytes read; the loop usually completes in a single iteration
while (totalBytesRead < result.length) {
int bytesRemaining = result.length - totalBytesRead;
int bytesRead = input.read(result, totalBytesRead, bytesRemaining);
if (bytesRead < 0) {
break; // unexpected end-of-stream; avoid looping forever
}
totalBytesRead = totalBytesRead + bytesRead;
}
client.createPersistent(zkPath, result);
}
}
}
public void download(String zkPath, String fsPath) throws Exception {
List<String> children = client.getChildren(zkPath);
if (children != null && children.size() > 0) {
new File(fsPath).mkdirs();
for (String child : children) {
String childPath = zkPath.equals("/") ? "/" + child : zkPath + "/" + child;
download(childPath, fsPath + "/" + child);
}
} else {
System.out
.println("Saving " + zkPath + " to " + new File(fsPath + suffix).getCanonicalPath());
OutputStream out = new FileOutputStream(fsPath + suffix);
Object readData = client.readData(zkPath);
if (readData != null) {
out.write((byte[]) readData);
}
out.close();
}
}
public void close() {
client.close();
}
}
| 9,975 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/CurrentStateCleanUp.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.helix.AccessOption;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixDefinedState;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyKey;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.model.CurrentState;
import org.apache.helix.zookeeper.zkclient.DataUpdater;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CurrentStateCleanUp {
private static final Logger LOG = LoggerFactory.getLogger(CurrentStateCleanUp.class);
public static final String zkServer = "zkSvr";
public static final String cluster = "cluster";
public static final String instance = "instance";
public static final String session = "session";
public static final String help = "help";
private static Options parseCommandLineOptions() {
Option helpOption =
OptionBuilder.withLongOpt(help).withDescription("Prints command-line options info")
.create();
Option zkServerOption =
OptionBuilder.withLongOpt(zkServer).withDescription("Provide zookeeper address").create();
zkServerOption.setArgs(1);
zkServerOption.setRequired(true);
zkServerOption.setArgName("ZookeeperServerAddress(Required)");
Option clusterOption =
OptionBuilder.withLongOpt(cluster).withDescription("Provide cluster name").create();
clusterOption.setArgs(1);
clusterOption.setRequired(true);
clusterOption.setArgName("Cluster name (Required)");
Option instanceOption = OptionBuilder.withLongOpt(instance)
.withDescription("Provide instance name").create();
instanceOption.setArgs(1);
instanceOption.setRequired(true);
instanceOption.setArgName("Instance name");
Option sessionOption = OptionBuilder.withLongOpt(session)
.withDescription("Provide instance session").create();
sessionOption.setArgs(1);
sessionOption.setRequired(true);
sessionOption.setArgName("Session name");
OptionGroup optionGroup = new OptionGroup();
optionGroup.addOption(zkServerOption);
Options options = new Options();
options.addOption(helpOption);
options.addOption(clusterOption);
options.addOption(instanceOption);
options.addOption(sessionOption);
options.addOptionGroup(optionGroup);
return options;
}
public static void cleanupCurrentStatesForCluster(String zkConnectString, String clusterName,
String instanceName, String session) throws Exception {
HelixManager manager = HelixManagerFactory
.getZKHelixManager(clusterName, "Administrator", InstanceType.ADMINISTRATOR,
zkConnectString);
manager.connect();
try {
HelixDataAccessor accessor = manager.getHelixDataAccessor();
DataUpdater<ZNRecord> updater = new DataUpdater<ZNRecord>() {
@Override
public ZNRecord update(ZNRecord currentData) {
if (currentData == null) {
return null;
}
Set<String> partitionToRemove = new HashSet<>();
for (String partition : currentData.getMapFields().keySet()) {
if (currentData.getMapField(partition).get("CURRENT_STATE")
.equals(HelixDefinedState.DROPPED.name())) {
partitionToRemove.add(partition);
}
}
currentData.getMapFields().keySet().removeAll(partitionToRemove);
return currentData;
}
};
LOG.info(String.format("Processing cleaning current state for instance: %s", instanceName));
List<String> currentStateNames =
accessor.getChildNames(accessor.keyBuilder().currentStates(instanceName, session));
List<String> taskCurrentStateNames =
accessor.getChildNames(accessor.keyBuilder().taskCurrentStates(instanceName, session));
List<PropertyKey> allCurrentStateKeys = new ArrayList<>();
currentStateNames.stream()
.map(name -> accessor.keyBuilder().currentState(instanceName, session, name))
.forEach(allCurrentStateKeys::add);
taskCurrentStateNames.stream()
.map(name -> accessor.keyBuilder().taskCurrentState(instanceName, session, name))
.forEach(allCurrentStateKeys::add);
List<String> pathsToRemove = new ArrayList<>();
for (PropertyKey key : allCurrentStateKeys) {
accessor.getBaseDataAccessor().update(key.getPath(), updater, AccessOption.PERSISTENT);
CurrentState currentState = accessor.getProperty(key);
if (currentState.getPartitionStateMap().size() == 0) {
pathsToRemove.add(key.getPath());
LOG.info(String.format("Remove current state for path %s", key.getPath()));
}
}
accessor.getBaseDataAccessor().remove(pathsToRemove, AccessOption.PERSISTENT);
} catch (Exception e) {
e.printStackTrace();
} finally {
manager.disconnect();
}
}
public static void printUsage(Options cliOptions) {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.setWidth(1000);
helpFormatter.printHelp("java " + CurrentStateCleanUp.class.getName(), cliOptions);
}
public static void main(String[] args) throws Exception {
CommandLine cmd = ToolsUtil.processCommandLineArgs(args, parseCommandLineOptions());
String zkConnectString = cmd.getOptionValue(zkServer);
String clusterName = cmd.getOptionValue(cluster);
String instanceName = cmd.getOptionValue(instance);
String sessionId = cmd.getOptionValue(session);
LOG.info(String
.format("Starting cleaning current state with ZK: %s, cluster: %s", zkConnectString,
clusterName));
cleanupCurrentStatesForCluster(zkConnectString, clusterName, instanceName, sessionId);
}
}
| 9,976 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/ZkCopy.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.helix.AccessOption;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.zookeeper.datamodel.serializer.ByteArraySerializer;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.zookeeper.api.client.HelixZkClient;
import org.apache.helix.zookeeper.impl.factory.SharedZkClientFactory;
import org.apache.zookeeper.common.PathUtils;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Tool for recursively copying one ZooKeeper path to another (ephemeral znodes are skipped)
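* <p>Example invocation (illustrative; address and paths hypothetical):
* java ZkCopy --src zk://localhost:2181/sourceCluster --dst zk://localhost:2181/backupCluster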
*/
public class ZkCopy {
enum ZkCopyScheme {
zk
}
private static Logger logger = LoggerFactory.getLogger(ZkCopy.class);
private static final String src = "src";
private static final String dst = "dst";
@SuppressWarnings("static-access")
private static Options constructCmdLineOpt() {
Option srcOpt =
OptionBuilder.withLongOpt(src).hasArgs(1).isRequired(true)
.withArgName("source-URI (e.g. zk://localhost:2181/src-path")
.withDescription("Provide source URI").create();
Option dstOpt =
OptionBuilder.withLongOpt(dst).hasArgs(1).isRequired(true)
.withArgName("destination-URI (e.g. zk://localhost:2181/dst-path")
.withDescription("Provide destination URI").create();
Options options = new Options();
options.addOption(srcOpt);
options.addOption(dstOpt);
return options;
}
private static void printUsage(Options cliOptions) {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.setWidth(1000);
helpFormatter.printHelp("java " + ZkCopy.class.getName(), cliOptions);
}
private static String concatenate(String path, String suffix) {
if (suffix == null || suffix.isEmpty()) {
return path;
}
if (path.endsWith("/") || suffix.startsWith("/")) {
return path + suffix;
} else {
return path + "/" + suffix;
}
}
/**
* Copy a list of paths from src to dst
* @param srcClient
* @param srcRootPath
* @param dstClient
* @param dstRootPath
* @param paths
*/
private static void copy(HelixZkClient srcClient, String srcRootPath, HelixZkClient dstClient,
String dstRootPath, List<String> paths) {
BaseDataAccessor<Object> srcAccessor = new ZkBaseDataAccessor<Object>(srcClient);
List<String> readPaths = new ArrayList<String>();
for (String path : paths) {
readPaths.add(concatenate(srcRootPath, path));
}
List<Stat> stats = new ArrayList<Stat>();
List<Object> readData = srcAccessor.get(readPaths, stats, 0, true);
List<String> writePaths = new ArrayList<String>();
List<Object> writeData = new ArrayList<Object>();
for (int i = 0; i < paths.size(); i++) {
if (stats.get(i).getEphemeralOwner() != 0) {
logger.warn("Skip copying ephemeral znode: " + readPaths.get(i));
continue;
}
writePaths.add(concatenate(dstRootPath, paths.get(i)));
writeData.add(readData.get(i));
}
if (writePaths.size() > 0) {
BaseDataAccessor<Object> dstAccessor = new ZkBaseDataAccessor<Object>(dstClient);
boolean[] success =
dstAccessor.createChildren(writePaths, writeData, AccessOption.PERSISTENT);
List<String> successPaths = new ArrayList<String>();
List<String> failPaths = new ArrayList<String>();
for (int i = 0; i < success.length; i++) {
if (success[i]) {
successPaths.add(writePaths.get(i));
} else {
failPaths.add(writePaths.get(i));
}
}
// Print
if (!successPaths.isEmpty()) {
System.out.println("Copy " + successPaths);
}
if (!failPaths.isEmpty()) {
System.out.println("Skip " + failPaths);
}
}
}
private static void zkCopy(HelixZkClient srcClient, String srcRootPath, HelixZkClient dstClient,
String dstRootPath) {
// Strip off trailing "/"
if (!srcRootPath.equals("/") && srcRootPath.endsWith("/")) {
srcRootPath = srcRootPath.substring(0, srcRootPath.length() - 1);
}
if (!dstRootPath.equals("/") && dstRootPath.endsWith("/")) {
dstRootPath = dstRootPath.substring(0, dstRootPath.length() - 1);
}
// Validate paths
PathUtils.validatePath(srcRootPath);
PathUtils.validatePath(dstRootPath);
if (srcRootPath.equals(dstRootPath)) {
logger.info("srcPath == dstPath. Skip copying");
return;
}
if (srcRootPath.startsWith(dstRootPath) || dstRootPath.startsWith(srcRootPath)) {
throw new IllegalArgumentException(
"srcPath/dstPath can't be prefix of dstPath/srcPath, was srcPath: " + srcRootPath
+ ", dstPath: " + dstRootPath);
}
// Recursive copy using BFS
List<String> queue = new LinkedList<String>();
String root = "";
copy(srcClient, srcRootPath, dstClient, dstRootPath, Arrays.asList(root));
queue.add(root);
while (!queue.isEmpty()) {
String path = queue.remove(0);
String fromPath = concatenate(srcRootPath, path);
List<String> children = srcClient.getChildren(fromPath);
List<String> paths = new ArrayList<String>();
if (children != null && children.size() > 0) {
for (String child : children) {
String childPath = concatenate(path, child);
paths.add(childPath);
}
copy(srcClient, srcRootPath, dstClient, dstRootPath, paths);
queue.addAll(paths);
}
}
}
public static void main(String[] args) throws Exception {
CommandLineParser cliParser = new GnuParser();
Options cliOptions = constructCmdLineOpt();
CommandLine cmd = null;
try {
cmd = cliParser.parse(cliOptions, args);
} catch (ParseException pe) {
System.err.println("CommandLineClient: failed to parse command-line options: "
+ pe.toString());
printUsage(cliOptions);
System.exit(1);
}
URI srcUri = new URI(cmd.getOptionValue(src));
URI dstUri = new URI(cmd.getOptionValue(dst));
ZkCopyScheme srcScheme = ZkCopyScheme.valueOf(srcUri.getScheme());
ZkCopyScheme dstScheme = ZkCopyScheme.valueOf(dstUri.getScheme());
if (srcScheme == ZkCopyScheme.zk && dstScheme == ZkCopyScheme.zk) {
String srcZkAddr = srcUri.getAuthority();
String dstZkAddr = dstUri.getAuthority();
HelixZkClient.ZkClientConfig clientConfig = new HelixZkClient.ZkClientConfig();
HelixZkClient srcClient = null;
HelixZkClient dstClient = null;
try {
if (srcZkAddr.equals(dstZkAddr)) {
clientConfig.setZkSerializer(new ByteArraySerializer());
srcClient = dstClient = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(srcZkAddr), clientConfig);
} else {
clientConfig.setZkSerializer(new ByteArraySerializer());
srcClient = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(srcZkAddr), clientConfig);
clientConfig.setZkSerializer(new ByteArraySerializer());
dstClient = SharedZkClientFactory.getInstance()
.buildZkClient(new HelixZkClient.ZkConnectionConfig(dstZkAddr), clientConfig);
}
String srcPath = srcUri.getPath();
String dstPath = dstUri.getPath();
zkCopy(srcClient, srcPath, dstClient, dstPath);
} finally {
if (srcClient != null) {
srcClient.close();
}
if (dstClient != null) {
dstClient.close();
}
}
} else {
System.err.println("Unsupported scheme. srcScheme: " + srcScheme + ", dstScheme: " + dstScheme);
System.exit(1);
}
}
}
| 9,977 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/JmxDumper.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.FileWriter;
import java.io.PrintWriter;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
import javax.management.MBeanOperationInfo;
import javax.management.MBeanServerConnection;
import javax.management.MBeanServerDelegate;
import javax.management.MBeanServerNotification;
import javax.management.Notification;
import javax.management.NotificationListener;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.relation.MBeanServerNotificationFilter;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
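/**
* Polls MBeans matching a name pattern over a remote JMX connection at a fixed period,
* appending the selected attribute values (and invoking the listed operations) to an
* output file until the requested number of samples is collected.
* Example invocation (illustrative; all option values hypothetical):
* java JmxDumper --jmxUrl localhost:9999 --domain ClusterStatus --className org.apache.helix.monitoring.mbeans.ResourceMonitor --pattern "ClusterStatus:*" --fields PartitionGauge,ErrorPartitionGauge --operations resetMetrics --period 1000 --outputFile /tmp/jmx.out --sampleCount 10
*/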
public class JmxDumper implements NotificationListener {
public static final String help = "help";
public static final String domain = "domain";
public static final String fields = "fields";
public static final String pattern = "pattern";
public static final String operations = "operations";
public static final String period = "period";
public static final String className = "className";
public static final String outputFile = "outputFile";
public static final String jmxUrl = "jmxUrl";
public static final String sampleCount = "sampleCount";
private static final Logger _logger = LoggerFactory.getLogger(JmxDumper.class);
String _domain;
MBeanServerConnection _mbeanServer;
String _beanClassName;
String _namePattern;
int _samplePeriod;
Map<ObjectName, ObjectName> _mbeanNames = new ConcurrentHashMap<ObjectName, ObjectName>();
Timer _timer;
String _outputFileName;
List<String> _outputFields = new ArrayList<String>();
Set<String> _operations = new HashSet<String>();
PrintWriter _outputFile;
int _samples = 0;
int _targetSamples = -1;
String _jmxUrl;
public JmxDumper(String jmxService, String domain, String beanClassName, String namePattern,
int samplePeriod, List<String> fields, List<String> operations, String outputfile,
int sampleCount) throws Exception {
_jmxUrl = jmxService;
_domain = domain;
_beanClassName = beanClassName;
_samplePeriod = samplePeriod;
_outputFields.addAll(fields);
_operations.addAll(operations);
_outputFileName = outputfile;
_namePattern = namePattern;
_targetSamples = sampleCount;
JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://" + _jmxUrl + "/jmxrmi");
JMXConnector jmxc = JMXConnectorFactory.connect(url, null);
_mbeanServer = jmxc.getMBeanServerConnection();
MBeanServerNotificationFilter filter = new MBeanServerNotificationFilter();
filter.enableAllObjectNames();
_mbeanServer.addNotificationListener(MBeanServerDelegate.DELEGATE_NAME, this, filter, null);
init();
_timer = new Timer(true);
_timer.scheduleAtFixedRate(new SampleTask(), _samplePeriod, _samplePeriod);
}
class SampleTask extends TimerTask {
@Override
public void run() {
List<ObjectName> errorMBeans = new ArrayList<ObjectName>();
_logger.info("Sampling " + _mbeanNames.size() + " beans");
for (ObjectName beanName : _mbeanNames.keySet()) {
MBeanInfo info;
try {
info = _mbeanServer.getMBeanInfo(beanName);
} catch (Exception e) {
_logger.error("fail to get MBeanInfo for " + beanName + ", removing it", e);
errorMBeans.add(beanName);
continue;
}
if (!info.getClassName().equals(_beanClassName)) {
_logger.warn("Skip: className " + info.getClassName() + " expected : " + _beanClassName);
continue;
}
StringBuilder line = new StringBuilder();
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd-hh:mm:ss:SSS");
String date = dateFormat.format(new Date());
line.append(date + " ");
line.append(beanName.toString() + " ");
MBeanAttributeInfo[] infos = info.getAttributes();
Map<String, MBeanAttributeInfo> infoMap = new HashMap<String, MBeanAttributeInfo>();
for (MBeanAttributeInfo infoItem : infos) {
infoMap.put(infoItem.getName(), infoItem);
}
for (String outputField : _outputFields) {
try {
if (infoMap.containsKey(outputField)) {
Object mbeanAttributeValue = _mbeanServer.getAttribute(beanName, outputField);
line.append(mbeanAttributeValue.toString() + " ");
} else {
_logger.warn(outputField + " not found");
line.append("null ");
}
} catch (Exception e) {
_logger.error("Error:", e);
line.append("null ");
continue;
}
}
MBeanOperationInfo[] operations = info.getOperations();
Map<String, MBeanOperationInfo> opeMap = new HashMap<String, MBeanOperationInfo>();
for (MBeanOperationInfo opeItem : operations) {
opeMap.put(opeItem.getName(), opeItem);
}
for (String ope : _operations) {
if (opeMap.containsKey(ope)) {
try {
_mbeanServer.invoke(beanName, ope, new Object[0], new String[0]);
// System.out.println(ope+" invoked");
} catch (Exception e) {
_logger.error("Error:", e);
continue;
}
}
}
_outputFile.println(line.toString());
// System.out.println(line);
}
for (ObjectName deadBean : errorMBeans) {
_mbeanNames.remove(deadBean);
}
_samples++;
// System.out.println("samples:"+_samples);
if (_samples == _targetSamples) {
synchronized (JmxDumper.this) {
_logger.info(_samples + " samples done, exiting...");
JmxDumper.this.notifyAll();
}
}
}
}
void init() throws Exception {
Set<ObjectInstance> existingInstances =
_mbeanServer.queryMBeans(new ObjectName(_namePattern), null);
_logger.info("Total " + existingInstances.size() + " mbeans matched " + _namePattern);
for (ObjectInstance instance : existingInstances) {
if (instance.getClassName().equals(_beanClassName)) {
_mbeanNames.put(instance.getObjectName(), instance.getObjectName());
_logger.info("Sampling " + instance.getObjectName());
}
}
FileWriter fos = new FileWriter(_outputFileName);
System.out.println(_outputFileName);
_outputFile = new PrintWriter(fos);
}
@Override
public void handleNotification(Notification notification, Object handback) {
MBeanServerNotification mbs = (MBeanServerNotification) notification;
if (MBeanServerNotification.REGISTRATION_NOTIFICATION.equals(mbs.getType())) {
// System.out.println("Adding mbean " + mbs.getMBeanName());
_logger.info("Adding mbean " + mbs.getMBeanName());
if (mbs.getMBeanName().getDomain().equalsIgnoreCase(_domain)) {
addMBean(mbs.getMBeanName());
}
} else if (MBeanServerNotification.UNREGISTRATION_NOTIFICATION.equals(mbs.getType())) {
// System.out.println("Removing mbean " + mbs.getMBeanName());
_logger.info("Removing mbean " + mbs.getMBeanName());
if (mbs.getMBeanName().getDomain().equalsIgnoreCase(_domain)) {
removeMBean(mbs.getMBeanName());
}
}
}
private void addMBean(ObjectName beanName) {
_mbeanNames.put(beanName, beanName);
}
private void removeMBean(ObjectName beanName) {
_mbeanNames.remove(beanName);
}
public static int processCommandLineArgs(String[] cliArgs) throws Exception {
CommandLineParser cliParser = new GnuParser();
Options cliOptions = constructCommandLineOptions();
CommandLine cmd = null;
try {
cmd = cliParser.parse(cliOptions, cliArgs);
} catch (ParseException pe) {
System.err.println("CommandLineClient: failed to parse command-line options: "
+ pe.toString());
printUsage(cliOptions);
System.exit(1);
}
boolean ret = checkOptionArgsNumber(cmd.getOptions());
if (!ret) {
printUsage(cliOptions);
System.exit(1);
}
String jmxUrlStr = cmd.getOptionValue(jmxUrl);
String periodStr = cmd.getOptionValue(period);
int periodVal = Integer.parseInt(periodStr);
String domainStr = cmd.getOptionValue(domain);
String classNameStr = cmd.getOptionValue(className);
String patternStr = cmd.getOptionValue(pattern);
String fieldsStr = cmd.getOptionValue(fields);
String operationsStr = cmd.getOptionValue(operations);
String resultFile = cmd.getOptionValue(outputFile);
String sampleCountStr = cmd.getOptionValue(sampleCount, "-1");
int sampleCountVal = Integer.parseInt(sampleCountStr);
// Use local names that do not shadow the option-name constants of this class
List<String> fieldList = Arrays.asList(fieldsStr.split(","));
List<String> operationList = Arrays.asList(operationsStr.split(","));
JmxDumper dumper = null;
try {
dumper =
new JmxDumper(jmxUrlStr, domainStr, classNameStr, patternStr, periodVal, fieldList,
operationList, resultFile, sampleCountVal);
synchronized (dumper) {
dumper.wait();
}
} finally {
if (dumper != null) {
dumper.flushFile();
}
}
return 0;
}
private void flushFile() {
if (_outputFile != null) {
_outputFile.flush();
_outputFile.close();
}
}
private static boolean checkOptionArgsNumber(Option[] options) {
for (Option option : options) {
int argNb = option.getArgs();
String[] args = option.getValues();
if (argNb == 0) {
if (args != null && args.length > 0) {
System.err.println(option.getArgName() + " shall have " + argNb + " arguments (was "
+ Arrays.toString(args) + ")");
return false;
}
} else {
if (args == null || args.length != argNb) {
System.err.println(option.getArgName() + " shall have " + argNb + " arguments (was "
+ Arrays.toString(args) + ")");
return false;
}
}
}
return true;
}
@SuppressWarnings("static-access")
private static Options constructCommandLineOptions() {
Option helpOption =
OptionBuilder.withLongOpt(help).withDescription("Prints command-line options info")
.create();
Option domainOption =
OptionBuilder.withLongOpt(domain).withDescription("Domain of the JMX bean").create();
domainOption.setArgs(1);
domainOption.setRequired(true);
Option fieldsOption =
OptionBuilder.withLongOpt(fields).withDescription("Fields of the JMX bean to sample")
.create();
fieldsOption.setArgs(1);
fieldsOption.setRequired(false);
Option operationOption =
OptionBuilder.withLongOpt(operations).withDescription("Operation to invoke").create();
operationOption.setArgs(1);
operationOption.setRequired(true);
Option periodOption =
OptionBuilder.withLongOpt(period).withDescription("Sampling period in MS").create();
periodOption.setArgs(1);
periodOption.setRequired(false);
Option classOption =
OptionBuilder.withLongOpt(className).withDescription("Classname of the MBean").create();
classOption.setArgs(1);
classOption.setRequired(true);
Option patternOption =
OptionBuilder.withLongOpt(pattern).withDescription("pattern of the MBean").create();
patternOption.setArgs(1);
patternOption.setRequired(true);
Option outputFileOption =
OptionBuilder.withLongOpt(outputFile).withDescription("outputFileName").create();
outputFileOption.setArgs(1);
outputFileOption.setRequired(false);
Option jmxUrlOption =
OptionBuilder.withLongOpt(jmxUrl).withDescription("jmx port to connect to").create();
jmxUrlOption.setArgs(1);
jmxUrlOption.setRequired(true);
Option sampleCountOption =
OptionBuilder.withLongOpt(sampleCount).withDescription("# of samples to take").create();
sampleCountOption.setArgs(1);
sampleCountOption.setRequired(false);
Options options = new Options();
options.addOption(helpOption);
options.addOption(domainOption);
options.addOption(fieldsOption);
options.addOption(operationOption);
options.addOption(classOption);
options.addOption(outputFileOption);
options.addOption(jmxUrlOption);
options.addOption(patternOption);
options.addOption(periodOption);
options.addOption(sampleCountOption);
return options;
}
public static void printUsage(Options cliOptions) {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("java " + JmxDumper.class.getName(), cliOptions);
}
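// Illustrative command line (a sketch that assumes each long-option name equals
// its constant identifier: jmxUrl, domain, className, pattern, period, fields,
// operations, outputFile, sampleCount; all values below are hypothetical):
// java org.apache.helix.tools.commandtools.JmxDumper --jmxUrl localhost:27961 \
//     --domain org.apache.ZooKeeperService \
//     --className org.apache.zookeeper.server.ConnectionBean \
//     --pattern "org.apache.ZooKeeperService:name0=*,name1=Connections,name2=*,name3=*" \
//     --period 1000 --fields AvgLatency,MaxLatency --operations resetCounters \
//     --outputFile /tmp/jmx.csv --sampleCount 10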
public static void main(String[] args) throws Exception {
/*
* Example programmatic usage (kept consistent with the current constructor
* signature; values are illustrative):
* List<String> fields = Arrays.asList("AvgLatency,MaxLatency,MinLatency,PacketsReceived,PacketsSent".split(","));
* List<String> operations = Arrays.asList("resetCounters".split(","));
* JmxDumper dumper = new JmxDumper("localhost:27961", "org.apache.ZooKeeperService",
* "org.apache.zookeeper.server.ConnectionBean",
* "org.apache.ZooKeeperService:name0=*,name1=Connections,name2=*,name3=*", 1000, fields,
* operations, "/tmp/1.csv", -1);
* Thread.currentThread().join();
*/
int ret = processCommandLineArgs(args);
System.exit(ret);
}
}
| 9,978 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/LocalZKServer.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.helix.zookeeper.zkclient.IDefaultNameSpace;
import org.apache.helix.zookeeper.zkclient.ZkClient;
import org.apache.helix.zookeeper.zkclient.ZkServer;
/**
* Provides ability to start zookeeper locally on a particular port
*/
public class LocalZKServer {
public void start(int port, String dataDir, String logDir) throws Exception {
IDefaultNameSpace defaultNameSpace = new IDefaultNameSpace() {
@Override
public void createDefaultNameSpace(ZkClient zkClient) {
}
};
ZkServer server = new ZkServer(dataDir, logDir, defaultNameSpace, port);
server.start();
Thread.currentThread().join();
}
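// Minimal embedded usage sketch (port and paths are hypothetical); note that
// start() blocks the calling thread via Thread.currentThread().join():
// new LocalZKServer().start(2199, "/tmp/zk/dataDir", "/tmp/zk/logDir");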
public static void main(String[] args) throws Exception {
int port = 2199;
String rootDir =
System.getProperty("java.io.tmpdir") + "/zk-helix/" + System.currentTimeMillis();
String dataDir = rootDir + "/dataDir";
String logDir = rootDir + "/logDir";
if (args.length > 0) {
port = Integer.parseInt(args[0]);
}
if (args.length > 1) {
dataDir = args[1];
logDir = args[1];
}
if (args.length > 2) {
logDir = args[2];
}
System.out.println("Starting Zookeeper locally at port:" + port + " dataDir:" + dataDir
+ " logDir:" + logDir);
LocalZKServer localZKServer = new LocalZKServer();
localZKServer.start(port, dataDir, logDir);
}
}
| 9,979 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/ToolsUtil.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import static org.apache.helix.tools.commandtools.IntegrationTestUtil.printUsage;
public class ToolsUtil {
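// Shared CLI parsing helper. Illustrative call site (mirrors ExampleParticipant):
// CommandLine cmd = ToolsUtil.processCommandLineArgs(args, constructCommandLineOptions());
// On a parse failure it prints usage and exits the JVM instead of returning.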
public static CommandLine processCommandLineArgs(String[] cliArgs, Options cliOptions) throws Exception {
CommandLineParser cliParser = new GnuParser();
try {
return cliParser.parse(cliOptions, cliArgs);
} catch (ParseException pe) {
System.err.println("CommandLineClient: failed to parse command-line options: "
+ pe.toString());
printUsage(cliOptions);
System.exit(1);
}
return null; // unreachable in practice: System.exit(1) above terminates the JVM
}
}
| 9,980 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/ExampleParticipant.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.examples.BrokerResourceOnlineOfflineStateModelFactory;
import org.apache.helix.examples.LeaderStandbyStateModelFactory;
import org.apache.helix.examples.MasterSlaveStateModelFactory;
import org.apache.helix.examples.OnlineOfflineStateModelFactory;
import org.apache.helix.examples.SegmentOnlineOfflineStateModelFactory;
import org.apache.helix.manager.zk.HelixManagerShutdownHook;
import org.apache.helix.model.Message.MessageType;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.participant.statemachine.StateModel;
import org.apache.helix.participant.statemachine.StateModelFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
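// Illustrative invocation (a sketch; host, cluster, and instance values are
// hypothetical, while the long-option names come from the String constants below):
// java org.apache.helix.tools.commandtools.ExampleParticipant --zkSvr localhost:2181 \
//     --cluster MyCluster --instances instance1:instance2 --transDelay 100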
public class ExampleParticipant {
private static final Logger LOG = LoggerFactory.getLogger(ExampleParticipant.class);
public static final String zkServer = "zkSvr";
public static final String cluster = "cluster";
public static final String instances = "instances";
public static final String help = "help";
public static final String transDelay = "transDelay";
private final String zkConnectString;
private final String clusterName;
private final String instanceName;
private HelixManager manager;
private StateModelFactory<StateModel> stateModelFactory;
private final int delay;
public ExampleParticipant(String zkConnectString, String clusterName, String instanceName,
int delay) {
this.zkConnectString = zkConnectString;
this.clusterName = clusterName;
this.instanceName = instanceName;
this.delay = delay;
}
public void start() throws Exception {
manager =
HelixManagerFactory.getZKHelixManager(clusterName, instanceName, InstanceType.PARTICIPANT,
zkConnectString);
// genericStateMachineHandler = new StateMachineEngine();
// genericStateMachineHandler.registerStateModelFactory(stateModelType,
// stateModelFactory);
StateMachineEngine stateMach = manager.getStateMachineEngine();
stateMach.registerStateModelFactory("MasterSlave",
new MasterSlaveStateModelFactory(this.instanceName, delay));
stateMach.registerStateModelFactory("OnlineOffline",
new OnlineOfflineStateModelFactory(this.instanceName, delay));
stateMach.registerStateModelFactory("LeaderStandby",
new LeaderStandbyStateModelFactory(this.instanceName, delay));
stateMach.registerStateModelFactory("BrokerResourceOnlineOfflineStateModel",
new BrokerResourceOnlineOfflineStateModelFactory());
stateMach.registerStateModelFactory("SegmentOnlineOfflineStateModel",
new SegmentOnlineOfflineStateModelFactory());
manager.connect();
manager.getMessagingService()
.registerMessageHandlerFactory(MessageType.STATE_TRANSITION.name(), stateMach);
}
public void stop() {
manager.disconnect();
}
public HelixManager getManager() {
return manager;
}
@SuppressWarnings("static-access")
private static Options constructCommandLineOptions() {
Option helpOption =
OptionBuilder.withLongOpt(help).withDescription("Prints command-line options info")
.create();
Option zkServerOption =
OptionBuilder.withLongOpt(zkServer).withDescription("Provide zookeeper address").create();
zkServerOption.setArgs(1);
zkServerOption.setRequired(true);
zkServerOption.setArgName("ZookeeperServerAddress(Required)");
Option clusterOption =
OptionBuilder.withLongOpt(cluster).withDescription("Provide cluster name").create();
clusterOption.setArgs(1);
clusterOption.setRequired(true);
clusterOption.setArgName("Cluster name (Required)");
Option instancesOption =
OptionBuilder.withLongOpt(instances).withDescription("Provide instance names, separated by ':'").create();
instancesOption.setArgs(1);
instancesOption.setRequired(true);
instancesOption.setArgName("Instance names (Required)");
Option transDelayOption =
OptionBuilder.withLongOpt(transDelay).withDescription("Provide state trans delay").create();
transDelayOption.setArgs(1);
transDelayOption.setRequired(false);
transDelayOption.setArgName("Delay time in state transition, in MS");
OptionGroup optionGroup = new OptionGroup();
optionGroup.addOption(zkServerOption);
Options options = new Options();
options.addOption(helpOption);
options.addOption(clusterOption);
options.addOption(instancesOption);
options.addOption(transDelayOption);
options.addOptionGroup(optionGroup);
return options;
}
public static void printUsage(Options cliOptions) {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.setWidth(1000);
helpFormatter.printHelp("java " + ExampleParticipant.class.getName(), cliOptions);
}
public static void main(String[] args) throws Exception {
int delay = 0;
CommandLine cmd = ToolsUtil.processCommandLineArgs(args, constructCommandLineOptions());
String zkConnectString = cmd.getOptionValue(zkServer);
String clusterName = cmd.getOptionValue(cluster);
String instanceNames = cmd.getOptionValue(instances);
List<String> hosts = Arrays.asList(instanceNames.split(":"));
if (cmd.hasOption(transDelay)) {
try {
delay = Integer.parseInt(cmd.getOptionValue(transDelay));
if (delay < 0) {
throw new Exception("delay must be positive");
}
} catch (Exception e) {
e.printStackTrace();
delay = 0;
}
}
System.out.println("Starting Instances with ZK:" + zkConnectString + ", cluster: " + clusterName
+ ", instances: " + hosts);
for (String instanceName : hosts) {
System.out.println("Starting Instance:" + instanceName);
ExampleParticipant process =
new ExampleParticipant(zkConnectString, clusterName, instanceName, delay);
process.start();
System.out.println("Started Instance:" + instanceName);
Runtime.getRuntime().addShutdownHook(new HelixManagerShutdownHook(process.getManager()));
}
Thread.currentThread().join();
}
}
| 9,981 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/TaskAdmin.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.File;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import com.google.common.collect.Lists;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.task.JobConfig;
import org.apache.helix.task.JobContext;
import org.apache.helix.task.TaskConfig;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskPartitionState;
import org.apache.helix.task.TaskState;
import org.apache.helix.task.Workflow;
import org.apache.helix.task.WorkflowConfig;
import org.apache.helix.task.WorkflowContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* CLI for operating workflows and jobs.
* This is a wrapper of TaskDriver instance to allow command line changes of workflows and jobs.
*/
public class TaskAdmin {
/** For logging */
private static final Logger LOG = LoggerFactory.getLogger(TaskAdmin.class);
/** Required option name for Helix endpoint */
private static final String ZK_ADDRESS = "zk";
/** Required option name for cluster against which to run task */
private static final String CLUSTER_NAME_OPTION = "cluster";
/** Required option name for task resource within target cluster */
private static final String RESOURCE_OPTION = "resource";
/** Field for specifying a workflow file when starting a job */
private static final String WORKFLOW_FILE_OPTION = "file";
/**
* Parses the first argument as a driver command and the rest of the
* arguments are parsed based on that command. Constructs a Helix
* message and posts it to the controller
*/
public static void main(String[] args) throws Exception {
if (args.length < 1) {
printUsage(constructOptions(), "[cmd]");
throw new IllegalArgumentException("A driver command is required as the first argument");
}
String[] cmdArgs = Arrays.copyOfRange(args, 1, args.length);
CommandLine cl = parseOptions(cmdArgs, constructOptions(), args[0]);
String zkAddr = cl.getOptionValue(ZK_ADDRESS);
String clusterName = cl.getOptionValue(CLUSTER_NAME_OPTION);
String workflow = cl.getOptionValue(RESOURCE_OPTION);
if (zkAddr == null || clusterName == null || workflow == null) {
printUsage(constructOptions(), "[cmd]");
throw new IllegalArgumentException(
"zk, cluster, and resource must all be non-null for all commands");
}
HelixManager helixMgr =
HelixManagerFactory.getZKHelixManager(clusterName, "Admin", InstanceType.ADMINISTRATOR,
zkAddr);
helixMgr.connect();
TaskDriver driver = new TaskDriver(helixMgr);
TaskDriver.DriverCommand cmd = TaskDriver.DriverCommand.valueOf(args[0]);
switch (cmd) {
case start:
if (cl.hasOption(WORKFLOW_FILE_OPTION)) {
driver.start(Workflow.parse(new File(cl.getOptionValue(WORKFLOW_FILE_OPTION))));
} else {
throw new IllegalArgumentException("Workflow file is required to start flow.");
}
break;
case stop:
driver.stop(workflow);
break;
case resume:
driver.resume(workflow);
break;
case delete:
driver.delete(workflow);
break;
case list:
list(driver, workflow);
break;
case flush:
driver.flushQueue(workflow);
break;
case clean:
driver.cleanupQueue(workflow);
break;
default:
throw new IllegalArgumentException("Unknown command " + args[0]);
}
helixMgr.disconnect();
}
private static void list(TaskDriver taskDriver, String workflow) {
WorkflowConfig wCfg = taskDriver.getWorkflowConfig(workflow);
if (wCfg == null) {
LOG.error("Workflow " + workflow + " does not exist!");
return;
}
WorkflowContext wCtx = taskDriver.getWorkflowContext(workflow);
LOG.info("Workflow " + workflow + " consists of the following tasks: " + wCfg.getJobDag()
.getAllNodes());
String workflowState =
(wCtx != null) ? wCtx.getWorkflowState().name() : TaskState.NOT_STARTED.name();
LOG.info("Current state of workflow is " + workflowState);
LOG.info("Job states are: ");
LOG.info("-------");
for (String job : wCfg.getJobDag().getAllNodes()) {
TaskState jobState = (wCtx != null) ? wCtx.getJobState(job) : TaskState.NOT_STARTED;
LOG.info("Job " + job + " is " + jobState);
// fetch job information
JobConfig jCfg = taskDriver.getJobConfig(job);
JobContext jCtx = taskDriver.getJobContext(job);
if (jCfg == null || jCtx == null) {
LOG.info("-------");
continue;
}
// calculate taskPartitions
List<Integer> partitions = Lists.newArrayList(jCtx.getPartitionSet());
Collections.sort(partitions);
// report status
for (Integer partition : partitions) {
String taskId = jCtx.getTaskIdForPartition(partition);
taskId = (taskId != null) ? taskId : jCtx.getTargetForPartition(partition);
LOG.info("Task: " + taskId);
TaskConfig taskConfig = jCfg.getTaskConfig(taskId);
if (taskConfig != null) {
LOG.info("Configuration: " + taskConfig.getConfigMap());
}
TaskPartitionState state = jCtx.getPartitionState(partition);
state = (state != null) ? state : TaskPartitionState.INIT;
LOG.info("State: " + state);
String assignedParticipant = jCtx.getAssignedParticipant(partition);
if (assignedParticipant != null) {
LOG.info("Assigned participant: " + assignedParticipant);
}
LOG.info("-------");
}
LOG.info("-------");
}
}
/** Constructs option group containing options required by all drivable jobs */
@SuppressWarnings("static-access")
private static OptionGroup constructGenericRequiredOptionGroup() {
Option zkAddressOption =
OptionBuilder.isRequired().withLongOpt(ZK_ADDRESS)
.withDescription("ZK address managing cluster").create();
zkAddressOption.setArgs(1);
zkAddressOption.setArgName("zkAddress");
Option clusterNameOption =
OptionBuilder.isRequired().withLongOpt(CLUSTER_NAME_OPTION).withDescription("Cluster name")
.create();
clusterNameOption.setArgs(1);
clusterNameOption.setArgName("clusterName");
Option taskResourceOption =
OptionBuilder.isRequired().withLongOpt(RESOURCE_OPTION)
.withDescription("Workflow or job name").create();
taskResourceOption.setArgs(1);
taskResourceOption.setArgName("resourceName");
OptionGroup group = new OptionGroup();
group.addOption(zkAddressOption);
group.addOption(clusterNameOption);
group.addOption(taskResourceOption);
return group;
}
/** Constructs options set for all basic control messages */
private static Options constructOptions() {
Options options = new Options();
options.addOptionGroup(constructGenericRequiredOptionGroup());
options.addOptionGroup(constructStartOptionGroup());
return options;
}
/** Constructs option group containing options required by all drivable jobs */
private static OptionGroup constructStartOptionGroup() {
@SuppressWarnings("static-access")
Option workflowFileOption =
OptionBuilder.withLongOpt(WORKFLOW_FILE_OPTION)
.withDescription("Local file describing workflow").create();
workflowFileOption.setArgs(1);
workflowFileOption.setArgName("workflowFile");
OptionGroup group = new OptionGroup();
group.addOption(workflowFileOption);
return group;
}
/** Attempts to parse options for given command, printing usage under failure */
private static CommandLine parseOptions(String[] args, Options options, String cmdStr) {
CommandLineParser cliParser = new GnuParser();
CommandLine cmd = null;
try {
cmd = cliParser.parse(options, args);
} catch (ParseException pe) {
LOG.error("CommandLineClient: failed to parse command-line options: " + pe.toString());
printUsage(options, cmdStr);
System.exit(1);
}
boolean ret = checkOptionArgsNumber(cmd.getOptions());
if (!ret) {
printUsage(options, cmdStr);
System.exit(1);
}
return cmd;
}
/** Ensures options argument counts are correct */
private static boolean checkOptionArgsNumber(Option[] options) {
for (Option option : options) {
int argNb = option.getArgs();
String[] args = option.getValues();
if (argNb == 0) {
if (args != null && args.length > 0) {
System.err.println(option.getArgName() + " shall have " + argNb + " arguments (was "
+ Arrays.toString(args) + ")");
return false;
}
} else {
if (args == null || args.length != argNb) {
System.err.println(option.getArgName() + " shall have " + argNb + " arguments (was "
+ Arrays.toString(args) + ")");
return false;
}
}
}
return true;
}
/** Displays CLI usage for given option set and command name */
private static void printUsage(Options cliOptions, String cmd) {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.setWidth(1000);
helpFormatter.printHelp("java " + TaskAdmin.class.getName() + " " + cmd, cliOptions);
}
}
| 9,982 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/ZKLogFormatter.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.io.BufferedWriter;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.text.DateFormat;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.zip.Adler32;
import java.util.zip.Checksum;
import javax.xml.bind.annotation.adapters.HexBinaryAdapter;
import org.apache.jute.BinaryInputArchive;
import org.apache.jute.Record;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.ZooDefs.OpCode;
import org.apache.zookeeper.data.Stat;
import org.apache.zookeeper.server.DataNode;
import org.apache.zookeeper.server.DataTree;
import org.apache.zookeeper.server.TxnLogEntry;
import org.apache.zookeeper.server.persistence.FileHeader;
import org.apache.zookeeper.server.persistence.FileSnap;
import org.apache.zookeeper.server.persistence.FileTxnLog;
import org.apache.zookeeper.server.util.SerializeUtils;
import org.apache.zookeeper.txn.TxnHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ZKLogFormatter {
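// Illustrative invocations (a sketch; file paths are hypothetical). The optional
// third argument redirects output from stdout to a file:
// java org.apache.helix.tools.commandtools.ZKLogFormatter log /var/zk/version-2/log.100000001 /tmp/txn.txt
// java org.apache.helix.tools.commandtools.ZKLogFormatter snapshot /var/zk/version-2/snapshot.100000000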
private static final Logger LOG = LoggerFactory.getLogger(ZKLogFormatter.class);
private static DateFormat dateTimeInstance = DateFormat.getDateTimeInstance(DateFormat.SHORT,
DateFormat.LONG);
private static HexBinaryAdapter adapter = new HexBinaryAdapter();
private static String fieldDelim = ":";
private static String fieldSep = " ";
static BufferedWriter bw = null;
/**
* @param args
*/
public static void main(String[] args) throws Exception {
if (args.length != 2 && args.length != 3) {
System.err.println("USAGE: LogFormatter <log|snapshot> log_file");
System.exit(2);
}
if (args.length == 3) {
bw = new BufferedWriter(new FileWriter(new File(args[2])));
}
if (args[0].equals("log")) {
readTransactionLog(args[1]);
} else if (args[0].equals("snapshot")) {
readSnapshotLog(args[1]);
}
if (bw != null) {
bw.close();
}
}
private static void readSnapshotLog(String snapshotPath) throws Exception {
FileInputStream fis = new FileInputStream(snapshotPath);
BinaryInputArchive ia = BinaryInputArchive.getArchive(fis);
Map<Long, Integer> sessions = new HashMap<Long, Integer>();
DataTree dt = new DataTree();
FileHeader header = new FileHeader();
header.deserialize(ia, "fileheader");
if (header.getMagic() != FileSnap.SNAP_MAGIC) {
throw new IOException("mismatching magic headers " + header.getMagic() + " != "
+ FileSnap.SNAP_MAGIC);
}
SerializeUtils.deserializeSnapshot(dt, ia, sessions);
if (bw != null) {
bw.write(sessions.toString());
bw.newLine();
} else {
System.out.println(sessions);
}
traverse(dt, 1, "/");
}
/*
* Level order traversal
*/
private static void traverse(DataTree dt, int startId, String startPath) throws Exception {
LinkedList<Pair> queue = new LinkedList<Pair>();
queue.add(new Pair(startPath, startId));
while (!queue.isEmpty()) {
Pair pair = queue.removeFirst();
String path = pair._path;
DataNode head = dt.getNode(path);
Stat stat = new Stat();
byte[] data = null;
try {
data = dt.getData(path, stat, null);
} catch (NoNodeException e) {
e.printStackTrace();
}
// print the node
format(startId, pair, head, data);
Set<String> children = head.getChildren();
if (children != null) {
for (String child : children) {
String childPath;
if (path.endsWith("/")) {
childPath = path + child;
} else {
childPath = path + "/" + child;
}
queue.add(new Pair(childPath, startId));
}
}
startId = startId + 1;
}
}
static class Pair {
private final String _path;
private final int _parentId;
public Pair(String path, int parentId) {
_path = path;
_parentId = parentId;
}
}
private static void format(int id, Pair pair, DataNode head, byte[] data) throws Exception {
String dataStr = "";
if (data != null) {
dataStr = new String(data).replaceAll("[\\s]+", "");
}
StringBuilder sb = new StringBuilder();
// @formatter:off
sb.append("id").append(fieldDelim).append(id).append(fieldSep);
sb.append("parent").append(fieldDelim).append(pair._parentId).append(fieldSep);
sb.append("path").append(fieldDelim).append(pair._path).append(fieldSep);
sb.append("session").append(fieldDelim)
.append("0x" + Long.toHexString(head.stat.getEphemeralOwner())).append(fieldSep);
sb.append("czxid").append(fieldDelim).append("0x" + Long.toHexString(head.stat.getCzxid()))
.append(fieldSep);
sb.append("ctime").append(fieldDelim).append(head.stat.getCtime()).append(fieldSep);
sb.append("mtime").append(fieldDelim).append(head.stat.getMtime()).append(fieldSep);
sb.append("cmzxid").append(fieldDelim).append("0x" + Long.toHexString(head.stat.getMzxid()))
.append(fieldSep);
sb.append("pzxid").append(fieldDelim).append("0x" + Long.toHexString(head.stat.getPzxid()))
.append(fieldSep);
sb.append("aversion").append(fieldDelim).append(head.stat.getAversion()).append(fieldSep);
sb.append("cversion").append(fieldDelim).append(head.stat.getCversion()).append(fieldSep);
sb.append("version").append(fieldDelim).append(head.stat.getVersion()).append(fieldSep);
sb.append("data").append(fieldDelim).append(dataStr).append(fieldSep);
// @formatter:on
if (bw != null) {
bw.write(sb.toString());
bw.newLine();
} else {
System.out.println(sb);
}
}
private static void readTransactionLog(String logfilepath) throws FileNotFoundException,
IOException, EOFException {
FileInputStream fis = new FileInputStream(logfilepath);
BinaryInputArchive logStream = BinaryInputArchive.getArchive(fis);
FileHeader fhdr = new FileHeader();
fhdr.deserialize(logStream, "fileheader");
if (fhdr.getMagic() != FileTxnLog.TXNLOG_MAGIC) {
System.err.println("Invalid magic number for " + logfilepath);
System.exit(2);
}
if (bw != null) {
bw.write("ZooKeeper Transactional Log File with dbid " + fhdr.getDbid()
+ " txnlog format version " + fhdr.getVersion());
bw.newLine();
} else {
System.out.println("ZooKeeper Transactional Log File with dbid " + fhdr.getDbid()
+ " txnlog format version " + fhdr.getVersion());
}
int count = 0;
while (true) {
long crcValue;
byte[] bytes;
try {
crcValue = logStream.readLong("crcvalue");
bytes = logStream.readBuffer("txnEntry");
} catch (EOFException e) {
if (bw != null) {
bw.write("EOF reached after " + count + " txns.");
bw.newLine();
} else {
System.out.println("EOF reached after " + count + " txns.");
}
break;
}
if (bytes.length == 0) {
// Since we preallocate, we define EOF to be an
// empty transaction
if (bw != null) {
bw.write("EOF reached after " + count + " txns.");
bw.newLine();
} else {
System.out.println("EOF reached after " + count + " txns.");
}
return;
}
Checksum crc = new Adler32();
crc.update(bytes, 0, bytes.length);
if (crcValue != crc.getValue()) {
throw new IOException("CRC doesn't match " + crcValue + " vs " + crc.getValue());
}
TxnLogEntry txnLogEntry = SerializeUtils.deserializeTxn(bytes);
if (bw != null) {
bw.write(formatTransaction(txnLogEntry.getHeader(), txnLogEntry.getTxn()));
bw.newLine();
} else {
System.out.println(formatTransaction(txnLogEntry.getHeader(), txnLogEntry.getTxn()));
}
if (logStream.readByte("EOR") != 'B') {
LOG.error("Last transaction was partial.");
throw new EOFException("Last transaction was partial.");
}
count++;
}
}
static String op2String(int op) {
switch (op) {
case OpCode.notification:
return "notification";
case OpCode.create:
return "create";
case OpCode.delete:
return "delete";
case OpCode.exists:
return "exists";
case OpCode.getData:
return "getDate";
case OpCode.setData:
return "setData";
case OpCode.getACL:
return "getACL";
case OpCode.setACL:
return "setACL";
case OpCode.getChildren:
return "getChildren";
case OpCode.getChildren2:
return "getChildren2";
case OpCode.ping:
return "ping";
case OpCode.createSession:
return "createSession";
case OpCode.closeSession:
return "closeSession";
case OpCode.error:
return "error";
default:
return "unknown " + op;
}
}
private static String formatTransaction(TxnHeader header, Record txn) {
StringBuilder sb = new StringBuilder();
sb.append("time").append(fieldDelim).append(header.getTime());
sb.append(fieldSep).append("session").append(fieldDelim).append("0x")
.append(Long.toHexString(header.getClientId()));
sb.append(fieldSep).append("cxid").append(fieldDelim).append("0x")
.append(Long.toHexString(header.getCxid()));
sb.append(fieldSep).append("zxid").append(fieldDelim).append("0x")
.append(Long.toHexString(header.getZxid()));
sb.append(fieldSep).append("type").append(fieldDelim).append(op2String(header.getType()));
if (txn != null) {
try {
byte[] data = null;
for (PropertyDescriptor pd : Introspector.getBeanInfo(txn.getClass())
.getPropertyDescriptors()) {
if (pd.getName().equalsIgnoreCase("data")) {
data = (byte[]) pd.getReadMethod().invoke(txn);
continue;
}
if (pd.getReadMethod() != null && !"class".equals(pd.getName())) {
sb.append(fieldSep).append(pd.getDisplayName()).append(fieldDelim)
.append(pd.getReadMethod().invoke(txn).toString().replaceAll("[\\s]+", ""));
}
}
if (data != null) {
sb.append(fieldSep).append("data").append(fieldDelim)
.append(new String(data).replaceAll("[\\s]+", ""));
}
} catch (Exception e) {
LOG.error("Error while retrieving bean property values for " + txn.getClass(), e);
}
}
return sb.toString();
}
}
| 9,983 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/YAMLClusterSetup.java | package org.apache.helix.tools.commandtools;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixException;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.manager.zk.ZKHelixManager;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.helix.tools.StateModelConfigGenerator;
import org.apache.helix.tools.commandtools.YAMLClusterSetup.YAMLClusterConfig.ParticipantConfig;
import org.apache.helix.tools.commandtools.YAMLClusterSetup.YAMLClusterConfig.ResourceConfig;
import org.apache.helix.tools.commandtools.YAMLClusterSetup.YAMLClusterConfig.ResourceConfig.ConstraintsConfig;
import org.apache.helix.tools.commandtools.YAMLClusterSetup.YAMLClusterConfig.ResourceConfig.StateModelConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.Yaml;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Supports HelixAdmin operations specified by a YAML configuration file defining a cluster,
* resources, participants, etc.
* See the user-rebalanced-lock-manager recipe for an annotated example file.
*/
public class YAMLClusterSetup {
private static final Logger LOG = LoggerFactory.getLogger(YAMLClusterSetup.class);
private final String _zkAddress;
/**
* Start the YAML parser for a given zookeeper instance
* @param zkAddress
*/
public YAMLClusterSetup(String zkAddress) {
_zkAddress = zkAddress;
}
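// A minimal YAML sketch accepted by setupCluster(), mirroring the
// YAMLClusterConfig wrapper defined below; all names and values are hypothetical:
// clusterName: lock-manager-demo
// resources:
//   - name: lock-group
//     rebalancer:
//       mode: FULL_AUTO
//     partitions:
//       count: 12
//       replicas: 1
//     stateModel:
//       name: OnlineOffline
// participants:
//   - name: localhost_12001
//     host: localhost
//     port: 12001
// autoJoinAllowed: true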
/**
* Set up the cluster by parsing a YAML file.
* @param input InputStream representing the file
* @return ClusterConfig Java wrapper of the configuration file
*/
public YAMLClusterConfig setupCluster(InputStream input) {
// parse the YAML
Yaml yaml = new Yaml();
YAMLClusterConfig cfg = yaml.loadAs(input, YAMLClusterConfig.class);
// create the cluster
HelixAdmin helixAdmin = new ZKHelixAdmin(_zkAddress);
if (cfg.clusterName == null) {
throw new HelixException("Cluster name is required!");
}
helixAdmin.addCluster(cfg.clusterName);
// add each participant
if (cfg.participants != null) {
for (ParticipantConfig participant : cfg.participants) {
helixAdmin.addInstance(cfg.clusterName, getInstanceCfg(participant));
}
}
// add each resource
if (cfg.resources != null) {
for (ResourceConfig resource : cfg.resources) {
if (resource.name == null) {
throw new HelixException("Resources must be named!");
}
if (resource.stateModel == null || resource.stateModel.name == null) {
throw new HelixException("Resource must specify a named state model!");
}
// if states is null, assume using a built-in or already-added state model
if (resource.stateModel.states != null) {
StateModelDefinition stateModelDef =
getStateModelDef(resource.stateModel, resource.constraints);
helixAdmin.addStateModelDef(cfg.clusterName, resource.stateModel.name, stateModelDef);
} else {
StateModelDefinition stateModelDef = null;
if (resource.stateModel.name.equals("MasterSlave")) {
stateModelDef =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave());
} else if (resource.stateModel.name.equals("OnlineOffline")) {
stateModelDef =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForOnlineOffline());
} else if (resource.stateModel.name.equals("LeaderStandby")) {
stateModelDef =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForLeaderStandby());
}
if (stateModelDef != null) {
try {
helixAdmin.addStateModelDef(cfg.clusterName, resource.stateModel.name, stateModelDef);
} catch (HelixException e) {
LOG.warn("State model definition " + resource.stateModel.name
+ " could not be added.");
}
}
}
int partitions = 1;
int replicas = 1;
if (resource.partitions != null) {
if (resource.partitions.containsKey("count")) {
partitions = resource.partitions.get("count");
}
if (resource.partitions.containsKey("replicas")) {
replicas = resource.partitions.get("replicas");
}
}
if (resource.rebalancer == null || !resource.rebalancer.containsKey("mode")) {
throw new HelixException("Rebalance mode is required!");
}
helixAdmin.addResource(cfg.clusterName, resource.name, partitions,
resource.stateModel.name, resource.rebalancer.get("mode"));
// user-defined rebalancer
if (resource.rebalancer.containsKey("class")
&& resource.rebalancer.get("mode").equals(RebalanceMode.USER_DEFINED.toString())) {
IdealState idealState = helixAdmin.getResourceIdealState(cfg.clusterName, resource.name);
idealState.setRebalancerClassName(resource.rebalancer.get("class"));
helixAdmin.setResourceIdealState(cfg.clusterName, resource.name, idealState);
}
helixAdmin.rebalance(cfg.clusterName, resource.name, replicas);
}
}
// enable auto join if this option is set
if (cfg.autoJoinAllowed != null && cfg.autoJoinAllowed) {
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(cfg.clusterName)
.build();
Map<String, String> properties = new HashMap<String, String>();
properties.put(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, cfg.autoJoinAllowed.toString());
helixAdmin.setConfig(scope, properties);
}
return cfg;
}
private static InstanceConfig getInstanceCfg(ParticipantConfig participant) {
if (participant == null || participant.name == null || participant.host == null
|| participant.port == null) {
throw new HelixException("Participant must have a specified name, host, and port!");
}
InstanceConfig instanceCfg = new InstanceConfig(participant.name);
instanceCfg.setHostName(participant.host);
instanceCfg.setPort(participant.port.toString());
return instanceCfg;
}
private static StateModelDefinition getStateModelDef(StateModelConfig stateModel,
ConstraintsConfig constraints) {
// Use a builder to define the state model
StateModelDefinition.Builder builder = new StateModelDefinition.Builder(stateModel.name);
if (stateModel.states == null || stateModel.states.size() == 0) {
throw new HelixException("List of states are required in a state model!");
}
Set<String> stateSet = new HashSet<String>(stateModel.states);
if (stateModel.initialState == null) {
throw new HelixException("Initial state is required in a state model!");
} else if (!stateSet.contains(stateModel.initialState)) {
throw new HelixException("Initial state is not a valid state");
}
builder.initialState(stateModel.initialState);
// Build a helper for state priorities
Map<String, Integer> statePriorities = new HashMap<String, Integer>();
if (constraints != null && constraints.state != null && constraints.state.priorityList != null) {
int statePriority = 0;
for (String state : constraints.state.priorityList) {
if (!stateSet.contains(state)) {
throw new HelixException("State " + state
+ " in the state priority list is not in the state list!");
}
statePriorities.put(state, statePriority);
statePriority++;
}
}
// Add states, set state priorities
for (String state : stateModel.states) {
if (statePriorities.containsKey(state)) {
builder.addState(state, statePriorities.get(state));
} else {
builder.addState(state);
}
}
// Set state counts (guard the constraint sections, which may be absent, to avoid an NPE)
if (constraints != null && constraints.state != null && constraints.state.counts != null) {
for (Map<String, String> counts : constraints.state.counts) {
String state = counts.get("name");
if (!stateSet.contains(state)) {
throw new HelixException("State " + state + " has a count, but not in the state list!");
}
builder.dynamicUpperBound(state, counts.get("count"));
}
}
// Build a helper for transition priorities
Map<String, Integer> transitionPriorities = new HashMap<String, Integer>();
if (constraints != null && constraints.transition != null
&& constraints.transition.priorityList != null) {
int transitionPriority = 0;
for (String transition : constraints.transition.priorityList) {
transitionPriorities.put(transition, transitionPriority);
transitionPriority++;
}
}
// Add the transitions
if (stateModel.transitions == null || stateModel.transitions.size() == 0) {
throw new HelixException("Transitions are required!");
}
for (Map<String, String> transitions : stateModel.transitions) {
String name = transitions.get("name");
String from = transitions.get("from");
String to = transitions.get("to");
if (name == null || from == null || to == null) {
throw new HelixException("All transitions must have a name, a from state, and a to state");
}
if (transitionPriorities.containsKey(name)) {
builder.addTransition(from, to, transitionPriorities.get(name));
} else {
builder.addTransition(from, to);
}
}
return builder.build();
}
/**
* Java wrapper for the YAML input file
*/
public static class YAMLClusterConfig {
public String clusterName;
public List<ResourceConfig> resources;
public List<ParticipantConfig> participants;
public Boolean autoJoinAllowed;
public static class ResourceConfig {
public String name;
public Map<String, String> rebalancer;
public Map<String, Integer> partitions;
public StateModelConfig stateModel;
public ConstraintsConfig constraints;
public static class StateModelConfig {
public String name;
public List<String> states;
public List<Map<String, String>> transitions;
public String initialState;
}
public static class ConstraintsConfig {
public StateConstraintsConfig state;
public TransitionConstraintsConfig transition;
public static class StateConstraintsConfig {
public List<Map<String, String>> counts;
public List<String> priorityList;
}
public static class TransitionConstraintsConfig {
public List<String> priorityList;
}
}
}
public static class ParticipantConfig {
public String name;
public String host;
public Integer port;
}
}
/**
* Start a cluster defined by a YAML file
* @param args zkAddr, yamlFile
*/
public static void main(String[] args) {
if (args.length < 2) {
LOG.error("USAGE: YAMLClusterSetup zkAddr yamlFile");
return;
}
String zkAddress = args[0];
String yamlFile = args[1];
InputStream input;
try {
input = new FileInputStream(new File(yamlFile));
} catch (FileNotFoundException e) {
LOG.error("Could not open " + yamlFile);
return;
}
new YAMLClusterSetup(zkAddress).setupCluster(input);
}
}
| 9,984 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools | Create_ds/helix/helix-core/src/main/java/org/apache/helix/tools/commandtools/ZkLogCSVFormatter.java | package org.apache.helix.tools.commandtools;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.model.IdealState.IdealStateProperty;
import org.apache.helix.util.HelixUtil;
public class ZkLogCSVFormatter {
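// Illustrative invocation (a sketch; paths are hypothetical). The output
// directory must already exist; one CSV is written per record category
// (config, idealState, externalView, currentState, messages, liveInstances, ...):
// java org.apache.helix.tools.commandtools.ZkLogCSVFormatter /var/zk/version-2/log.100000001 /tmp/zk-csv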
private static final ZNRecordSerializer _deserializer = new ZNRecordSerializer();
private static String _fieldDelim = ",";
/**
* @param args
*/
public static void main(String[] args) throws Exception {
if (args.length != 2) {
System.err.println("USAGE: ZkLogCSVFormatter log_file output_dir");
System.exit(2);
}
File outputDir = new File(args[1]);
if (!outputDir.exists() || !outputDir.isDirectory()) {
System.err.println(outputDir.getAbsolutePath() + " does NOT exist or is NOT a directory");
System.exit(2);
}
format(args[0], args[1]);
}
private static void formatter(BufferedWriter bw, String... args) {
StringBuilder sb = new StringBuilder();
if (args.length == 0) {
return;
} else {
sb.append(args[0]);
for (int i = 1; i < args.length; i++) {
sb.append(_fieldDelim).append(args[i]);
}
}
try {
bw.write(sb.toString());
bw.newLine();
// System.out.println(sb.toString());
} catch (IOException e) {
// Log and continue; a single failed write should not abort the whole format pass.
e.printStackTrace();
}
}
private static String getAttributeValue(String line, String attribute) {
String[] parts = line.split("\\s");
if (parts != null && parts.length > 0) {
for (int i = 0; i < parts.length; i++) {
if (parts[i].startsWith(attribute)) {
String val = parts[i].substring(attribute.length());
return val;
}
}
}
return null;
}
private static void format(String logfilepath, String outputDir) throws FileNotFoundException {
try {
// input file
FileInputStream fis = new FileInputStream(logfilepath);
BufferedReader br = new BufferedReader(new InputStreamReader(fis));
// output files
FileOutputStream isFos = new FileOutputStream(outputDir + "/" + "idealState.csv");
BufferedWriter isBw = new BufferedWriter(new OutputStreamWriter(isFos));
FileOutputStream cfgFos = new FileOutputStream(outputDir + "/" + "config.csv");
BufferedWriter cfgBw = new BufferedWriter(new OutputStreamWriter(cfgFos));
FileOutputStream evFos = new FileOutputStream(outputDir + "/" + "externalView.csv");
BufferedWriter evBw = new BufferedWriter(new OutputStreamWriter(evFos));
FileOutputStream smdCntFos =
new FileOutputStream(outputDir + "/" + "stateModelDefStateCount.csv");
BufferedWriter smdCntBw = new BufferedWriter(new OutputStreamWriter(smdCntFos));
FileOutputStream smdNextFos =
new FileOutputStream(outputDir + "/" + "stateModelDefStateNext.csv");
BufferedWriter smdNextBw = new BufferedWriter(new OutputStreamWriter(smdNextFos));
FileOutputStream csFos = new FileOutputStream(outputDir + "/" + "currentState.csv");
BufferedWriter csBw = new BufferedWriter(new OutputStreamWriter(csFos));
FileOutputStream msgFos = new FileOutputStream(outputDir + "/" + "messages.csv");
BufferedWriter msgBw = new BufferedWriter(new OutputStreamWriter(msgFos));
FileOutputStream hrPerfFos =
new FileOutputStream(outputDir + "/" + "healthReportDefaultPerfCounters.csv");
BufferedWriter hrPerfBw = new BufferedWriter(new OutputStreamWriter(hrPerfFos));
FileOutputStream liFos = new FileOutputStream(outputDir + "/" + "liveInstances.csv");
BufferedWriter liBw = new BufferedWriter(new OutputStreamWriter(liFos));
formatter(cfgBw, "timestamp", "instanceName", "host", "port", "enabled");
formatter(isBw, "timestamp", "resourceName", "partitionNumber", "mode", "partition",
"instanceName", "priority");
formatter(evBw, "timestamp", "resourceName", "partition", "instanceName", "state");
formatter(smdCntBw, "timestamp", "stateModel", "state", "count");
formatter(smdNextBw, "timestamp", "stateModel", "from", "to", "next");
formatter(liBw, "timestamp", "instanceName", "sessionId", "Operation");
formatter(csBw, "timestamp", "resourceName", "partition", "instanceName", "sessionId",
"state");
formatter(msgBw, "timestamp", "resourceName", "partition", "instanceName", "sessionId",
"from", "to", "messageType", "messageState");
formatter(hrPerfBw, "timestamp", "instanceName", "availableCPUs", "averageSystemLoad",
"freeJvmMemory", "freePhysicalMemory", "totalJvmMemory");
Map<String, ZNRecord> liveInstanceSessionMap = new HashMap<String, ZNRecord>();
int pos;
String inputLine;
while ((inputLine = br.readLine()) != null) {
if (inputLine.indexOf("CONFIGS") != -1) {
pos = inputLine.indexOf("CONFIGS");
pos = inputLine.indexOf("data:{", pos);
if (pos != -1) {
String timestamp = getAttributeValue(inputLine, "time:");
ZNRecord record =
(ZNRecord) _deserializer.deserialize(inputLine.substring(pos + 5).getBytes());
formatter(cfgBw, timestamp, record.getId(), record.getSimpleField("HOST"),
record.getSimpleField("PORT"), record.getSimpleField("ENABLED"));
}
} else if (inputLine.indexOf("IDEALSTATES") != -1) {
pos = inputLine.indexOf("IDEALSTATES");
pos = inputLine.indexOf("data:{", pos);
if (pos != -1) {
String timestamp = getAttributeValue(inputLine, "time:");
ZNRecord record =
(ZNRecord) _deserializer.deserialize(inputLine.substring(pos + 5).getBytes());
// System.out.println("record=" + record);
for (String partition : record.getListFields().keySet()) {
List<String> preferenceList = record.getListFields().get(partition);
for (int i = 0; i < preferenceList.size(); i++) {
String instance = preferenceList.get(i);
formatter(isBw, timestamp, record.getId(),
record.getSimpleField(IdealStateProperty.NUM_PARTITIONS.toString()),
record.getSimpleField(IdealStateProperty.REBALANCE_MODE.toString()), partition,
instance, Integer.toString(i));
}
}
}
} else if (inputLine.indexOf("LIVEINSTANCES") != -1) {
pos = inputLine.indexOf("LIVEINSTANCES");
pos = inputLine.indexOf("data:{", pos);
if (pos != -1) {
String timestamp = getAttributeValue(inputLine, "time:");
ZNRecord record =
(ZNRecord) _deserializer.deserialize(inputLine.substring(pos + 5).getBytes());
formatter(liBw, timestamp, record.getId(), record.getSimpleField("SESSION_ID"), "ADD");
String zkSessionId = getAttributeValue(inputLine, "session:");
if (zkSessionId == null) {
System.err.println("no zk session id associated with the adding of live instance: "
+ inputLine);
} else {
liveInstanceSessionMap.put(zkSessionId, record);
}
}
} else if (inputLine.indexOf("EXTERNALVIEW") != -1) {
pos = inputLine.indexOf("EXTERNALVIEW");
pos = inputLine.indexOf("data:{", pos);
if (pos != -1) {
String timestamp = getAttributeValue(inputLine, "time:");
ZNRecord record =
(ZNRecord) _deserializer.deserialize(inputLine.substring(pos + 5).getBytes());
// System.out.println("record=" + record);
for (String partition : record.getMapFields().keySet()) {
Map<String, String> stateMap = record.getMapFields().get(partition);
for (String instance : stateMap.keySet()) {
String state = stateMap.get(instance);
formatter(evBw, timestamp, record.getId(), partition, instance, state);
}
}
}
} else if (inputLine.indexOf("STATEMODELDEFS") != -1) {
pos = inputLine.indexOf("STATEMODELDEFS");
pos = inputLine.indexOf("data:{", pos);
if (pos != -1) {
String timestamp = getAttributeValue(inputLine, "time:");
ZNRecord record =
(ZNRecord) _deserializer.deserialize(inputLine.substring(pos + 5).getBytes());
for (String stateInfo : record.getMapFields().keySet()) {
if (stateInfo.endsWith(".meta")) {
Map<String, String> metaMap = record.getMapFields().get(stateInfo);
formatter(smdCntBw, timestamp, record.getId(),
stateInfo.substring(0, stateInfo.indexOf('.')), metaMap.get("count"));
} else if (stateInfo.endsWith(".next")) {
Map<String, String> nextMap = record.getMapFields().get(stateInfo);
for (String destState : nextMap.keySet()) {
formatter(smdNextBw, timestamp, record.getId(),
stateInfo.substring(0, stateInfo.indexOf('.')), destState,
nextMap.get(destState));
}
}
}
}
} else if (inputLine.indexOf("CURRENTSTATES") != -1) {
pos = inputLine.indexOf("CURRENTSTATES");
pos = inputLine.indexOf("data:{", pos);
if (pos != -1) {
String timestamp = getAttributeValue(inputLine, "time:");
ZNRecord record =
(ZNRecord) _deserializer.deserialize(inputLine.substring(pos + 5).getBytes());
// System.out.println("record=" + record);
for (String partition : record.getMapFields().keySet()) {
Map<String, String> stateMap = record.getMapFields().get(partition);
String path = getAttributeValue(inputLine, "path:");
if (path != null) {
String instance = HelixUtil.getInstanceNameFromPath(path);
formatter(csBw, timestamp, record.getId(), partition, instance,
record.getSimpleField("SESSION_ID"), stateMap.get("CURRENT_STATE"));
}
}
}
} else if (inputLine.indexOf("MESSAGES") != -1) {
pos = inputLine.indexOf("MESSAGES");
pos = inputLine.indexOf("data:{", pos);
if (pos != -1) {
String timestamp = getAttributeValue(inputLine, "time:");
ZNRecord record =
(ZNRecord) _deserializer.deserialize(inputLine.substring(pos + 5).getBytes());
formatter(msgBw, timestamp, record.getSimpleField("RESOURCE_NAME"),
record.getSimpleField("PARTITION_NAME"), record.getSimpleField("TGT_NAME"),
record.getSimpleField("TGT_SESSION_ID"), record.getSimpleField("FROM_STATE"),
record.getSimpleField("TO_STATE"), record.getSimpleField("MSG_TYPE"),
record.getSimpleField("MSG_STATE"));
}
} else if (inputLine.indexOf("closeSession") != -1) {
String zkSessionId = getAttributeValue(inputLine, "session:");
if (zkSessionId == null) {
System.err.println("no zk session id associated with the closing of zk session: "
+ inputLine);
} else {
ZNRecord record = liveInstanceSessionMap.remove(zkSessionId);
// System.err.println("zkSessionId:" + zkSessionId + ", record:" + record);
if (record != null) {
String timestamp = getAttributeValue(inputLine, "time:");
formatter(liBw, timestamp, record.getId(), record.getSimpleField("SESSION_ID"),
"DELETE");
}
}
} else if (inputLine.indexOf("HEALTHREPORT/defaultPerfCounters") != -1) {
pos = inputLine.indexOf("HEALTHREPORT/defaultPerfCounters");
pos = inputLine.indexOf("data:{", pos);
if (pos != -1) {
String timestamp = getAttributeValue(inputLine, "time:");
ZNRecord record =
(ZNRecord) _deserializer.deserialize(inputLine.substring(pos + 5).getBytes());
String path = getAttributeValue(inputLine, "path:");
if (path != null) {
String instance = HelixUtil.getInstanceNameFromPath(path);
formatter(hrPerfBw, timestamp, instance, record.getSimpleField("availableCPUs"),
record.getSimpleField("averageSystemLoad"),
record.getSimpleField("freeJvmMemory"),
record.getSimpleField("freePhysicalMemory"),
record.getSimpleField("totalJvmMemory"));
}
}
}
}
br.close();
isBw.close();
cfgBw.close();
evBw.close();
smdCntBw.close();
smdNextBw.close();
csBw.close();
msgBw.close();
liBw.close();
hrPerfBw.close();
} catch (Exception e) {
System.err.println("Error: " + e.getMessage());
}
}
}
| 9,985 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/HelixUtil.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.UUID;
import java.util.function.Function;
import java.util.stream.Collectors;
import com.google.common.base.Joiner;
import org.apache.helix.BaseDataAccessor;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyType;
import org.apache.helix.controller.common.PartitionStateMap;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.rebalancer.AbstractRebalancer;
import org.apache.helix.controller.rebalancer.strategy.RebalanceStrategy;
import org.apache.helix.controller.rebalancer.waged.ReadOnlyWagedRebalancer;
import org.apache.helix.controller.stages.AttributeName;
import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
import org.apache.helix.controller.stages.BestPossibleStateOutput;
import org.apache.helix.controller.stages.ClusterEvent;
import org.apache.helix.controller.stages.ClusterEventType;
import org.apache.helix.controller.stages.CurrentStateComputationStage;
import org.apache.helix.controller.stages.ResourceComputationStage;
import org.apache.helix.manager.zk.ZKHelixDataAccessor;
import org.apache.helix.manager.zk.ZkBaseDataAccessor;
import org.apache.helix.manager.zk.ZkBucketDataAccessor;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.Partition;
import org.apache.helix.model.PauseSignal;
import org.apache.helix.model.ResourceAssignment;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public final class HelixUtil {
private static final Logger LOG = LoggerFactory.getLogger(HelixUtil.class);
private HelixUtil() {
}
public static String getInstanceNameFromPath(String path) {
// path structure
// /<cluster_name>/instances/<instance_name>/[currentStates/messages]
if (path.contains("/" + PropertyType.INSTANCES + "/")) {
String[] split = path.split("\\/");
if (split.length > 3) {
return split[3];
}
}
return null;
}
/**
 * Get the parent path of the given path.
 * @param path a ZK path
 * @return "/" if path has the form "/xxx"; null if path is "/"; otherwise the parent path
 */
public static String getZkParentPath(String path) {
if (path.equals("/")) {
return null;
}
int idx = path.lastIndexOf('/');
return idx == 0 ? "/" : path.substring(0, idx);
}
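// Illustrative examples (not part of the original source):
//   getZkParentPath("/")        -> null
//   getZkParentPath("/cluster") -> "/"
//   getZkParentPath("/cluster/IDEALSTATES/db") -> "/cluster/IDEALSTATES"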
/**
 * Get the last part of the given ZK path.
 * @param path a ZK path
 * @return the last path segment
 */
public static String getZkName(String path) {
return path.substring(path.lastIndexOf('/') + 1);
}
/**
* Convert a cluster name to a sharding key for routing purpose by adding a "/" to the front.
* Check if the cluster name already has a "/" at the front; if so just return it.
* @param clusterName - cluster name
* @return the sharding key corresponding the cluster name
*/
public static String clusterNameToShardingKey(String clusterName) {
return clusterName.charAt(0) == '/' ? clusterName : "/" + clusterName;
}
public static String serializeByComma(List<String> objects) {
return Joiner.on(",").join(objects);
}
public static List<String> deserializeByComma(String object) {
if (object.length() == 0) {
return Collections.EMPTY_LIST;
}
return Arrays.asList(object.split(","));
}
/**
 * Parse csv-formatted key-value pairs into a map.
 * @param keyValuePairs csv-formatted key-value pairs, e.g. k1=v1,k2=v2,...
 * @return a sorted map of the parsed key-value pairs
*/
public static Map<String, String> parseCsvFormatedKeyValuePairs(String keyValuePairs) {
String[] pairs = keyValuePairs.split("[\\s,]");
Map<String, String> keyValueMap = new TreeMap<String, String>();
for (String pair : pairs) {
int idx = pair.indexOf('=');
if (idx == -1) {
LOG.error("Invalid key-value pair: " + pair + ". Igonore it.");
continue;
}
String key = pair.substring(0, idx);
String value = pair.substring(idx + 1);
keyValueMap.put(key, value);
}
return keyValueMap;
}
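// Illustrative example (not part of the original source):
//   parseCsvFormatedKeyValuePairs("k1=v1,k2=v2") returns a map {k1=v1, k2=v2};
// a token without '=' (e.g. "k3") is logged as an error and skipped rather
// than failing the whole parse.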
/**
* Attempts to load the class and delegates to TCCL if class is not found.
* Note: The approach is used as a last resort for environments like OSGi.
* @param className
* @return
* @throws ClassNotFoundException
*/
public static <T> Class<?> loadClass(Class<T> clazz, String className)
throws ClassNotFoundException {
try {
return clazz.getClassLoader().loadClass(className);
} catch (ClassNotFoundException ex) {
if (Thread.currentThread().getContextClassLoader() != null) {
return Thread.currentThread().getContextClassLoader().loadClass(className);
} else {
throw ex;
}
}
}
/**
* Returns the expected ideal ResourceAssignments for the given resources in the cluster
* calculated using the read-only WAGED rebalancer. The returned result is based on preference
* lists, which is the target stable assignment.
* @param metadataStoreAddress
* @param clusterConfig
* @param instanceConfigs
* @param liveInstances
* @param idealStates
* @param resourceConfigs
* @return
*/
public static Map<String, ResourceAssignment> getTargetAssignmentForWagedFullAuto(
String metadataStoreAddress, ClusterConfig clusterConfig,
List<InstanceConfig> instanceConfigs, List<String> liveInstances,
List<IdealState> idealStates, List<ResourceConfig> resourceConfigs) {
BaseDataAccessor<ZNRecord> baseDataAccessor = new ZkBaseDataAccessor<>(metadataStoreAddress);
Map<String, ResourceAssignment> result =
getAssignmentForWagedFullAutoImpl(new ZkBucketDataAccessor(metadataStoreAddress),
baseDataAccessor, clusterConfig, instanceConfigs, liveInstances, idealStates,
resourceConfigs, true);
baseDataAccessor.close();
return result;
}
/**
* Returns the expected ideal ResourceAssignments for the given resources in the cluster
* calculated using the read-only WAGED rebalancer. The returned result is based on preference
* lists, which is the target stable assignment.
* @param zkBucketDataAccessor
* @param baseDataAccessor
* @param clusterConfig
* @param instanceConfigs
* @param liveInstances
* @param idealStates
* @param resourceConfigs
* @return
*/
public static Map<String, ResourceAssignment> getTargetAssignmentForWagedFullAuto(
ZkBucketDataAccessor zkBucketDataAccessor, BaseDataAccessor<ZNRecord> baseDataAccessor,
ClusterConfig clusterConfig, List<InstanceConfig> instanceConfigs, List<String> liveInstances,
List<IdealState> idealStates, List<ResourceConfig> resourceConfigs) {
return getAssignmentForWagedFullAutoImpl(zkBucketDataAccessor, baseDataAccessor, clusterConfig,
instanceConfigs, liveInstances, idealStates, resourceConfigs, true);
}
/**
* Returns the expected ideal ResourceAssignments for the given resources in the cluster
* calculated using the read-only WAGED rebalancer. The returned result is based on partition
 * state mapping, which is the immediate assignment. The immediate assignment differs from
 * the final target assignment; for example, it could be an intermediate state that still
 * contains replicas that need to be dropped later.
* @param metadataStoreAddress
* @param clusterConfig
* @param instanceConfigs
* @param liveInstances
* @param idealStates
* @param resourceConfigs
* @return
*/
public static Map<String, ResourceAssignment> getImmediateAssignmentForWagedFullAuto(
String metadataStoreAddress, ClusterConfig clusterConfig,
List<InstanceConfig> instanceConfigs, List<String> liveInstances,
List<IdealState> idealStates, List<ResourceConfig> resourceConfigs) {
BaseDataAccessor<ZNRecord> baseDataAccessor = new ZkBaseDataAccessor<>(metadataStoreAddress);
Map<String, ResourceAssignment> result =
getAssignmentForWagedFullAutoImpl(new ZkBucketDataAccessor(metadataStoreAddress),
baseDataAccessor, clusterConfig, instanceConfigs, liveInstances, idealStates,
resourceConfigs, false);
baseDataAccessor.close();
return result;
}
/*
* If usePrefLists is set to true, the returned assignment is based on preference lists; if
* false, the returned assignment is based on partition state mapping, which may differ from
* preference lists.
*/
private static Map<String, ResourceAssignment> getAssignmentForWagedFullAutoImpl(
ZkBucketDataAccessor zkBucketDataAccessor, BaseDataAccessor<ZNRecord> baseDataAccessor,
ClusterConfig clusterConfig, List<InstanceConfig> instanceConfigs, List<String> liveInstances,
List<IdealState> idealStates, List<ResourceConfig> resourceConfigs, boolean usePrefLists) {
// Copy the cluster config and make globalRebalance happen synchronously
// Otherwise, globalRebalance may not complete and this util might end up returning
// an empty assignment.
ClusterConfig globalSyncClusterConfig = new ClusterConfig(clusterConfig.getRecord());
globalSyncClusterConfig.setGlobalRebalanceAsyncMode(false);
// Prepare a data accessor for a dataProvider (cache) refresh
HelixDataAccessor helixDataAccessor =
new ZKHelixDataAccessor(globalSyncClusterConfig.getClusterName(), baseDataAccessor);
// Create an instance of read-only WAGED rebalancer
ReadOnlyWagedRebalancer readOnlyWagedRebalancer =
new ReadOnlyWagedRebalancer(zkBucketDataAccessor, globalSyncClusterConfig.getClusterName(),
globalSyncClusterConfig.getGlobalRebalancePreference());
// Use a dummy event to run the required stages for BestPossibleState calculation
// Attributes RESOURCES and RESOURCES_TO_REBALANCE are populated in ResourceComputationStage
ClusterEvent event =
new ClusterEvent(globalSyncClusterConfig.getClusterName(), ClusterEventType.Unknown);
try {
// First, prepare waged rebalancer with a snapshot, so that it can react to the difference
// between the current snapshot and the provided parameters which act as the new snapshot
ResourceControllerDataProvider dataProvider =
new ResourceControllerDataProvider(globalSyncClusterConfig.getClusterName());
dataProvider.requireFullRefresh();
dataProvider.refresh(helixDataAccessor);
readOnlyWagedRebalancer.updateChangeDetectorSnapshots(dataProvider);
// Refresh dataProvider completely to populate _refreshedChangeTypes
dataProvider.requireFullRefresh();
dataProvider.refresh(helixDataAccessor);
dataProvider.setClusterConfig(globalSyncClusterConfig);
dataProvider.setInstanceConfigMap(instanceConfigs.stream()
.collect(Collectors.toMap(InstanceConfig::getInstanceName, Function.identity())));
// For LiveInstances, we must preserve the existing session IDs
// So read LiveInstance objects from the cluster and do a "retainAll" on them
// liveInstanceMap is an unmodifiable map, so we filter using a stream
Map<String, LiveInstance> liveInstanceMap = dataProvider.getLiveInstances();
List<LiveInstance> filteredLiveInstances = liveInstanceMap.entrySet().stream()
.filter(entry -> liveInstances.contains(entry.getKey())).map(Map.Entry::getValue)
.collect(Collectors.toList());
// Synthetically create LiveInstance objects that are passed in as the parameter
// First, determine which new LiveInstance objects need to be created
List<String> liveInstanceList = new ArrayList<>(liveInstances);
liveInstanceList.removeAll(filteredLiveInstances.stream().map(LiveInstance::getInstanceName)
.collect(Collectors.toList()));
liveInstanceList.forEach(liveInstanceName -> {
// Create a new LiveInstance object and give it a random UUID as a session ID
LiveInstance newLiveInstanceObj = new LiveInstance(liveInstanceName);
newLiveInstanceObj.getRecord()
.setSimpleField(LiveInstance.LiveInstanceProperty.SESSION_ID.name(),
UUID.randomUUID().toString().replace("-", ""));
filteredLiveInstances.add(newLiveInstanceObj);
});
dataProvider.setLiveInstances(new ArrayList<>(filteredLiveInstances));
dataProvider.setIdealStates(idealStates);
dataProvider.setResourceConfigMap(resourceConfigs.stream()
.collect(Collectors.toMap(ResourceConfig::getResourceName, Function.identity())));
event.addAttribute(AttributeName.ControllerDataProvider.name(), dataProvider);
event.addAttribute(AttributeName.STATEFUL_REBALANCER.name(), readOnlyWagedRebalancer);
// Run the required stages to obtain the BestPossibleOutput
RebalanceUtil.runStage(event, new ResourceComputationStage());
RebalanceUtil.runStage(event, new CurrentStateComputationStage());
RebalanceUtil.runStage(event, new BestPossibleStateCalcStage());
} catch (Exception e) {
LOG.error("getIdealAssignmentForWagedFullAuto(): Failed to compute ResourceAssignments!", e);
} finally {
// Close all ZK connections
readOnlyWagedRebalancer.close();
}
// Convert the resulting BestPossibleStateOutput to Map<String, ResourceAssignment>
Map<String, ResourceAssignment> result = new HashMap<>();
BestPossibleStateOutput output = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
if (output == null || (output.getPreferenceLists() == null && output.getResourceStatesMap()
.isEmpty())) {
throw new HelixException(
"getIdealAssignmentForWagedFullAuto(): Calculation failed: Failed to compute BestPossibleState!");
}
for (IdealState idealState : idealStates) {
String resourceName = idealState.getResourceName();
StateModelDefinition stateModelDefinition =
BuiltInStateModelDefinitions.valueOf(idealState.getStateModelDefRef())
.getStateModelDefinition();
PartitionStateMap partitionStateMap = output.getPartitionStateMap(resourceName);
ResourceAssignment resourceAssignment = new ResourceAssignment(resourceName);
for (String partitionName : idealState.getPartitionSet()) {
Partition partition = new Partition(partitionName);
if (usePrefLists) {
resourceAssignment.addReplicaMap(partition,
computeIdealMapping(output.getPreferenceList(resourceName, partitionName),
stateModelDefinition, new HashSet<>(liveInstances)));
} else {
resourceAssignment.addReplicaMap(partition, partitionStateMap.getPartitionMap(partition));
}
}
result.put(resourceName, resourceAssignment);
}
return result;
}
/**
* This method provides the ideal state mapping with corresponding rebalance strategy
* @param clusterConfig The cluster config
* @param instanceConfigs List of all existing instance configs including disabled/down instances
* @param liveInstances List of live and enabled instance names
* @param idealState The ideal state of current resource. If input is null, will be
* treated as newly created resource.
* @param partitions The list of partition names
* @param strategyClassName The rebalance strategy. e.g. AutoRebalanceStrategy
* @return A map of ideal state assignment as partition -> instance -> state
*/
public static Map<String, Map<String, String>> getIdealAssignmentForFullAuto(
ClusterConfig clusterConfig, List<InstanceConfig> instanceConfigs, List<String> liveInstances,
IdealState idealState, List<String> partitions, String strategyClassName)
throws ClassNotFoundException, IllegalAccessException, InstantiationException {
List<String> allNodes = new ArrayList<>();
Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();
for (InstanceConfig instanceConfig : instanceConfigs) {
allNodes.add(instanceConfig.getInstanceName());
instanceConfigMap.put(instanceConfig.getInstanceName(), instanceConfig);
}
ResourceControllerDataProvider cache = new ResourceControllerDataProvider();
cache.setClusterConfig(clusterConfig);
cache.setInstanceConfigMap(instanceConfigMap);
StateModelDefinition stateModelDefinition =
BuiltInStateModelDefinitions.valueOf(idealState.getStateModelDefRef())
.getStateModelDefinition();
RebalanceStrategy strategy =
RebalanceStrategy.class.cast(loadClass(HelixUtil.class, strategyClassName).newInstance());
strategy.init(idealState.getResourceName(), partitions, stateModelDefinition
.getStateCountMap(liveInstances.size(), Integer.parseInt(idealState.getReplicas())),
idealState.getMaxPartitionsPerInstance());
// Remove all disabled instances so that Helix will not consider them live.
List<String> disabledInstance = instanceConfigs.stream()
.filter(instanceConfig -> !InstanceValidationUtil.isInstanceEnabled(instanceConfig, clusterConfig))
.map(InstanceConfig::getInstanceName)
.collect(Collectors.toList());
liveInstances.removeAll(disabledInstance);
Map<String, List<String>> preferenceLists = strategy
.computePartitionAssignment(allNodes, liveInstances,
new HashMap<String, Map<String, String>>(), cache).getListFields();
Map<String, Map<String, String>> idealStateMapping = new HashMap<>();
Set<String> liveInstanceSet = new HashSet<>(liveInstances);
for (String partitionName : preferenceLists.keySet()) {
idealStateMapping.put(partitionName,
computeIdealMapping(preferenceLists.get(partitionName), stateModelDefinition,
liveInstanceSet));
}
return idealStateMapping;
}
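// Minimal usage sketch (hypothetical inputs, not part of the original source):
//   Map<String, Map<String, String>> mapping =
//       HelixUtil.getIdealAssignmentForFullAuto(clusterConfig, instanceConfigs,
//           liveInstances, idealState, Arrays.asList("MyDB_0", "MyDB_1"),
//           "org.apache.helix.controller.rebalancer.strategy.CrushEdRebalanceStrategy");
// The result maps each partition to an instance->state map, e.g.
//   {MyDB_0={host1=MASTER, host2=SLAVE}, MyDB_1={host2=MASTER, host1=SLAVE}}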
/**
* compute the ideal mapping for resource in Full-Auto and Semi-Auto based on its preference list
*/
public static Map<String, String> computeIdealMapping(List<String> preferenceList,
StateModelDefinition stateModelDef, Set<String> liveAndEnabled) {
return computeIdealMapping(preferenceList, stateModelDef, liveAndEnabled,
Collections.emptySet());
}
/**
* compute the ideal mapping for resource in Full-Auto and Semi-Auto based on its preference list
*/
public static Map<String, String> computeIdealMapping(List<String> preferenceList,
StateModelDefinition stateModelDef, Set<String> liveInstanceSet,
Set<String> disabledInstancesForPartition) {
Map<String, String> idealStateMap = new HashMap<String, String>();
if (preferenceList == null) {
return idealStateMap;
}
for (String instance : preferenceList) {
if (disabledInstancesForPartition.contains(instance) && liveInstanceSet.contains(instance)) {
idealStateMap.put(instance, stateModelDef.getInitialState());
}
}
Set<String> liveAndEnabledInstances = new HashSet<>(liveInstanceSet);
liveAndEnabledInstances.removeAll(disabledInstancesForPartition);
List<String> statesPriorityList = stateModelDef.getStatesPriorityList();
Set<String> assigned = new HashSet<String>();
for (String state : statesPriorityList) {
int stateCount = AbstractRebalancer
.getStateCount(state, stateModelDef, liveAndEnabledInstances.size(), preferenceList.size());
for (String instance : preferenceList) {
if (stateCount <= 0) {
break;
}
if (!assigned.contains(instance) && liveAndEnabledInstances.contains(instance)) {
idealStateMap.put(instance, state);
assigned.add(instance);
stateCount--;
}
}
}
return idealStateMap;
}
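// Illustrative example (not part of the original source), assuming the
// built-in MasterSlave state model (MASTER count "1", SLAVE count "R"):
//   preferenceList = [host1, host2, host3], liveInstanceSet = {host1, host3}
//   computeIdealMapping(...) -> {host1=MASTER, host3=SLAVE}
// host2 is skipped because it is not live; a live instance listed in
// disabledInstancesForPartition would instead be pinned to the initial state.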
/**
* Remove the given message from ZK using the given accessor. This function will
* not throw exception
* @param accessor HelixDataAccessor
* @param msg message to remove
* @param instanceName name of the instance on which the message sits
* @return true if success else false
*/
public static boolean removeMessageFromZK(HelixDataAccessor accessor, Message msg,
String instanceName) {
try {
return accessor.removeProperty(msg.getKey(accessor.keyBuilder(), instanceName));
} catch (Exception e) {
LOG.error("Caught exception while removing message {}.", msg, e);
}
return false;
}
/**
* Get the value of system property
* @param propertyKey
* @param propertyDefaultValue
* @return
*/
public static int getSystemPropertyAsInt(String propertyKey, int propertyDefaultValue) {
String valueString = System.getProperty(propertyKey, "" + propertyDefaultValue);
try {
int value = Integer.parseInt(valueString);
if (value > 0) {
return value;
}
} catch (NumberFormatException e) {
LOG.warn("Exception while parsing property: " + propertyKey + ", string: " + valueString
+ ", using default value: " + propertyDefaultValue);
}
return propertyDefaultValue;
}
/**
* Get the value of system property
* @param propertyKey
* @param propertyDefaultValue
* @return
*/
public static long getSystemPropertyAsLong(String propertyKey, long propertyDefaultValue) {
String valueString = System.getProperty(propertyKey, "" + propertyDefaultValue);
try {
long value = Long.parseLong(valueString);
if (value > 0) {
return value;
}
} catch (NumberFormatException e) {
LOG.warn("Exception while parsing property: " + propertyKey + ", string: " + valueString
+ ", using default value: " + propertyDefaultValue);
}
return propertyDefaultValue;
}
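// Illustrative example (not part of the original source; the property name
// "helix.example.timeout" is hypothetical):
//   System.setProperty("helix.example.timeout", "3000");
//   getSystemPropertyAsLong("helix.example.timeout", 1000L); // returns 3000
// Non-numeric or non-positive values fall back to the supplied default.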
/**
* Compose the config for an instance
* @param instanceName the unique name of the instance
* @return InstanceConfig
*/
public static InstanceConfig composeInstanceConfig(String instanceName) {
return new InstanceConfig.Builder().build(instanceName);
}
/**
* Checks whether or not the cluster is in management mode. It checks:
* - pause signal
* - live instances: whether any live instance is not in normal status, eg. frozen.
* - messages: whether live instance has a participant status change message
*
* @param pauseSignal pause signal
* @param liveInstanceMap map of live instances
* @param enabledLiveInstances set of enabled live instance names. They should be all included
* in the liveInstanceMap.
* @param instancesMessages a map of all instances' messages.
* @return true if cluster is in management mode; otherwise, false
*/
public static boolean inManagementMode(PauseSignal pauseSignal,
Map<String, LiveInstance> liveInstanceMap, Set<String> enabledLiveInstances,
Map<String, Collection<Message>> instancesMessages) {
// Check pause signal and abnormal live instances (eg. in freeze mode)
// TODO: should check maintenance signal when moving maintenance to management pipeline
return pauseSignal != null || enabledLiveInstances.stream().anyMatch(
instance -> isInstanceInManagementMode(instance, liveInstanceMap, instancesMessages));
}
private static boolean isInstanceInManagementMode(String instance,
Map<String, LiveInstance> liveInstanceMap,
Map<String, Collection<Message>> instancesMessages) {
// Check live instance status and participant status change message
return LiveInstance.LiveInstanceStatus.FROZEN.equals(liveInstanceMap.get(instance).getStatus())
|| (instancesMessages.getOrDefault(instance, Collections.emptyList()).stream()
.anyMatch(Message::isParticipantStatusChangeType));
}
/**
* Sort zoneMapping for each virtual group and flatten to a list.
* @param zoneMapping virtual group mapping.
* @return a list of instances sorted and flattened.
*/
public static List<String> sortAndFlattenZoneMapping(Map<String, Set<String>> zoneMapping) {
return zoneMapping
.entrySet()
.stream()
.sorted(Map.Entry.comparingByKey())
.flatMap(entry -> entry.getValue().stream().sorted())
.collect(Collectors.toList());
}
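// Illustrative example (not part of the original source):
//   zoneMapping = {zone2=[hostC, hostA], zone1=[hostB]}
//   sortAndFlattenZoneMapping(zoneMapping) -> [hostB, hostA, hostC]
// Zones are visited in sorted key order and instances are sorted within each zone.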
}
| 9,986 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/ZNRecordUtil.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
//TODO find a proper place for these methods
public class ZNRecordUtil {
private static final Logger logger = LoggerFactory.getLogger(ZNRecordUtil.class.getName());
private ZNRecordUtil() {
}
public static ZNRecord find(String id, List<ZNRecord> list) {
for (ZNRecord record : list) {
if (record.getId() != null && record.getId().equals(id)) {
return record;
}
}
return null;
}
public static Map<String, ZNRecord> convertListToMap(List<ZNRecord> recordList) {
Map<String, ZNRecord> recordMap = new HashMap<String, ZNRecord>();
for (ZNRecord record : recordList) {
if (record.getId() != null) {
recordMap.put(record.getId(), record);
}
}
return recordMap;
}
public static <T extends Object> List<T> convertListToTypedList(List<ZNRecord> recordList,
Class<T> clazz) {
List<T> list = new ArrayList<T>();
for (ZNRecord record : recordList) {
if (record.getId() == null) {
logger.error("Invalid record: Id missing in " + record);
continue;
}
try {
Constructor<T> constructor = clazz.getConstructor(new Class[] {
ZNRecord.class
});
T instance = constructor.newInstance(record);
list.add(instance);
} catch (Exception e) {
logger.error("Error creating an Object of type:" + clazz.getCanonicalName(), e);
}
}
return list;
}
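// Illustrative example (not part of the original source; "records" is a
// hypothetical List<ZNRecord>): converting raw records into typed model
// objects that expose a ZNRecord constructor.
//   List<LiveInstance> instances =
//       ZNRecordUtil.convertListToTypedList(records, LiveInstance.class);
// Records without an id are logged and skipped.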
public static <T extends Object> Map<String, T> convertListToTypedMap(List<ZNRecord> recordList,
Class<T> clazz) {
Map<String, T> map = new HashMap<String, T>();
for (ZNRecord record : recordList) {
if (record.getId() == null) {
logger.error("Invalid record: Id missing in " + record);
continue;
}
try {
Constructor<T> constructor = clazz.getConstructor(new Class[] {
ZNRecord.class
});
T instance = constructor.newInstance(record);
map.put(record.getId(), instance);
} catch (Exception e) {
logger.error("Error creating an Object of type:" + clazz.getCanonicalName(), e);
}
}
return map;
}
public static <T extends Object> List<T> convertMapToList(Map<String, T> map) {
List<T> list = new ArrayList<T>();
for (T t : map.values()) {
list.add(t);
}
return list;
}
}
| 9,987 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/StringTemplate.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class StringTemplate {
private static Logger LOG = LoggerFactory.getLogger(StringTemplate.class);
Map<Enum, Map<Integer, String>> templateMap = new HashMap<Enum, Map<Integer, String>>();
static Pattern pattern = Pattern.compile("(\\{.+?\\})");
public void addEntry(Enum type, int numKeys, String template) {
if (!templateMap.containsKey(type)) {
templateMap.put(type, new HashMap<Integer, String>());
}
LOG.trace("Add template for type: " + type.name() + ", arguments: " + numKeys + ", template: "
+ template);
templateMap.get(type).put(numKeys, template);
}
public String instantiate(Enum type, String... keys) {
if (keys == null) {
keys = new String[] {};
}
String template = null;
if (templateMap.containsKey(type)) {
template = templateMap.get(type).get(keys.length);
}
String result = null;
if (template != null) {
result = template;
Matcher matcher = pattern.matcher(template);
int count = 0;
while (matcher.find()) {
String var = matcher.group();
result = result.replace(var, keys[count]);
count++;
}
}
if (result == null || result.indexOf('{') > -1 || result.indexOf('}') > -1) {
String errMsg =
"Unable to instantiate template: " + template + " using keys: " + Arrays.toString(keys);
LOG.error(errMsg);
throw new IllegalArgumentException(errMsg);
}
return result;
}
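// Illustrative example (not part of the original source):
//   StringTemplate template = new StringTemplate();
//   template.addEntry(PropertyType.IDEALSTATES, 2, "/{clusterName}/IDEALSTATES/{resourceName}");
//   template.instantiate(PropertyType.IDEALSTATES, "mycluster", "mydb");
//   // -> "/mycluster/IDEALSTATES/mydb"
// Keys are substituted into the "{...}" placeholders in order of appearance.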
}
| 9,988 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/RebalanceUtil.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.helix.HelixException;
import org.apache.helix.controller.GenericHelixController;
import org.apache.helix.controller.pipeline.Stage;
import org.apache.helix.controller.pipeline.StageContext;
import org.apache.helix.controller.stages.BestPossibleStateOutput;
import org.apache.helix.controller.stages.ClusterEvent;
import org.apache.helix.controller.stages.CurrentStateOutput;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.Partition;
import org.apache.helix.model.StateModelDefinition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class RebalanceUtil {
private static final Logger LOG = LoggerFactory.getLogger(RebalanceUtil.class.getName());
public static Map<String, Object> buildInternalIdealState(IdealState state) {
// Try to parse the partition number from a name of the form DB_n. If that fails,
// sort the partitions and assign ids.
Map<String, Integer> partitionIndex = new HashMap<String, Integer>();
Map<String, String> reversePartitionIndex = new HashMap<String, String>();
boolean indexInPartitionName = true;
for (String partitionId : state.getPartitionSet()) {
int lastPos = partitionId.lastIndexOf("_");
if (lastPos < 0) {
indexInPartitionName = false;
break;
}
try {
String idStr = partitionId.substring(lastPos + 1);
int partition = Integer.parseInt(idStr);
partitionIndex.put(partitionId, partition);
reversePartitionIndex.put(state.getResourceName() + "_" + partition, partitionId);
} catch (Exception e) {
indexInPartitionName = false;
partitionIndex.clear();
reversePartitionIndex.clear();
break;
}
}
if (!indexInPartitionName) {
List<String> partitions = new ArrayList<String>();
partitions.addAll(state.getPartitionSet());
Collections.sort(partitions);
for (int i = 0; i < partitions.size(); i++) {
partitionIndex.put(partitions.get(i), i);
reversePartitionIndex.put(state.getResourceName() + "_" + i, partitions.get(i));
}
}
Map<String, List<Integer>> nodeMasterAssignmentMap = new TreeMap<String, List<Integer>>();
Map<String, Map<String, List<Integer>>> combinedNodeSlaveAssignmentMap =
new TreeMap<String, Map<String, List<Integer>>>();
for (String partition : state.getPartitionSet()) {
List<String> instances = state.getRecord().getListField(partition);
String master = instances.get(0);
if (!nodeMasterAssignmentMap.containsKey(master)) {
nodeMasterAssignmentMap.put(master, new ArrayList<Integer>());
}
if (!combinedNodeSlaveAssignmentMap.containsKey(master)) {
combinedNodeSlaveAssignmentMap.put(master, new TreeMap<String, List<Integer>>());
}
nodeMasterAssignmentMap.get(master).add(partitionIndex.get(partition));
for (int i = 1; i < instances.size(); i++) {
String instance = instances.get(i);
Map<String, List<Integer>> slaveMap = combinedNodeSlaveAssignmentMap.get(master);
if (!slaveMap.containsKey(instance)) {
slaveMap.put(instance, new ArrayList<Integer>());
}
slaveMap.get(instance).add(partitionIndex.get(partition));
}
}
Map<String, Object> result = new TreeMap<String, Object>();
result.put("MasterAssignmentMap", nodeMasterAssignmentMap);
result.put("SlaveAssignmentMap", combinedNodeSlaveAssignmentMap);
result.put("replicas", Integer.parseInt(state.getReplicas()));
result.put("partitions", new Integer(state.getRecord().getListFields().size()));
result.put("reversePartitionIndex", reversePartitionIndex);
return result;
}
public static String[] parseStates(String clusterName, StateModelDefinition stateModDef) {
String[] result = new String[2];
String masterStateValue = null, slaveStateValue = null;
// StateModelDefinition def = new StateModelDefinition(stateModDef);
List<String> statePriorityList = stateModDef.getStatesPriorityList();
for (String state : statePriorityList) {
String count = stateModDef.getNumInstancesPerState(state);
if (count.equals("1")) {
if (masterStateValue != null) {
throw new HelixException("Invalid or unsupported state model definition");
}
masterStateValue = state;
} else if (count.equalsIgnoreCase("R")) {
if (slaveStateValue != null) {
throw new HelixException("Invalid or unsupported state model definition");
}
slaveStateValue = state;
} else if (count.equalsIgnoreCase("N")) {
if (!(masterStateValue == null && slaveStateValue == null)) {
throw new HelixException("Invalid or unsupported state model definition");
}
masterStateValue = slaveStateValue = state;
}
}
if (masterStateValue == null && slaveStateValue == null) {
throw new HelixException("Invalid or unsupported state model definition");
}
if (masterStateValue == null) {
masterStateValue = slaveStateValue;
}
result[0] = masterStateValue;
result[1] = slaveStateValue;
return result;
}
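// Illustrative example (not part of the original source): for the built-in
// MasterSlave model, where MASTER has count "1" and SLAVE has count "R",
//   parseStates(clusterName, masterSlaveDef) returns ["MASTER", "SLAVE"].
// A state with count "N" is used as both the master and the slave value.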
/**
* Enables/disables controller to run management mode pipeline.
*
* @param clusterName target cluster name
* @param enabled enable/disable controller to management mode pipeline
*/
public static void enableManagementMode(String clusterName, boolean enabled) {
GenericHelixController leaderController =
GenericHelixController.getLeaderController(clusterName);
if (leaderController != null) {
LOG.info("Switching management mode pipeline for cluster={}, enabled={}", clusterName,
enabled);
leaderController.setInManagementMode(enabled);
} else {
throw new HelixException(String.format("Failed to switch management mode pipeline, "
+ "enabled=%s. Controller for cluster %s does not exist", enabled, clusterName));
}
// Triggers an event to immediately run the pipeline
scheduleOnDemandPipeline(clusterName, 0L);
}
public static void scheduleOnDemandPipeline(String clusterName, long delay) {
scheduleOnDemandPipeline(clusterName, delay, true);
}
public static void scheduleOnDemandPipeline(String clusterName, long delay,
boolean shouldRefreshCache) {
if (clusterName == null || delay < 0L) {
LOG.warn("Invalid input: [clusterName: {}, delay: {}], skip the pipeline issuing.", clusterName, delay);
return;
}
GenericHelixController leaderController =
GenericHelixController.getLeaderController(clusterName);
if (leaderController != null) {
leaderController.scheduleOnDemandRebalance(delay, shouldRefreshCache);
} else {
LOG.error("Failed to issue a pipeline. Controller for cluster {} does not exist.",
clusterName);
}
}
/**
* Build best possible state out by copying the state map from current state output.
* It'll be used for generating pending ST cancellation messages.
*
* @param resourceNames collection of resource names
* @param currentStateOutput Current state output {@link CurrentStateOutput}
* @return {@link BestPossibleStateOutput}
*/
public static BestPossibleStateOutput buildBestPossibleState(Collection<String> resourceNames,
CurrentStateOutput currentStateOutput) {
BestPossibleStateOutput output = new BestPossibleStateOutput();
for (String resource : resourceNames) {
Map<Partition, Map<String, String>> currentStateMap =
currentStateOutput.getCurrentStateMap(resource);
if (currentStateMap != null) {
output.setState(resource, currentStateMap);
}
}
return output;
}
/**
* runStage allows the run of individual stages. It can be used to mock a part of the Controller
* pipeline run.
*
* An example usage is as follows:
* runStage(event, new ResourceComputationStage());
* runStage(event, new CurrentStateComputationStage());
* runStage(event, new BestPossibleStateCalcStage());
* By running these stages, we are able to obtain BestPossibleStateOutput in the event object.
* @param event
* @param stage
* @throws Exception
*/
public static void runStage(ClusterEvent event, Stage stage) throws Exception {
StageContext context = new StageContext();
stage.init(context);
stage.preProcess();
stage.process(event);
stage.postProcess();
}
}
| 9,989 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/WeightAwareRebalanceUtil.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.HelixException;
import org.apache.helix.api.config.RebalanceConfig;
import org.apache.helix.api.rebalancer.constraint.AbstractRebalanceHardConstraint;
import org.apache.helix.api.rebalancer.constraint.AbstractRebalanceSoftConstraint;
import org.apache.helix.controller.common.PartitionStateMap;
import org.apache.helix.controller.common.ResourcesStateMap;
import org.apache.helix.controller.dataproviders.ResourceControllerDataProvider;
import org.apache.helix.controller.rebalancer.strategy.ConstraintRebalanceStrategy;
import org.apache.helix.model.BuiltInStateModelDefinitions;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Partition;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
/**
* A rebalance tool that generates a resource partition assignment based on the input.
* Note the assignment won't be automatically applied to the cluster. Users are supposed to
* apply the change.
*
* @see org.apache.helix.examples.WeightAwareRebalanceUtilExample WeightAwareRebalanceUtilExample
*/
public class WeightAwareRebalanceUtil {
private final ClusterConfig _clusterConfig;
private final Map<String, InstanceConfig> _instanceConfigMap = new HashMap<>();
// For the possible customized state models.
private final Map<String, StateModelDefinition> _stateModelDefs = new HashMap<>();
private final ResourceControllerDataProvider _dataCache;
private enum RebalanceOption {
INCREMENTAL,
FULL
}
/**
* Init the rebalance util with cluster and instances information.
*
* Note that it is not required to put any configuration items in these configs.
* However, in order to do topology aware rebalance, users need to set topology information such as Domain, fault zone, and TopologyAwareEnabled.
*
* The other config items will not be read or processed by the util.
*
* @param clusterConfig
* @param instanceConfigs InstanceConfigs for all assignment candidates.
* Note that all instances will be treated as enabled and alive during the calculation.
*/
public WeightAwareRebalanceUtil(ClusterConfig clusterConfig,
List<InstanceConfig> instanceConfigs) {
for (InstanceConfig instanceConfig : instanceConfigs) {
// ensure the instance is enabled
instanceConfig.setInstanceEnabled(true);
_instanceConfigMap.put(instanceConfig.getInstanceName(), instanceConfig);
}
// ensure no instance is disabled
clusterConfig.setDisabledInstances(Collections.<String, String>emptyMap());
_clusterConfig = clusterConfig;
_dataCache = new ResourceControllerDataProvider();
_dataCache.setInstanceConfigMap(_instanceConfigMap);
_dataCache.setClusterConfig(_clusterConfig);
List<LiveInstance> liveInstanceList = new ArrayList<>();
for (String instance : _instanceConfigMap.keySet()) {
LiveInstance liveInstance = new LiveInstance(instance);
liveInstanceList.add(liveInstance);
}
_dataCache.setLiveInstances(liveInstanceList);
}
/**
* Generate partition assignments for all new resources or partitions that have not been assigned yet.
* Note that a partition assignment that does not fit the state model will still be recalculated.
* For example, if the replica requirement is 3, but one partition has only 2 replicas, this partition will still
* be rebalanced even though an existing assignment exists.
*
* @param resourceConfigs Config of all the resources that need to be rebalanced.
* The tool throws Exception if any resource has no IS or broken/uninitialized IS.
* The tool throws Exception if any resource is in full-auto mode.
* Following fields are required by the tool:
* 1. ResourceName
* 2. StateModelDefRef
* 3. PreferenceLists, which includes all partitions in the resource
* 4. NumReplica
* @param existingAssignment The existing partition assignment of the resources specified in param resourceConfigs.
* Unrelated resource assignment will be discarded.
* @param hardConstraints Hard constraints for rebalancing.
* @param softConstraints Soft constraints for rebalancing.
*
* @return List of the IS that contains preference list and suggested state map
**/
public ResourcesStateMap buildIncrementalRebalanceAssignment(List<ResourceConfig> resourceConfigs,
ResourcesStateMap existingAssignment,
List<? extends AbstractRebalanceHardConstraint> hardConstraints,
List<? extends AbstractRebalanceSoftConstraint> softConstraints) {
return calculateAssignment(resourceConfigs, existingAssignment, RebalanceOption.INCREMENTAL,
hardConstraints, softConstraints);
}
/**
* Re-calculate the partition assignments for all the resources specified in resourceConfigs list.
*
* @param resourceConfigs Config of all the resources that need to be rebalanced.
* The tool throws Exception if any resource has no IS or broken/uninitialized IS.
* The tool throws Exception if any resource is in full-auto mode.
* Following fields are required by the tool:
* 1. ResourceName
* 2. StateModelDefRef
* 3. PreferenceLists, which includes all partitions in the resource
* 4. NumReplica
* @param preferredAssignment A set of preferred partition assignments for the resources specified in param resourceConfigs.
* The preference is not guaranteed.
* @param hardConstraints Hard constraints for rebalancing.
* @param softConstraints Soft constraints for rebalancing.
*
* @return List of the IS that contains preference list and suggested state map
**/
public ResourcesStateMap buildFullRebalanceAssignment(List<ResourceConfig> resourceConfigs,
ResourcesStateMap preferredAssignment,
List<? extends AbstractRebalanceHardConstraint> hardConstraints,
List<? extends AbstractRebalanceSoftConstraint> softConstraints) {
return calculateAssignment(resourceConfigs, preferredAssignment, RebalanceOption.FULL,
hardConstraints, softConstraints);
}
/**
* The method to generate partition assignment mappings.
*
* @param resourceConfigs Config of all the resources that need to be rebalanced.
* The tool throws Exception if any resource has no IS or broken/uninitialized IS.
* The tool throws Exception if any resource is in full-auto mode.
* Following fields are required by the tool:
* 1. ResourceName
* 2. StateModelDefRef
* 3. PreferenceLists, which includes all partitions in the resource
* 4. NumReplica
* @param existingAssignment The existing partition assignment of the resources specified in param resourceConfigs.
* @param option INCREMENTAL or FULL
* INCREMENTAL: Keep existing assignment. Only generate new partition assignment.
* FULL: Completely re-assign resources' partitions.
* @param hardConstraints Hard constraints for rebalancing.
* @param softConstraints Soft constraints for rebalancing.
*
* @return List of the IS that contains preference list and suggested state map
**/
private ResourcesStateMap calculateAssignment(List<ResourceConfig> resourceConfigs,
ResourcesStateMap existingAssignment, RebalanceOption option,
List<? extends AbstractRebalanceHardConstraint> hardConstraints,
List<? extends AbstractRebalanceSoftConstraint> softConstraints) {
// check the inputs
for (ResourceConfig resourceConfig : resourceConfigs) {
RebalanceConfig.RebalanceMode rebalanceMode =
resourceConfig.getRebalanceConfig().getRebalanceMode();
if (rebalanceMode.equals(RebalanceConfig.RebalanceMode.FULL_AUTO)) {
throw new HelixException(
"Resources that in FULL_AUTO mode are not supported: " + resourceConfig
.getResourceName());
}
}
ConstraintRebalanceStrategy constraintBasedStrategy =
new ConstraintRebalanceStrategy(hardConstraints, softConstraints);
ResourcesStateMap resultAssignment = new ResourcesStateMap();
for (ResourceConfig resourceConfig : resourceConfigs) {
Map<String, Map<String, String>> preferredMapping = new HashMap<>();
if (existingAssignment != null) {
PartitionStateMap partitionStateMap = existingAssignment.getPartitionStateMap(resourceConfig.getResourceName());
// keep existing assignment if rebalance option is INCREMENTAL
if (option.equals(RebalanceOption.INCREMENTAL) && partitionStateMap != null) {
for (Partition partition : partitionStateMap.getStateMap().keySet()) {
preferredMapping.put(partition.getPartitionName(), partitionStateMap.getPartitionMap(partition));
}
}
}
StateModelDefinition stateModelDefinition =
getStateModelDef(resourceConfig.getStateModelDefRef());
constraintBasedStrategy.init(resourceConfig.getResourceName(),
new ArrayList<>(resourceConfig.getPreferenceLists().keySet()), stateModelDefinition
.getStateCountMap(_instanceConfigMap.size(),
Integer.parseInt(resourceConfig.getNumReplica())), Integer.MAX_VALUE);
List<String> instanceNames = new ArrayList<>(_instanceConfigMap.keySet());
ZNRecord znRecord = constraintBasedStrategy
.computePartitionAssignment(instanceNames, instanceNames, preferredMapping, _dataCache);
Map<String, Map<String, String>> stateMap = znRecord.getMapFields();
// Construct resource states result
PartitionStateMap newStateMap = new PartitionStateMap(resourceConfig.getResourceName());
for (String partition : stateMap.keySet()) {
newStateMap.setState(new Partition(partition), stateMap.get(partition));
}
resultAssignment.setState(resourceConfig.getResourceName(), newStateMap);
}
return resultAssignment;
}
private StateModelDefinition getStateModelDef(String stateModelDefRef) {
if (_stateModelDefs.containsKey(stateModelDefRef)) {
return _stateModelDefs.get(stateModelDefRef);
}
return BuiltInStateModelDefinitions.valueOf(stateModelDefRef).getStateModelDefinition();
}
/**
* Since the tool is designed not to rely on ZK, if the application has customized state model,
* it needs to register to the tool before calling for an assignment.
*
* @param stateModelDefRef
* @param stateModelDefinition
*/
public void registerCustomizedStateModelDef(String stateModelDefRef,
StateModelDefinition stateModelDefinition) {
_stateModelDefs.put(stateModelDefRef, stateModelDefinition);
}
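// Minimal usage sketch (hypothetical constraint instances, not part of the
// original source):
//   WeightAwareRebalanceUtil util =
//       new WeightAwareRebalanceUtil(clusterConfig, instanceConfigs);
//   ResourcesStateMap assignment = util.buildFullRebalanceAssignment(
//       resourceConfigs, null /* no preferred assignment */,
//       Collections.singletonList(capacityHardConstraint),
//       Collections.singletonList(evennessSoftConstraint));
// See org.apache.helix.examples.WeightAwareRebalanceUtilExample for a complete
// end-to-end example.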
}
| 9,990 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/GZipCompressionUtil.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Deprecated - please use GZipCompressionUtil in zookeeper-api.
*/
@Deprecated
public class GZipCompressionUtil extends org.apache.helix.zookeeper.util.GZipCompressionUtil {
}
| 9,991 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/PathUtils.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Path related utilities
*/
public class PathUtils {
/**
* validate the provided znode path string
* @param path znode path string
* @param isSequential if the path is being created
* with a sequential flag
* @throws IllegalArgumentException if the path is invalid
*/
public static void validatePath(String path, boolean isSequential)
throws IllegalArgumentException {
validatePath(isSequential ? path + "1" : path);
}
/**
* Validate the provided znode path string
* @param path znode path string
* @throws IllegalArgumentException if the path is invalid
*/
public static void validatePath(String path) throws IllegalArgumentException {
if (path == null) {
throw new IllegalArgumentException("Path cannot be null");
}
if (path.length() == 0) {
throw new IllegalArgumentException("Path length must be > 0");
}
if (path.charAt(0) != '/') {
throw new IllegalArgumentException("Path must start with / character");
}
if (path.length() == 1) { // done checking - it's the root
return;
}
if (path.charAt(path.length() - 1) == '/') {
throw new IllegalArgumentException("Path must not end with / character");
}
String reason = null;
char lastc = '/';
char[] chars = path.toCharArray();
char c;
for (int i = 1; i < chars.length; lastc = chars[i], i++) {
c = chars[i];
if (c == 0) {
reason = "null character not allowed @" + i;
break;
} else if (c == '/' && lastc == '/') {
reason = "empty node name specified @" + i;
break;
} else if (c == '.' && lastc == '.') {
if (chars[i - 2] == '/' && ((i + 1 == chars.length) || chars[i + 1] == '/')) {
reason = "relative paths not allowed @" + i;
break;
}
} else if (c == '.') {
if (chars[i - 1] == '/' && ((i + 1 == chars.length) || chars[i + 1] == '/')) {
reason = "relative paths not allowed @" + i;
break;
}
} else if (c > '\u0000' && c <= '\u001f' || c >= '\u007f' && c <= '\u009F' || c >= '\ud800'
&& c <= '\uf8ff' || c >= '\ufff0' && c <= '\uffff') {
reason = "invalid charater @" + i;
break;
}
}
if (reason != null) {
throw new IllegalArgumentException("Invalid path string \"" + path + "\" caused by " + reason);
}
}
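// Illustrative examples (not part of the original source):
//   validatePath("/a/b");    // ok
//   validatePath("/a//b");   // throws: empty node name specified @3
//   validatePath("/a/../b"); // throws: relative paths not allowed @4
//   validatePath("/a/b/");   // throws: Path must not end with / character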
}
| 9,992 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/ConfigStringUtil.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
public final class ConfigStringUtil {
private static final String CONCATENATE_CONFIG_SPLITTER = ",";
public static final String CONCATENATE_CONFIG_JOINER = "=";
private ConfigStringUtil() {
    throw new java.lang.UnsupportedOperationException(
        "Utility class ConfigStringUtil cannot be instantiated");
}
/**
* Parse a string represented map into a map.
* @param inputStr "propName0=propVal0,propName1=propVal1"
   * @return map {[propName0, propVal0], [propName1, propVal1]}
*/
public static Map<String, String> parseConcatenatedConfig(String inputStr) {
Map<String, String> resultMap = new HashMap<>();
if (inputStr == null || inputStr.isEmpty()) {
return resultMap;
}
String[] pathPairs = inputStr.trim().split(CONCATENATE_CONFIG_SPLITTER);
for (String pair : pathPairs) {
String[] values = pair.split(CONCATENATE_CONFIG_JOINER);
if (values.length != 2 || values[0].isEmpty() || values[1].isEmpty()) {
throw new IllegalArgumentException(
String.format("Domain-Value pair %s is not valid.", pair));
}
resultMap.put(values[0].trim(), values[1].trim());
}
return resultMap;
}
/**
   * Concatenate a map into a string.
* @param inputMap {[propName0, propVal0], [propName1, propVal1]}
* @return String "propName0=propVal0,propName1=propVal1"
*/
public static String concatenateMapping(Map<String, String> inputMap) {
return inputMap
.entrySet()
.stream()
.map(entry -> entry.getKey() + CONCATENATE_CONFIG_JOINER + entry.getValue())
.collect(Collectors.joining(CONCATENATE_CONFIG_SPLITTER));
}
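  /*
   * Hedged usage sketch (not part of the original class): round-trips a map
   * through concatenateMapping and parseConcatenatedConfig. The property
   * names below are made up for illustration.
   */
  public static void main(String[] args) {
    Map<String, String> config = new HashMap<>();
    config.put("faultZone", "zone1");
    config.put("rack", "rack42");
    String concatenated = concatenateMapping(config); // e.g. "rack=rack42,faultZone=zone1"
    Map<String, String> roundTripped = parseConcatenatedConfig(concatenated);
    System.out.println(config.equals(roundTripped)); // prints: true
  }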
} | 9,993 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/JenkinsHash.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
public class JenkinsHash {
// max value to limit it to 4 bytes
private static final long MAX_VALUE = 0xFFFFFFFFL;
private static final long CRUSH_HASH_SEED = 1315423911L;
/**
* Convert a byte into a long value without making it negative.
*/
private static long byteToLong(byte b) {
long val = b & 0x7F;
if ((b & 0x80) != 0) {
val += 128;
}
return val;
}
/**
* Do addition and turn into 4 bytes.
*/
private static long add(long val, long add) {
return (val + add) & MAX_VALUE;
}
/**
* Do subtraction and turn into 4 bytes.
*/
private static long subtract(long val, long subtract) {
return (val - subtract) & MAX_VALUE;
}
/**
   * XOR val with xor and truncate to 4 bytes.
*/
private static long xor(long val, long xor) {
return (val ^ xor) & MAX_VALUE;
}
/**
* Left shift val by shift bits. Cut down to 4 bytes.
*/
private static long leftShift(long val, int shift) {
return (val << shift) & MAX_VALUE;
}
/**
* Convert 4 bytes from the buffer at offset into a long value.
*/
private static long fourByteToLong(byte[] bytes, int offset) {
return (byteToLong(bytes[offset + 0])
+ (byteToLong(bytes[offset + 1]) << 8)
+ (byteToLong(bytes[offset + 2]) << 16)
+ (byteToLong(bytes[offset + 3]) << 24));
}
/**
   * Mix up the values in the hash function (Bob Jenkins' 96-bit mix).
*/
private static Triple hashMix(Triple t) {
long a = t.a; long b = t.b; long c = t.c;
a = subtract(a, b); a = subtract(a, c); a = xor(a, c >> 13);
b = subtract(b, c); b = subtract(b, a); b = xor(b, leftShift(a, 8));
c = subtract(c, a); c = subtract(c, b); c = xor(c, (b >> 13));
a = subtract(a, b); a = subtract(a, c); a = xor(a, (c >> 12));
b = subtract(b, c); b = subtract(b, a); b = xor(b, leftShift(a, 16));
c = subtract(c, a); c = subtract(c, b); c = xor(c, (b >> 5));
a = subtract(a, b); a = subtract(a, c); a = xor(a, (c >> 3));
b = subtract(b, c); b = subtract(b, a); b = xor(b, leftShift(a, 10));
c = subtract(c, a); c = subtract(c, b); c = xor(c, (b >> 15));
return new Triple(a, b, c);
}
private static class Triple {
long a;
long b;
long c;
public Triple(long a, long b, long c) {
this.a = a; this.b = b; this.c = c;
}
}
public long hash(long a) {
long hash = xor(CRUSH_HASH_SEED, a);
long b = a;
long x = 231232L;
long y = 1232L;
Triple val = hashMix(new Triple(b, x, hash));
b = val.a; x = val.b; hash = val.c;
val = hashMix(new Triple(y, a, hash));
hash = val.c;
return hash;
}
public long hash(long a, long b) {
long hash = xor(xor(CRUSH_HASH_SEED, a), b);
long x = 231232L;
long y = 1232L;
Triple val = hashMix(new Triple(a, b, hash));
a = val.a; b = val.b; hash = val.c;
val = hashMix(new Triple(x, a, hash));
x = val.a; a = val.b; hash = val.c;
val = hashMix(new Triple(b, y, hash));
hash = val.c;
return hash;
}
public long hash(long a, long b, long c) {
long hash = xor(xor(xor(CRUSH_HASH_SEED, a), b), c);
long x = 231232L;
long y = 1232L;
Triple val = hashMix(new Triple(a, b, hash));
a = val.a; b = val.b; hash = val.c;
val = hashMix(new Triple(c, x, hash));
c = val.a; x = val.b; hash = val.c;
val = hashMix(new Triple(y, a, hash));
y = val.a; a = val.b; hash = val.c;
val = hashMix(new Triple(b, x, hash));
b = val.a; x = val.b; hash = val.c;
val = hashMix(new Triple(y, c, hash));
hash = val.c;
return hash;
}
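  /*
   * Hedged usage sketch (not part of the original class): the hash overloads
   * are deterministic and every result stays within the 4-byte range enforced
   * by MAX_VALUE. The sample inputs are arbitrary.
   */
  public static void main(String[] args) {
    JenkinsHash hasher = new JenkinsHash();
    long h1 = hasher.hash(12345L);
    long h2 = hasher.hash(12345L, 67890L);
    long h3 = hasher.hash(12345L, 67890L, 24680L);
    System.out.println(h1 + " " + h2 + " " + h3);
    System.out.println(h1 <= MAX_VALUE && h2 <= MAX_VALUE && h3 <= MAX_VALUE); // prints: true
  }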
}
| 9,994 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/InstanceValidationUtil.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.google.common.collect.ImmutableSet;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixDefinedState;
import org.apache.helix.HelixException;
import org.apache.helix.PropertyKey;
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.ResourceConfig;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.task.TaskConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utility class for validating Helix properties
 * Warning: each method validates a single instance property individually and independently;
 * no validation depends on the result of another.
*/
public class InstanceValidationUtil {
private static final Logger _logger = LoggerFactory.getLogger(InstanceValidationUtil.class);
public static Set<String> UNHEALTHY_STATES =
ImmutableSet.of(HelixDefinedState.DROPPED.name(), HelixDefinedState.ERROR.name());
static final String UNHEALTHY_PARTITION = "UNHEALTHY_PARTITION";
static final String HOST_NO_STATE_ERROR = "HOST_NO_STATE_ERROR:";
// The message that will be shown if partition is in initial state of the state model and
// partition health check has been skipped for that instance
static final String PARTITION_INITIAL_STATE_FAIL = "PARTITION_INITIAL_STATE_FAIL";
private InstanceValidationUtil() {
}
/**
* Method to check if the instance is enabled by configuration
* @param dataAccessor
* @param instanceName
* @return
*/
public static boolean isEnabled(HelixDataAccessor dataAccessor, String instanceName) {
PropertyKey.Builder propertyKeyBuilder = dataAccessor.keyBuilder();
InstanceConfig instanceConfig = dataAccessor.getProperty(propertyKeyBuilder.instanceConfig(instanceName));
ClusterConfig clusterConfig = dataAccessor.getProperty(propertyKeyBuilder.clusterConfig());
// TODO deprecate instance level config checks once migrated the enable status to cluster config only
if (instanceConfig == null || clusterConfig == null) {
throw new HelixException("InstanceConfig or ClusterConfig is NULL");
}
return isInstanceEnabled(instanceConfig, clusterConfig);
}
/**
* Check if the instance is enabled by configuration
* @param instanceConfig
* @param clusterConfig
* @return
*/
public static boolean isInstanceEnabled(InstanceConfig instanceConfig, ClusterConfig clusterConfig) {
if (instanceConfig == null) {
throw new HelixException("InstanceConfig is NULL");
}
boolean enabledInInstanceConfig = instanceConfig.getInstanceEnabled();
// TODO: batch enable/disable in cluster config is breaking backward compatibility with older library
// re-enable once batch enable/disable is ready
if (true || clusterConfig == null) {
return enabledInInstanceConfig;
}
boolean enabledInClusterConfig =
!clusterConfig.getDisabledInstances().containsKey(instanceConfig.getInstanceName());
return enabledInClusterConfig && enabledInInstanceConfig;
}
/**
* Method to check if the instance is up and running by configuration
* @param dataAccessor
* @param instanceName
* @return
*/
public static boolean isAlive(HelixDataAccessor dataAccessor, String instanceName) {
LiveInstance liveInstance =
dataAccessor.getProperty(dataAccessor.keyBuilder().liveInstance(instanceName));
return liveInstance != null;
}
/**
* Deprecated. Please use {@link #isResourceAssigned} instead.
*/
@Deprecated
public static boolean hasResourceAssigned(HelixDataAccessor dataAccessor, String clusterId,
String instanceName) {
return isResourceAssigned(dataAccessor, instanceName);
}
/**
   * Method to check if the instance is assigned at least one resource and is not in an idle
   * state, independent of the instance's alive/enabled status
* @param dataAccessor
* @param instanceName
* @return
*/
public static boolean isResourceAssigned(HelixDataAccessor dataAccessor, String instanceName) {
PropertyKey.Builder propertyKeyBuilder = dataAccessor.keyBuilder();
LiveInstance liveInstance = dataAccessor.getProperty(propertyKeyBuilder.liveInstance(instanceName));
if (liveInstance != null) {
String sessionId = liveInstance.getEphemeralOwner();
List<String> resourceNames = dataAccessor.getChildNames(propertyKeyBuilder.currentStates(instanceName, sessionId));
for (String resourceName : resourceNames) {
PropertyKey currentStateKey = propertyKeyBuilder.currentState(instanceName, sessionId, resourceName);
CurrentState currentState = dataAccessor.getProperty(currentStateKey);
if (currentState != null && currentState.getPartitionStateMap().size() > 0) {
return true;
}
}
}
_logger.warn(String.format("The instance %s does not have resource assigned on it.", instanceName));
return false;
}
/**
* Method to check if the instance has any disabled partition assigned
* @param dataAccessor
* @param clusterId
* @param instanceName
* @return
*/
public static boolean hasDisabledPartitions(HelixDataAccessor dataAccessor, String clusterId,
String instanceName) {
PropertyKey propertyKey = dataAccessor.keyBuilder().instanceConfig(instanceName);
InstanceConfig instanceConfig = dataAccessor.getProperty(propertyKey);
if (instanceConfig != null) {
      // Originally, Helix only checked whether the disabled partition map has entries. But this
      // could cause a problem when some partitions are disabled and then enabled back: the
      // resource entries are still left there. For a thorough check, we verify whether each
      // partition list is empty.
for (List<String> disabledPartitions : instanceConfig.getDisabledPartitionsMap().values()) {
if (disabledPartitions != null && disabledPartitions.size() > 0) {
return true;
}
}
return false;
}
throw new HelixException("Fail to get instance config for " + instanceName);
}
/**
* Method to check if the instance has valid configuration.
* Instance stability check requires PERSIST_INTERMEDIATE_ASSIGNMENT turned on!
*
* @param dataAccessor
* @param clusterId
* @param instanceName
* @return
*/
public static boolean hasValidConfig(HelixDataAccessor dataAccessor, String clusterId,
String instanceName) {
PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();
ClusterConfig clusterConfig = dataAccessor.getProperty(keyBuilder.clusterConfig());
if (clusterConfig == null) {
_logger.error("Cluster config is missing in cluster " + clusterId);
return false;
}
if (!clusterConfig.isPersistIntermediateAssignment()) {
_logger.error(
"Cluster config {} is not turned on, which is required for instance stability check.",
ClusterConfig.ClusterConfigProperty.PERSIST_INTERMEDIATE_ASSIGNMENT.toString());
return false;
}
PropertyKey propertyKey = keyBuilder.instanceConfig(instanceName);
InstanceConfig instanceConfig = dataAccessor.getProperty(propertyKey);
return instanceConfig != null && instanceConfig.isValid();
}
/**
* Method to check if the instance has error partitions
* @param dataAccessor
* @param clusterId
* @param instanceName
* @return
*/
public static boolean hasErrorPartitions(HelixDataAccessor dataAccessor, String clusterId,
String instanceName) {
PropertyKey.Builder propertyKeyBuilder = new PropertyKey.Builder(clusterId);
PropertyKey liveInstanceKey = propertyKeyBuilder.liveInstance(instanceName);
LiveInstance liveInstance = dataAccessor.getProperty(liveInstanceKey);
if (liveInstance != null) {
String sessionId = liveInstance.getEphemeralOwner();
PropertyKey currentStatesKey = propertyKeyBuilder.currentStates(instanceName, sessionId);
List<String> resourceNames = dataAccessor.getChildNames(currentStatesKey);
for (String resourceName : resourceNames) {
PropertyKey key = propertyKeyBuilder.currentState(instanceName, sessionId, resourceName);
CurrentState currentState = dataAccessor.getProperty(key);
if (currentState != null
&& currentState.getPartitionStateMap().containsValue(HelixDefinedState.ERROR.name())) {
_logger.warn("The instance {} has error partitions on it.", instanceName);
return true;
}
}
}
return false;
}
/**
* Get the problematic partitions on the to-be-stop instance
* Requirement:
* If the instance gets stopped and the partitions on the instance are OFFLINE,
* the cluster still have enough "healthy" replicas on other sibling instances
*
* - sibling instances mean those who share the same partition (replicas) of the to-be-stop instance
*
* @param globalPartitionHealthStatus (instance => (partition name, health status))
* @param instanceToBeStop The instance to be stopped
* @param dataAccessor The data accessor
* @return A list of problematic partitions if the instance is stopped
*/
public static Map<String, List<String>> perPartitionHealthCheck(List<ExternalView> externalViews,
Map<String, Map<String, Boolean>> globalPartitionHealthStatus, String instanceToBeStop,
HelixDataAccessor dataAccessor) {
Map<String, List<String>> unhealthyPartitions = new HashMap<>();
for (ExternalView externalView : externalViews) {
      // Skip ANY_LIVEINSTANCES resources, since they have a single partition with one replica.
      // There is no need to check sibling replicas.
if (ResourceConfig.ResourceConfigConstants.ANY_LIVEINSTANCE.name()
.equals(externalView.getReplicas())) {
continue;
}
StateModelDefinition stateModelDefinition = dataAccessor
.getProperty(dataAccessor.keyBuilder().stateModelDef(externalView.getStateModelDefRef()));
for (String partition : externalView.getPartitionSet()) {
Map<String, String> stateMap = externalView.getStateMap(partition);
// Only check if instance holds top state
if (stateMap.containsKey(instanceToBeStop)
&& stateMap.get(instanceToBeStop).equals(stateModelDefinition.getTopState())) {
for (String siblingInstance : stateMap.keySet()) {
// Skip this self check
if (siblingInstance.equals(instanceToBeStop)) {
continue;
}
            // If the sibling replica is in its initial state, record the dedicated
            // initial-state message
if (stateMap.get(siblingInstance).equals(stateModelDefinition.getInitialState())) {
unhealthyPartitions.computeIfAbsent(partition, list -> new ArrayList<>())
.add(PARTITION_INITIAL_STATE_FAIL);
continue;
}
            // If we failed to get the partition assignment for a sibling instance, include the
            // instance name in the returned error for debuggability.
if (!globalPartitionHealthStatus.containsKey(siblingInstance)
|| globalPartitionHealthStatus.get(siblingInstance).isEmpty()) {
unhealthyPartitions.computeIfAbsent(partition, list -> new ArrayList<>())
.add(HOST_NO_STATE_ERROR + siblingInstance);
} else if (!(globalPartitionHealthStatus.get(siblingInstance)
.getOrDefault(partition, false))) {
              // We are checking the sibling partition's health status, so if the partition's
              // health entry is missing or unhealthy, mark this partition as unhealthy.
unhealthyPartitions.computeIfAbsent(partition, list -> new ArrayList<>())
.add(UNHEALTHY_PARTITION);
}
}
}
}
}
return unhealthyPartitions;
}
/**
* Check instance is already in the stable state. Here stable means all the ideal state mapping
* matches external view (view of current state).
* @param dataAccessor
* @param instanceName
* @return
*/
public static boolean isInstanceStable(HelixDataAccessor dataAccessor, String instanceName) {
PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();
ClusterConfig clusterConfig = dataAccessor.getProperty(keyBuilder.clusterConfig());
if (clusterConfig == null) {
throw new HelixException("Missing cluster config!");
}
List<String> idealStateNames = dataAccessor.getChildNames(keyBuilder.idealStates());
for (String idealStateName : idealStateNames) {
IdealState idealState = dataAccessor.getProperty(keyBuilder.idealStates(idealStateName));
if (idealState == null || !idealState.isEnabled() || !idealState.isValid()
|| TaskConstants.STATE_MODEL_NAME.equals(idealState.getStateModelDefRef())) {
continue;
}
ExternalView externalView = dataAccessor.getProperty(keyBuilder.externalView(idealStateName));
if (externalView == null) {
throw new HelixException(
String.format("Resource %s does not have external view!", idealStateName));
}
for (String partition : idealState.getPartitionSet()) {
Map<String, String> isPartitionMap = idealState.getInstanceStateMap(partition);
if (isPartitionMap == null) {
throw new HelixException(String
.format("Partition %s of resource %s does not have an ideal state partition map",
partition, idealStateName));
}
if (isPartitionMap.containsKey(instanceName)) {
Map<String, String> evPartitionMap = externalView.getStateMap(partition);
if (evPartitionMap == null) {
throw new HelixException(String
.format("Partition %s of resource %s does not have an external view partition map",
partition, idealStateName));
}
if (!evPartitionMap.containsKey(instanceName)
|| !evPartitionMap.get(instanceName).equals(isPartitionMap.get(instanceName))) {
            // Only checks that the state in the ideal state matches the external view.
            // Return false when:
            // 1. this partition has no current state on this instance
            // 2. the state does not match the state in the ideal state
return false;
}
}
}
}
return true;
}
/**
* Check if sibling nodes of the instance meet min active replicas constraint
* Two instances are sibling of each other if they host the same partition
   * WARNING: The check uses ExternalView to reduce network traffic, but may lose accuracy
   * due to external view propagation latency
*
* TODO: Use in memory cache and query instance's currentStates
*
* @param dataAccessor
* @param instanceName
* @return
*/
public static boolean siblingNodesActiveReplicaCheck(HelixDataAccessor dataAccessor, String instanceName) {
PropertyKey.Builder propertyKeyBuilder = dataAccessor.keyBuilder();
List<String> resources = dataAccessor.getChildNames(propertyKeyBuilder.idealStates());
for (String resourceName : resources) {
IdealState idealState = dataAccessor.getProperty(propertyKeyBuilder.idealStates(resourceName));
if (idealState == null || !idealState.isEnabled() || !idealState.isValid()
|| TaskConstants.STATE_MODEL_NAME.equals(idealState.getStateModelDefRef())) {
continue;
}
ExternalView externalView =
dataAccessor.getProperty(propertyKeyBuilder.externalView(resourceName));
if (externalView == null) {
throw new HelixException(
String.format("Resource %s does not have external view!", resourceName));
}
// Get the minActiveReplicas constraint for the resource
int minActiveReplicas = externalView.getMinActiveReplicas();
if (minActiveReplicas == -1) {
_logger.warn("Resource {} is missing minActiveReplica field. Skip the sibling check",
resourceName);
continue;
}
String stateModeDef = externalView.getStateModelDefRef();
StateModelDefinition stateModelDefinition =
dataAccessor.getProperty(propertyKeyBuilder.stateModelDef(stateModeDef));
Set<String> unhealthyStates = new HashSet<>(UNHEALTHY_STATES);
if (stateModelDefinition != null) {
unhealthyStates.add(stateModelDefinition.getInitialState());
}
for (String partition : externalView.getPartitionSet()) {
Map<String, String> stateByInstanceMap = externalView.getStateMap(partition);
// found the resource hosted on the instance
if (stateByInstanceMap.containsKey(instanceName)) {
int numHealthySiblings = 0;
for (Map.Entry<String, String> entry : stateByInstanceMap.entrySet()) {
if (!entry.getKey().equals(instanceName)
&& !unhealthyStates.contains(entry.getValue())) {
numHealthySiblings++;
}
}
if (numHealthySiblings < minActiveReplicas) {
_logger.info(
"Partition {} doesn't have enough active replicas in sibling nodes. NumHealthySiblings: {}, minActiveReplicas: {}",
partition, numHealthySiblings, minActiveReplicas);
return false;
}
}
}
}
return true;
}
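  /*
   * Hedged usage sketch (not part of the original class): how a maintenance
   * tool might combine several of the independent checks above before
   * stopping an instance. The accessor construction, ZooKeeper address and
   * cluster/instance names are placeholders assumed for illustration; a real
   * caller would reuse its existing HelixDataAccessor.
   */
  public static void main(String[] args) {
    HelixDataAccessor accessor = new org.apache.helix.manager.zk.ZKHelixDataAccessor(
        "myCluster", new org.apache.helix.manager.zk.ZkBaseDataAccessor<>("localhost:2181"));
    String instance = "participant_1";
    boolean safeToStop = isEnabled(accessor, instance)
        && isAlive(accessor, instance)
        && !hasErrorPartitions(accessor, "myCluster", instance)
        && isInstanceStable(accessor, instance)
        && siblingNodesActiveReplicaCheck(accessor, instance);
    System.out.println("Safe to stop " + instance + ": " + safeToStop);
  }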
}
| 9,995 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/ZKClientPool.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.zookeeper.impl.client.ZkClient;
import org.apache.zookeeper.ZooKeeper.States;
public class ZKClientPool {
static final Map<String, ZkClient> _zkClientMap = new ConcurrentHashMap<>();
static final int DEFAULT_SESSION_TIMEOUT = 30 * 1000;
public static ZkClient getZkClient(String zkServer) {
    // Happy path: the zkclient for this server is cached and still connected
if (_zkClientMap.containsKey(zkServer)) {
ZkClient zkClient = _zkClientMap.get(zkServer);
if (zkClient.getConnection().getZookeeperState() == States.CONNECTED) {
return zkClient;
}
}
synchronized (_zkClientMap) {
// if we cache a stale zkclient, purge it
if (_zkClientMap.containsKey(zkServer)) {
ZkClient zkClient = _zkClientMap.get(zkServer);
if (zkClient.getConnection().getZookeeperState() != States.CONNECTED) {
_zkClientMap.remove(zkServer);
}
}
// get a new zkclient
if (!_zkClientMap.containsKey(zkServer)) {
ZkClient zkClient =
new ZkClient(zkServer, DEFAULT_SESSION_TIMEOUT, ZkClient.DEFAULT_CONNECTION_TIMEOUT,
new ZNRecordSerializer());
_zkClientMap.put(zkServer, zkClient);
}
return _zkClientMap.get(zkServer);
}
}
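  /*
   * Hedged usage sketch (not part of the original class): repeated lookups
   * with the same address share one cached, connected ZkClient. Requires a
   * ZooKeeper server at the placeholder address below.
   */
  static void usageExample() {
    ZkClient first = getZkClient("localhost:2181");
    ZkClient second = getZkClient("localhost:2181");
    System.out.println(first == second); // true while the connection stays alive
  }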
public static void reset() {
_zkClientMap.clear();
}
public static void main(String[] args) throws InterruptedException {
    Thread _dataSampleThread = new Thread(new Runnable() {
@Override
public void run() {
int i = 0;
while (!Thread.currentThread().isInterrupted()) {
// if the queue is empty, sleep 100 ms and try again
          try {
            Thread.sleep(1000);
          } catch (InterruptedException e) {
            // Restore the interrupt status so the loop condition sees it and exits
            Thread.currentThread().interrupt();
            return;
          }
System.out.println(i++ + "...");
throw new Error("" + i);
}
}
});
_dataSampleThread.start();
Thread.sleep(10000);
_dataSampleThread.interrupt();
}
}
| 9,996 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/MessageUtil.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.UUID;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;
import org.apache.helix.model.Resource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Message utilities for operating on messages, such as creating them.
*/
public class MessageUtil {
private static final Logger LOG = LoggerFactory.getLogger(MessageUtil.class);
// TODO: Make the message retry count configurable through the Cluster Config or IdealStates.
public final static int DEFAULT_STATE_TRANSITION_MESSAGE_RETRY_COUNT = 3;
public static Message createStateTransitionCancellationMessage(String srcInstanceName,
String srcSessionId, Resource resource, String partitionName, String instanceName,
String sessionId, String stateModelDefName, String fromState, String toState,
String nextState, Message cancellationMessage, boolean isCancellationEnabled,
String currentState) {
if (isCancellationEnabled && cancellationMessage == null) {
LOG.info("Create cancellation message of the state transition for {}.{} on {}, "
+ "currentState: {}, nextState: {}, toState: {}", resource.getResourceName(),
partitionName, instanceName, currentState, nextState == null ? "N/A" : nextState,
toState);
Message message =
createStateTransitionMessage(Message.MessageType.STATE_TRANSITION_CANCELLATION,
srcInstanceName, srcSessionId, resource, partitionName, instanceName, currentState,
nextState, sessionId, stateModelDefName);
message.setFromState(fromState);
message.setToState(toState);
return message;
}
return null;
}
public static Message createStateTransitionMessage(String srcInstanceName, String srcSessionId,
Resource resource, String partitionName, String instanceName, String currentState,
String nextState, String tgtSessionId, String stateModelDefName) {
Message message =
createStateTransitionMessage(Message.MessageType.STATE_TRANSITION, srcInstanceName,
srcSessionId, resource, partitionName, instanceName, currentState, nextState, tgtSessionId,
stateModelDefName);
// Set the retry count for state transition messages.
// TODO: make the retry count configurable in ClusterConfig or IdealState
message.setRetryCount(DEFAULT_STATE_TRANSITION_MESSAGE_RETRY_COUNT);
if (resource.getResourceGroupName() != null) {
message.setResourceGroupName(resource.getResourceGroupName());
}
if (resource.getResourceTag() != null) {
message.setResourceTag(resource.getResourceTag());
}
return message;
}
/**
* Creates a message to change participant status
* {@link org.apache.helix.model.LiveInstance.LiveInstanceStatus}
*
* @param currentState current status of the live instance
* @param nextState next status that will be changed to
* @param srcInstanceName source instance name
* @param srcSessionId session id for the source instance
* @param tgtInstanceName target instance name
* @param tgtSessionId target instance session id
* @return participant status change message
*/
public static Message createStatusChangeMessage(LiveInstance.LiveInstanceStatus currentState,
LiveInstance.LiveInstanceStatus nextState, String srcInstanceName, String srcSessionId,
String tgtInstanceName, String tgtSessionId) {
return createBasicMessage(Message.MessageType.PARTICIPANT_STATUS_CHANGE, srcInstanceName,
srcSessionId, tgtInstanceName, tgtSessionId, currentState.name(), nextState.name());
}
  /* Creates a message that has only the minimally required fields. */
private static Message createBasicMessage(Message.MessageType messageType, String srcInstanceName,
String srcSessionId, String tgtInstanceName, String tgtSessionId, String currentState,
String nextState) {
String uuid = UUID.randomUUID().toString();
Message message = new Message(messageType, uuid);
message.setSrcName(srcInstanceName);
message.setTgtName(tgtInstanceName);
message.setMsgState(Message.MessageState.NEW);
message.setFromState(currentState);
message.setToState(nextState);
message.setTgtSessionId(tgtSessionId);
message.setSrcSessionId(srcSessionId);
message.setExpectedSessionId(srcSessionId);
return message;
}
  /* Creates a state transition or state transition cancellation message. */
private static Message createStateTransitionMessage(Message.MessageType messageType,
String srcInstanceName, String srcSessionId, Resource resource, String partitionName,
String instanceName, String currentState, String nextState, String tgtSessionId,
String stateModelDefName) {
Message message =
createBasicMessage(messageType, srcInstanceName, srcSessionId, instanceName, tgtSessionId,
currentState, nextState);
message.setPartitionName(partitionName);
message.setStateModelDef(stateModelDefName);
message.setResourceName(resource.getResourceName());
message.setStateModelFactoryName(resource.getStateModelFactoryname());
message.setBucketSize(resource.getBucketSize());
return message;
}
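  /*
   * Hedged usage sketch (not part of the original class): builds a plain
   * OFFLINE->ONLINE state transition message. All names (instances, sessions,
   * resource, partition, state model) are placeholders.
   */
  public static void main(String[] args) {
    Resource resource = new Resource("myDB");
    Message message = createStateTransitionMessage("controller_0", "srcSession-1", resource,
        "myDB_0", "participant_1", "OFFLINE", "ONLINE", "tgtSession-1", "OnlineOffline");
    System.out.println(message.getMsgId() + " retries=" + message.getRetryCount());
  }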
}
| 9,997 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/StatusUpdateUtil.java | package org.apache.helix.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.io.PrintWriter;
import java.io.StringWriter;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixProperty;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyKey;
import org.apache.helix.PropertyKey.Builder;
import org.apache.helix.SystemPropertyKeys;
import org.apache.helix.model.Error;
import org.apache.helix.model.Message;
import org.apache.helix.model.Message.MessageType;
import org.apache.helix.model.StatusUpdate;
import org.apache.helix.zookeeper.datamodel.ZNRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Utility class to create statusUpdate ZK records and error ZK records. These message
 * records are for diagnostics only; they are stored under the "StatusUpdates" and
 * "errors" ZNodes in the ZooKeeper instances.
*/
public class StatusUpdateUtil {
static Logger _logger = LoggerFactory.getLogger(StatusUpdateUtil.class);
public static final boolean ERROR_LOG_TO_ZK_ENABLED =
Boolean.getBoolean(SystemPropertyKeys.STATEUPDATEUTIL_ERROR_PERSISTENCY_ENABLED);
public static class Transition implements Comparable<Transition> {
private final String _msgID;
private final long _timeStamp;
private final String _from;
private final String _to;
public Transition(String msgID, long timeStamp, String from, String to) {
this._msgID = msgID;
this._timeStamp = timeStamp;
this._from = from;
this._to = to;
}
@Override
    public int compareTo(Transition t) {
      return Long.compare(_timeStamp, t._timeStamp);
    }
public boolean equals(Transition t) {
return (_timeStamp == t._timeStamp && _from.equals(t._from) && _to.equals(t._to));
}
public String getFromState() {
return _from;
}
public String getToState() {
return _to;
}
public String getMsgID() {
return _msgID;
}
@Override
public String toString() {
return _msgID + ":" + _timeStamp + ":" + _from + "->" + _to;
}
}
  public enum TaskStatus {
UNKNOWN,
NEW,
SCHEDULED,
INVOKING,
COMPLETED,
FAILED
}
public static class StatusUpdateContents {
private final List<Transition> _transitions;
private final Map<String, TaskStatus> _taskMessages;
private StatusUpdateContents(List<Transition> transitions, Map<String, TaskStatus> taskMessages) {
this._transitions = transitions;
this._taskMessages = taskMessages;
}
public static StatusUpdateContents getStatusUpdateContents(HelixDataAccessor accessor,
String instance, String resourceGroup, String partition) {
return getStatusUpdateContents(accessor, instance, resourceGroup, null, partition);
}
    // TODO: We should build a map and return the key instead of searching every time
    // for an (instance, resourceGroup, session, partition) tuple.
    // But such a map is very similar to what exists in ZNRecord.
    // Passing null for sessionID results in searching across all sessions.
public static StatusUpdateContents getStatusUpdateContents(HelixDataAccessor accessor,
String instance, String resourceGroup, String sessionID, String partition) {
Builder keyBuilder = accessor.keyBuilder();
List<ZNRecord> instances =
HelixProperty.convertToList(accessor.getChildValues(keyBuilder.instanceConfigs(), true));
List<ZNRecord> partitionRecords = new ArrayList<ZNRecord>();
for (ZNRecord znRecord : instances) {
String instanceName = znRecord.getId();
if (!instanceName.equals(instance)) {
continue;
}
List<String> sessions = accessor.getChildNames(keyBuilder.sessions(instanceName));
for (String session : sessions) {
if (sessionID != null && !session.equals(sessionID)) {
continue;
}
List<String> resourceGroups =
accessor.getChildNames(keyBuilder.stateTransitionStatus(instanceName, session));
for (String resourceGroupName : resourceGroups) {
if (!resourceGroupName.equals(resourceGroup)) {
continue;
}
List<String> partitionStrings =
accessor.getChildNames(keyBuilder.stateTransitionStatus(instanceName, session,
resourceGroupName));
for (String partitionString : partitionStrings) {
ZNRecord partitionRecord =
accessor.getProperty(
keyBuilder.stateTransitionStatus(instanceName, session, resourceGroupName,
partitionString)).getRecord();
if (!partitionString.equals(partition)) {
continue;
}
partitionRecords.add(partitionRecord);
}
}
}
}
return new StatusUpdateContents(getSortedTransitions(partitionRecords),
getTaskMessages(partitionRecords));
}
public List<Transition> getTransitions() {
return _transitions;
}
public Map<String, TaskStatus> getTaskMessages() {
return _taskMessages;
}
// input: List<ZNRecord> corresponding to (instance, database,
// partition) tuples across all sessions
// return list of transitions sorted from earliest to latest
private static List<Transition> getSortedTransitions(List<ZNRecord> partitionRecords) {
List<Transition> transitions = new ArrayList<Transition>();
for (ZNRecord partition : partitionRecords) {
Map<String, Map<String, String>> mapFields = partition.getMapFields();
for (String key : mapFields.keySet()) {
if (key.startsWith("MESSAGE")) {
Map<String, String> m = mapFields.get(key);
long createTimeStamp = 0;
try {
createTimeStamp = Long.parseLong(m.get("CREATE_TIMESTAMP"));
          } catch (Exception e) {
            // Ignore a malformed or missing timestamp and default to 0
          }
transitions.add(new Transition(m.get("MSG_ID"), createTimeStamp, m.get("FROM_STATE"), m
.get("TO_STATE")));
}
}
}
Collections.sort(transitions);
return transitions;
}
private static Map<String, TaskStatus> getTaskMessages(List<ZNRecord> partitionRecords) {
Map<String, TaskStatus> taskMessages = new HashMap<String, TaskStatus>();
for (ZNRecord partition : partitionRecords) {
Map<String, Map<String, String>> mapFields = partition.getMapFields();
// iterate over the task status updates in the order they occurred
// so that the last status can be recorded
for (String key : mapFields.keySet()) {
if (key.contains("STATE_TRANSITION")) {
Map<String, String> m = mapFields.get(key);
String id = m.get("MSG_ID");
          String statusString = m.get("AdditionalInfo");
          TaskStatus status = TaskStatus.UNKNOWN;
          // Guard against a missing AdditionalInfo field to avoid an NPE
          if (statusString != null) {
            if (statusString.contains("scheduled"))
              status = TaskStatus.SCHEDULED;
            else if (statusString.contains("invoking"))
              status = TaskStatus.INVOKING;
            else if (statusString.contains("completed"))
              status = TaskStatus.COMPLETED;
          }
taskMessages.put(id, status);
}
}
}
return taskMessages;
}
}
public enum Level {
HELIX_ERROR,
HELIX_WARNING,
HELIX_INFO
}
/**
   * Creates an empty ZNRecord to serve as the statusUpdate/error record
   * @param id the id of the new record
*/
public ZNRecord createEmptyStatusUpdateRecord(String id) {
return new ZNRecord(id);
}
/**
* Create a ZNRecord for a message, which stores the content of the message (stored in
* simple fields) into the ZNRecord mapFields. In this way, the message update can be
* merged with the previous status update record in the zookeeper. See ZNRecord.merge()
* for more details.
*/
ZNRecord createMessageLogRecord(Message message) {
ZNRecord result = new ZNRecord(getStatusUpdateRecordName(message));
String mapFieldKey = "MESSAGE " + message.getMsgId();
result.setMapField(mapFieldKey, new TreeMap<String, String>());
// Store all the simple fields of the message in the new ZNRecord's map
// field.
for (String simpleFieldKey : message.getRecord().getSimpleFields().keySet()) {
result.getMapField(mapFieldKey).put(simpleFieldKey,
message.getRecord().getSimpleField(simpleFieldKey));
}
if (message.getResultMap() != null) {
result.setMapField("MessageResult", message.getResultMap());
}
return result;
}
Map<String, String> _recordedMessages = new ConcurrentHashMap<>();
/**
   * Create a status update that is related to a cluster manager message.
* @param message
* the related cluster manager message
* @param level
* the error level
* @param classInfo
* class info about the class that reports the status update
* @param additionalInfo
   *          the additional debug information
*/
public ZNRecord createMessageStatusUpdateRecord(Message message, Level level, Class classInfo,
String additionalInfo) {
ZNRecord result = createEmptyStatusUpdateRecord(getStatusUpdateRecordName(message));
Map<String, String> contentMap = new TreeMap<String, String>();
contentMap.put("Message state",
(message.getMsgState() == null ? "NULL" : message.getMsgState().toString()));
contentMap.put("AdditionalInfo", additionalInfo);
contentMap.put("Class", classInfo.toString());
contentMap.put("MSG_ID", message.getMsgId());
result.setMapField(generateMapFieldId(level, getRecordIdForMessage(message)), contentMap);
return result;
}
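  /*
   * Hedged usage sketch (not part of the original class): shape of a status
   * update record for a state transition message. The message argument and
   * the additionalInfo text are placeholders.
   */
  ZNRecord exampleStatusUpdate(Message message) {
    // Produces one map field keyed like "HELIX_INFO <timestamp> <partition> Trans:O->O <uuid>"
    return createMessageStatusUpdateRecord(message, Level.HELIX_INFO, StatusUpdateUtil.class,
        "message scheduled on executor");
  }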
private String getRecordIdForMessage(Message message) {
if (message.getMsgType().equals(MessageType.STATE_TRANSITION)) {
return message.getPartitionName() + " Trans:" + message.getFromState().charAt(0) + "->"
+ message.getToState().charAt(0) + " " + UUID.randomUUID().toString();
} else {
return message.getMsgType() + " " + UUID.randomUUID().toString();
}
}
private String generateMapFieldId(Level level, String recordId) {
DateFormat formatter = new SimpleDateFormat("yyyyMMdd-HHmmss.SSSSSS");
String time = formatter.format(new Date());
return String.format("%4s %26s ", level.toString(), time) + recordId;
}
@Deprecated
public void logMessageStatusUpdateRecord(Message message, Level level, Class classInfo,
String additionalInfo, HelixDataAccessor accessor) {
try {
ZNRecord record = createMessageStatusUpdateRecord(message, level, classInfo, additionalInfo);
publishStatusUpdateRecord(record, message, level, accessor,
message.getTgtName().equalsIgnoreCase(InstanceType.CONTROLLER.name()));
} catch (Exception e) {
_logger.error("Exception while logging status update", e);
}
}
/**
   * Create a status update that is related to a cluster manager message, then record it to
* the zookeeper store.
* @param message
* the related cluster manager message
* @param level
* the error level
* @param classInfo
* class info about the class that reports the status update
* @param additionalInfo
   *          the additional debug information
* @param manager
* the HelixManager that writes the status update to zookeeper
*/
public void logMessageStatusUpdateRecord(Message message, Level level, Class classInfo,
String additionalInfo, HelixManager manager) {
try {
ZNRecord record = createMessageStatusUpdateRecord(message, level, classInfo, additionalInfo);
publishStatusUpdateRecord(record, message, level, manager.getHelixDataAccessor(),
manager.getInstanceType().equals(InstanceType.CONTROLLER) || manager.getInstanceType()
.equals(InstanceType.CONTROLLER_PARTICIPANT));
} catch (Exception e) {
_logger.error("Exception while logging status update", e);
}
}
public enum ErrorType {
RebalanceResourceFailure,
}
public void logError(ErrorType errorType, Class classInfo, String additionalInfo, HelixManager helixManager) {
if (helixManager != null) {
logError(errorType, "ErrorInfo", helixManager.getInstanceName(), helixManager.getSessionId(), additionalInfo,
classInfo, helixManager.getHelixDataAccessor(),
helixManager.getInstanceType().equals(InstanceType.CONTROLLER) || helixManager.getInstanceType()
.equals(InstanceType.CONTROLLER_PARTICIPANT));
} else {
_logger.error("Exception while logging error. HelixManager is null.");
}
}
private void logError(ErrorType errorType, String updateKey, String instanceName,
String sessionId, String additionalInfo, Class classInfo, HelixDataAccessor accessor,
boolean isController) {
try {
ZNRecord record = createEmptyStatusUpdateRecord(sessionId + "__" + instanceName);
Map<String, String> contentMap = new TreeMap<>();
contentMap.put("AdditionalInfo", additionalInfo);
contentMap.put("Class", classInfo.toString());
contentMap.put("SessionId", sessionId);
record.setMapField(generateMapFieldId(Level.HELIX_ERROR, updateKey), contentMap);
publishErrorRecord(record, instanceName, errorType.name(), updateKey, sessionId, accessor,
isController);
} catch (Exception e) {
_logger.error("Exception while logging error", e);
}
}
public void logError(Message message, Class classInfo, String additionalInfo, HelixManager manager) {
logMessageStatusUpdateRecord(message, Level.HELIX_ERROR, classInfo, additionalInfo, manager);
}
public void logError(Message message, Class classInfo, Exception e, String additionalInfo,
HelixManager manager) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
logMessageStatusUpdateRecord(message, Level.HELIX_ERROR, classInfo,
additionalInfo + sw.toString(), manager);
}
public void logInfo(Message message, Class classInfo, String additionalInfo,
HelixManager manager) {
logMessageStatusUpdateRecord(message, Level.HELIX_INFO, classInfo, additionalInfo, manager);
}
public void logWarning(Message message, Class classInfo, String additionalInfo,
HelixManager manager) {
logMessageStatusUpdateRecord(message, Level.HELIX_WARNING, classInfo, additionalInfo, manager);
}
@Deprecated
public void logError(Message message, Class classInfo, String additionalInfo,
HelixDataAccessor accessor) {
logMessageStatusUpdateRecord(message, Level.HELIX_ERROR, classInfo, additionalInfo, accessor);
}
@Deprecated
public void logError(Message message, Class classInfo, Exception e, String additionalInfo,
HelixDataAccessor accessor) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
e.printStackTrace(pw);
logMessageStatusUpdateRecord(message, Level.HELIX_ERROR, classInfo,
additionalInfo + sw.toString(), accessor);
}
@Deprecated
public void logInfo(Message message, Class classInfo, String additionalInfo,
HelixDataAccessor accessor) {
logMessageStatusUpdateRecord(message, Level.HELIX_INFO, classInfo, additionalInfo, accessor);
}
@Deprecated
public void logWarning(Message message, Class classInfo, String additionalInfo,
HelixDataAccessor accessor) {
logMessageStatusUpdateRecord(message, Level.HELIX_WARNING, classInfo, additionalInfo, accessor);
}
private String getStatusUpdateKey(Message message) {
if (message.getMsgType().equalsIgnoreCase(MessageType.STATE_TRANSITION.name())) {
return message.getPartitionName();
}
return message.getMsgId();
}
/**
* Generate the sub-path under STATUSUPDATE or ERROR path for a status update
*/
String getStatusUpdateSubPath(Message message) {
if (message.getMsgType().equalsIgnoreCase(MessageType.STATE_TRANSITION.name())) {
return message.getResourceName();
}
return message.getMsgType();
}
String getStatusUpdateRecordName(Message message) {
if (message.getMsgType().equalsIgnoreCase(MessageType.STATE_TRANSITION.name())) {
return message.getTgtSessionId() + "__" + message.getResourceName();
}
return message.getMsgId();
}
/**
   * Write a status update record to the ZooKeeper store.
* @param record
* the status update record
* @param message
* the message to be logged
* @param level
* the error level of the message update
* @param accessor
* the zookeeper data accessor that writes the status update to zookeeper
* @param isController
* if the update is for a controller instance or not
*/
void publishStatusUpdateRecord(ZNRecord record, Message message, Level level,
HelixDataAccessor accessor, boolean isController) {
String instanceName = message.getTgtName();
String statusUpdateSubPath = getStatusUpdateSubPath(message);
String statusUpdateKey = getStatusUpdateKey(message);
String sessionId = message.getExecutionSessionId();
if (sessionId == null) {
sessionId = message.getTgtSessionId();
}
if (sessionId == null) {
sessionId = "*";
}
Builder keyBuilder = accessor.keyBuilder();
if (!_recordedMessages.containsKey(message.getMsgId())) {
ZNRecord statusUpdateRecord = createMessageLogRecord(message);
PropertyKey propertyKey;
if (isController) {
propertyKey = keyBuilder.controllerTaskStatus(statusUpdateSubPath, statusUpdateKey);
} else {
propertyKey =
keyBuilder.stateTransitionStatus(instanceName, sessionId, statusUpdateSubPath,
statusUpdateKey);
}
accessor.updateProperty(propertyKey, new StatusUpdate(statusUpdateRecord));
if (_logger.isTraceEnabled()) {
_logger.trace("StatusUpdate path:" + propertyKey.getPath() + ", updates:"
+ statusUpdateRecord);
}
_recordedMessages.put(message.getMsgId(), message.getMsgId());
}
PropertyKey propertyKey;
if (isController) {
propertyKey = keyBuilder.controllerTaskStatus(statusUpdateSubPath, statusUpdateKey);
} else {
propertyKey =
keyBuilder.stateTransitionStatus(instanceName, sessionId, statusUpdateSubPath,
statusUpdateKey);
}
accessor.updateProperty(propertyKey, new StatusUpdate(record));
if (_logger.isTraceEnabled()) {
_logger.trace("StatusUpdate path:" + propertyKey.getPath() + ", updates:" + record);
}
// If the error level is ERROR, also write the record to "ERROR" ZNode
if (Level.HELIX_ERROR == level) {
publishErrorRecord(record, instanceName, statusUpdateSubPath, statusUpdateKey, sessionId,
accessor, isController);
}
}
/**
   * Write an error record to the ZooKeeper store.
* @param record
* the status update record
* @param instanceName
* the instance name
* @param updateSubPath
* the error update sub path
* @param updateKey
* the error update key
* @param sessionId
* the session id
* @param accessor
* the zookeeper data accessor that writes the status update to zookeeper
* @param isController
* if the error log is for a controller instance or not
*/
void publishErrorRecord(ZNRecord record, String instanceName, String updateSubPath,
String updateKey, String sessionId, HelixDataAccessor accessor, boolean isController) {
if (!ERROR_LOG_TO_ZK_ENABLED) {
return;
}
Builder keyBuilder = accessor.keyBuilder();
if (isController) {
// TODO need to fix: ERRORS_CONTROLLER doesn't have a form of
// ../{sessionId}/{subPath}
accessor.setProperty(keyBuilder.controllerTaskError(updateSubPath), new Error(record));
} else {
accessor.updateProperty(keyBuilder.stateTransitionError(instanceName, sessionId,
updateSubPath, updateKey), new Error(record));
}
}
}
| 9,998 |
0 | Create_ds/helix/helix-core/src/main/java/org/apache/helix | Create_ds/helix/helix-core/src/main/java/org/apache/helix/util/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Helix utility classes
*
*/
package org.apache.helix.util; | 9,999 |