| code (string, 23–201k chars) | docstring (string, 17–96.2k chars) | func_name (string, 0–235 chars) | language (1 class) | repo (string, 8–72 chars) | path (string, 11–317 chars) | url (string, 57–377 chars) | license (7 classes) |
|---|---|---|---|---|---|---|---|
public double getDouble(String name, double defaultValue) {
return mProperties.getDouble(name, defaultValue);
}
|
Exposed for testability
@param properties properties config
|
getDouble
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public long getLong(String name) {
return mProperties.getLong(name);
}
|
Exposed for testability
@param properties properties config
|
getLong
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public String[] getStringArray(String name) {
return mProperties.getStringArray(name);
}
|
Exposed for testability
@param properties properties config
|
getStringArray
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public String getThriftProtocolClass() {
return mProperties.getString("secor.thrift.protocol.class");
}
|
Exposed for testability
@param properties properties config
|
getThriftProtocolClass
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public String getMetricsCollectorClass() {
return getString("secor.monitoring.metrics.collector.class");
}
|
Exposed for testability
@param properties properties config
|
getMetricsCollectorClass
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public boolean getMicroMeterCollectorJmxEnabled() {
return getBoolean("secor.monitoring.metrics.collector.micrometer.jmx.enabled", false);
}
|
Exposed for testability
@param properties properties config
|
getMicroMeterCollectorJmxEnabled
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public boolean getMicroMeterCollectorStatsdEnabled() {
return getBoolean("secor.monitoring.metrics.collector.micrometer.statsd.enabled", false);
}
|
Exposed for testability
@param properties properties config
|
getMicroMeterCollectorStatsdEnabled
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public boolean getMicroMeterCollectorPrometheusEnabled() {
return getBoolean("secor.monitoring.metrics.collector.micrometer.prometheus.enabled", false);
}
|
Exposed for testability
@param properties properties config
|
getMicroMeterCollectorPrometheusEnabled
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public int getMicroMeterCacheSize() {
return getInt("secor.monitoring.metrics.collector.micrometer.cache.size", 500);
}
|
Exposed for testability
@param properties properties config
|
getMicroMeterCacheSize
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public Map<String, String> getPropertyMapForPrefix(String prefix) {
Iterator<String> keys = mProperties.getKeys(prefix);
Map<String, String> map = new HashMap<String, String>();
while (keys.hasNext()) {
String key = keys.next();
String value = mProperties.getString(key);
map.put(key.substring(prefix.length() + 1), value);
}
return map;
}
|
This method is used for fetching all the properties which start with the given prefix.
It returns a Map of all those key-val.
e.g.
a.b.c=val1
a.b.d=val2
a.b.e=val3
If prefix is a.b then,
These will be fetched as a map {c = val1, d = val2, e = val3}
@param prefix property prefix
@return
|
getPropertyMapForPrefix
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
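As a rough illustration of the prefix-stripping behavior described in the docstring above, the following minimal sketch uses a plain `Map` in place of Secor's Commons Configuration object; the property names and values are the made-up ones from that example.

```java
import java.util.HashMap;
import java.util.Map;

public class PrefixMapExample {
    // Collect entries whose keys start with "<prefix>." and strip the prefix,
    // mirroring the getPropertyMapForPrefix contract described above.
    static Map<String, String> propertyMapForPrefix(Map<String, String> props, String prefix) {
        Map<String, String> result = new HashMap<>();
        for (Map.Entry<String, String> entry : props.entrySet()) {
            if (entry.getKey().startsWith(prefix + ".")) {
                result.put(entry.getKey().substring(prefix.length() + 1), entry.getValue());
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("a.b.c", "val1");
        props.put("a.b.d", "val2");
        props.put("a.b.e", "val3");
        // Prints {c=val1, d=val2, e=val3} (iteration order may vary)
        System.out.println(propertyMapForPrefix(props, "a.b"));
    }
}
```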
public Map<String, String> getORCMessageSchema() {
return getPropertyMapForPrefix("secor.orc.message.schema");
}
|
This method is used for fetching all the properties which start with the given prefix.
It returns a Map of all those key-val.
e.g.
a.b.c=val1
a.b.d=val2
a.b.e=val3
If prefix is a.b then,
These will be fetched as a map {c = val1, d = val2, e = val3}
@param prefix property prefix
@return
|
getORCMessageSchema
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public Map<String, String> getAvroMessageSchema() {
return getPropertyMapForPrefix("secor.avro.message.schema");
}
|
This method is used for fetching all the properties which start with the given prefix.
It returns a Map of all those key-val.
e.g.
a.b.c=val1
a.b.d=val2
a.b.e=val3
If prefix is a.b then,
These will be fetched as a map {c = val1, d = val2, e = val3}
@param prefix property prefix
@return
|
getAvroMessageSchema
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public String getORCSchemaProviderClass(){
return getString("secor.orc.schema.provider");
}
|
This method is used for fetching all the properties which start with the given prefix.
It returns a Map of all those key-val.
e.g.
a.b.c=val1
a.b.d=val2
a.b.e=val3
If prefix is a.b then,
These will be fetched as a map {c = val1, d = val2, e = val3}
@param prefix property prefix
@return
|
getORCSchemaProviderClass
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorConfig.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorConfig.java
|
Apache-2.0
|
public Schema getSchema(String topic) {
Schema schema = schemas.get(topic);
if (schema == null) {
try {
SchemaMetadata schemaMetadata = schemaRegistryClient.getLatestSchemaMetadata(topic + "-value");
schema = schemaRegistryClient.getByID(schemaMetadata.getId());
schemas.put(topic, schema);
} catch (IOException e) {
throw new IllegalStateException("Unable to get Avro schema not found for topic " + topic);
} catch (RestClientException e) {
throw new IllegalStateException("Avro schema not found for topic " + topic);
}
}
return schema;
}
|
Get Avro schema of a topic. It uses the cache that either is set by calling {@link #deserialize(String, byte[])}
or querying this method to avoid hitting Schema Registry for each call.
It uses standard "subject name" strategy and it is topic_name-value.
@param topic a Kafka topic to query the schema for
@return Schema object for the topic
@throws IllegalStateException if there is no schema registered for this topic or it is not able to fetch it
|
getSchema
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorSchemaRegistryClient.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorSchemaRegistryClient.java
|
Apache-2.0
|
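A minimal sketch of the caching pattern described above, assuming a hypothetical `fetchBySubject` lookup in place of the Confluent schema registry client and plain strings in place of Avro `Schema` objects; only the `<topic>-value` subject naming and the cache-on-first-use behavior are taken from the row above.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

public class SchemaCacheSketch {
    private final Map<String, String> schemas = new ConcurrentHashMap<>();
    private final Function<String, String> fetchBySubject; // hypothetical registry lookup

    public SchemaCacheSketch(Function<String, String> fetchBySubject) {
        this.fetchBySubject = fetchBySubject;
    }

    // Default "subject name" strategy: the value schema for a topic lives under "<topic>-value".
    public String getSchema(String topic) {
        return schemas.computeIfAbsent(topic, t -> fetchBySubject.apply(t + "-value"));
    }

    public static void main(String[] args) {
        SchemaCacheSketch cache = new SchemaCacheSketch(subject -> "schema-for-" + subject);
        System.out.println(cache.getSchema("events")); // fetched once: schema-for-events-value
        System.out.println(cache.getSchema("events")); // served from the cache
    }
}
```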
@Override
public byte[] serialize(String topic, GenericRecord record) throws IOException {
return serializer.serialize(topic, record);
}
|
Get Avro schema of a topic. It uses the cache that either is set by calling {@link #deserialize(String, byte[])}
or querying this method to avoid hitting Schema Registry for each call.
It uses standard "subject name" strategy and it is topic_name-value.
@param topic a Kafka topic to query the schema for
@return Schema object for the topic
@throws IllegalStateException if there is no schema registered for this topic or it is not able to fetch it
|
serialize
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/SecorSchemaRegistryClient.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/SecorSchemaRegistryClient.java
|
Apache-2.0
|
public static void registerHook(int priority, Runnable hook) {
HOOKS.computeIfAbsent(priority, key -> new ArrayList<>()).add(hook);
LOG.info("Shut down hook with priority {} added to shut down hook registry", priority);
}
|
Registry for shutdown hooks.
Allows running shutdown hooks by specific order by executing multiple Runnables on single shutdown hook.
@author Paulius Dambrauskas (p.dambrauskas@gmail.com)
|
registerHook
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ShutdownHookRegistry.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ShutdownHookRegistry.java
|
Apache-2.0
|
public static void runHooks() {
HOOKS.entrySet().stream().sorted(Map.Entry.comparingByKey()).forEach(entry -> {
LOG.info("Running hooks for priority {}", entry.getKey());
entry.getValue().parallelStream().forEach(Runnable::run);
});
}
|
Registry for shutdown hooks.
Allows running shutdown hooks by specific order by executing multiple Runnables on single shutdown hook.
@author Paulius Dambrauskas (p.dambrauskas@gmail.com)
|
runHooks
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ShutdownHookRegistry.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ShutdownHookRegistry.java
|
Apache-2.0
|
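A self-contained sketch of the priority-ordered shutdown hook registry shown in the two rows above, assuming a `TreeMap` for ordering; the real implementation runs hooks of the same priority on a parallel stream, which this sketch does not reproduce.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class ShutdownHookRegistrySketch {
    // TreeMap keeps priorities sorted, so hooks run lowest priority first.
    private static final Map<Integer, List<Runnable>> HOOKS = new TreeMap<>();

    public static synchronized void registerHook(int priority, Runnable hook) {
        HOOKS.computeIfAbsent(priority, key -> new ArrayList<>()).add(hook);
    }

    public static synchronized void runHooks() {
        // Hooks within the same priority carry no ordering guarantee.
        HOOKS.values().forEach(hooks -> hooks.forEach(Runnable::run));
    }

    public static void main(String[] args) {
        registerHook(2, () -> System.out.println("flush buffers"));
        registerHook(1, () -> System.out.println("final upload"));
        // A single JVM shutdown hook drains the whole registry in priority order.
        Runtime.getRuntime().addShutdownHook(new Thread(ShutdownHookRegistrySketch::runHooks));
        System.out.println("exiting; hooks run next");
    }
}
```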
public String getTopic() {
return mTopic;
}
|
Topic partition describes a kafka message topic-partition pair.
@author Pawel Garbacki (pawel@pinterest.com)
|
getTopic
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/TopicPartition.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/TopicPartition.java
|
Apache-2.0
|
public int getPartition() {
return mPartition;
}
|
Topic partition describes a kafka message topic-partition pair.
@author Pawel Garbacki (pawel@pinterest.com)
|
getPartition
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/TopicPartition.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/TopicPartition.java
|
Apache-2.0
|
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TopicPartition that = (TopicPartition) o;
if (mPartition != that.mPartition) return false;
if (mTopic != null ? !mTopic.equals(that.mTopic) : that.mTopic != null) return false;
return true;
}
|
Topic partition describes a kafka message topic-partition pair.
@author Pawel Garbacki (pawel@pinterest.com)
|
equals
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/TopicPartition.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/TopicPartition.java
|
Apache-2.0
|
@Override
public int hashCode() {
int result = mTopic != null ? mTopic.hashCode() : 0;
result = 31 * result + mPartition;
return result;
}
|
Topic partition describes a kafka message topic-partition pair.
@author Pawel Garbacki (pawel@pinterest.com)
|
hashCode
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/TopicPartition.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/TopicPartition.java
|
Apache-2.0
|
@Override
public String toString() {
return "TopicPartition{" +
"mTopic='" + mTopic + '\'' +
", mPartition=" + mPartition +
'}';
}
|
Topic partition describes a kafka message topic-partition pair.
@author Pawel Garbacki (pawel@pinterest.com)
|
toString
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/TopicPartition.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/TopicPartition.java
|
Apache-2.0
|
public String getTopic() {
return mTopic;
}
|
Topic partition group describes a kafka message topic-partitions pair.
@author Henry Cai (hcai@pinterest.com)
|
getTopic
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
Apache-2.0
|
public int[] getPartitions() {
return mPartitions;
}
|
Topic partition group describes a kafka message topic-partitions pair.
@author Henry Cai (hcai@pinterest.com)
|
getPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
Apache-2.0
|
public List<TopicPartition> getTopicPartitions() {
List<TopicPartition> tps = new ArrayList<TopicPartition>();
for (int p : mPartitions) {
tps.add(new TopicPartition(mTopic, p));
}
return tps;
}
|
Topic partition group describes a kafka message topic-partitions pair.
@author Henry Cai (hcai@pinterest.com)
|
getTopicPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
Apache-2.0
|
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TopicPartitionGroup that = (TopicPartitionGroup) o;
if (!Arrays.equals(mPartitions, that.mPartitions)) return false;
if (mTopic != null ? !mTopic.equals(that.mTopic) : that.mTopic != null) return false;
return true;
}
|
Topic partition group describes a kafka message topic-partitions pair.
@author Henry Cai (hcai@pinterest.com)
|
equals
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
Apache-2.0
|
@Override
public int hashCode() {
int result = mTopic != null ? mTopic.hashCode() : 0;
result = 31 * result + Arrays.hashCode(mPartitions);
return result;
}
|
Topic partition group describes a kafka message topic-partitions pair.
@author Henry Cai (hcai@pinterest.com)
|
hashCode
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
Apache-2.0
|
@Override
public String toString() {
return "TopicPartitionGroup{" +
"mTopic='" + mTopic + '\'' +
", mPartitions=" + Arrays.toString(mPartitions) +
'}';
}
|
Topic partition group describes a kafka message topic-partitions pair.
@author Henry Cai (hcai@pinterest.com)
|
toString
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/TopicPartitionGroup.java
|
Apache-2.0
|
@Override
public void close() throws IOException {
if (mCurator != null) {
mCurator.close();
}
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
close
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
private Iterable<InetSocketAddress> getZookeeperAddresses() {
String zookeeperQuorum = mConfig.getZookeeperQuorum();
String[] hostports = zookeeperQuorum.split(",");
LinkedList<InetSocketAddress> result = new LinkedList<InetSocketAddress>();
for (String hostport : hostports) {
String[] elements = hostport.split(":");
assert elements.length == 2: Integer.toString(elements.length) + " == 2";
String host = elements[0];
int port = Integer.parseInt(elements[1]);
result.add(InetSocketAddress.createUnresolved(host, port));
}
return result;
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getZookeeperAddresses
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
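A standalone sketch of the `host:port` quorum parsing above, with a hypothetical quorum string; Secor reads the real value from its configuration.

```java
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;

public class QuorumParseExample {
    // Split "host1:2181,host2:2181" into unresolved socket addresses,
    // mirroring getZookeeperAddresses above.
    static List<InetSocketAddress> parse(String quorum) {
        List<InetSocketAddress> result = new ArrayList<>();
        for (String hostPort : quorum.split(",")) {
            String[] parts = hostPort.split(":");
            if (parts.length != 2) {
                throw new IllegalArgumentException("Expected host:port, got " + hostPort);
            }
            result.add(InetSocketAddress.createUnresolved(parts[0], Integer.parseInt(parts[1])));
        }
        return result;
    }

    public static void main(String[] args) {
        // Hypothetical quorum value for illustration only.
        System.out.println(parse("zk1.example.com:2181,zk2.example.com:2181"));
    }
}
```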
public void lock(String lockPath) {
assert mLocks.get(lockPath) == null: "mLocks.get(" + lockPath + ") == null";
InterProcessMutex distributedLock = new InterProcessMutex(mCurator, lockPath);
mLocks.put(lockPath, distributedLock);
try {
distributedLock.acquire();
} catch (Exception ex) {
throw new RuntimeException("Unexpected ZK error", ex);
}
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
lock
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public void unlock(String lockPath) {
InterProcessMutex distributedLock = mLocks.get(lockPath);
assert distributedLock != null: "mLocks.get(" + lockPath + ") != null";
try {
distributedLock.release();
} catch (Exception ex) {
throw new RuntimeException("Unexpected ZK error", ex);
}
mLocks.remove(lockPath);
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
unlock
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
protected String getOffsetGroupPath(String subPath) {
String stripped = StringUtils.strip(mConfig.getKafkaZookeeperPath(), "/");
String path = Joiner.on("/").skipNulls().join(
"",
stripped.equals("") ? null : stripped,
"consumers",
mConfig.getKafkaGroup(),
subPath
);
return path;
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getOffsetGroupPath
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
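A sketch of the offset path construction above using plain streams instead of Guava's `Joiner.on("/").skipNulls()`; the configuration values are hypothetical.

```java
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class OffsetPathExample {
    // Build "/<zkRoot>/consumers/<group>/<subPath>", dropping the zkRoot segment when it is
    // empty, mirroring getOffsetGroupPath above.
    static String offsetGroupPath(String kafkaZookeeperPath, String group, String subPath) {
        String stripped = kafkaZookeeperPath.replaceAll("^/+|/+$", "");
        return Stream.of("", stripped.isEmpty() ? null : stripped, "consumers", group, subPath)
                .filter(Objects::nonNull)
                .collect(Collectors.joining("/"));
    }

    public static void main(String[] args) {
        // Hypothetical configuration values.
        System.out.println(offsetGroupPath("/kafka", "secor_backup", "offsets"));
        // -> /kafka/consumers/secor_backup/offsets
        System.out.println(offsetGroupPath("/", "secor_backup", "offsets"));
        // -> /consumers/secor_backup/offsets
    }
}
```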
protected String getCommittedOffsetGroupPath() {
if (Strings.isNullOrEmpty(mCommittedOffsetGroupPath)) {
String stripped = StringUtils.strip(mConfig.getKafkaZookeeperPath(), "/");
mCommittedOffsetGroupPath = getOffsetGroupPath("offsets");
}
return mCommittedOffsetGroupPath;
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getCommittedOffsetGroupPath
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
protected String getLastSeenOffsetGroupPath() {
if (Strings.isNullOrEmpty(mLastSeenOffsetGroupPath)) {
String stripped = StringUtils.strip(mConfig.getKafkaZookeeperPath(), "/");
mLastSeenOffsetGroupPath = getOffsetGroupPath("lastSeen");
}
return mLastSeenOffsetGroupPath;
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getLastSeenOffsetGroupPath
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
private String getCommittedOffsetTopicPath(String topic) {
return getCommittedOffsetGroupPath() + "/" + topic;
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getCommittedOffsetTopicPath
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
private String getLastSeenOffsetTopicPath(String topic) {
return getLastSeenOffsetGroupPath() + "/" + topic;
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getLastSeenOffsetTopicPath
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
private String getCommittedOffsetPartitionPath(TopicPartition topicPartition) {
return getCommittedOffsetTopicPath(topicPartition.getTopic()) + "/" +
topicPartition.getPartition();
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getCommittedOffsetPartitionPath
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
private String getLastSeenOffsetPartitionPath(TopicPartition topicPartition) {
return getLastSeenOffsetTopicPath(topicPartition.getTopic()) + "/" +
topicPartition.getPartition();
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getLastSeenOffsetPartitionPath
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public long getCommittedOffsetCount(TopicPartition topicPartition) throws Exception {
String offsetPath = getCommittedOffsetPartitionPath(topicPartition);
try {
byte[] data = mCurator.getData().forPath(offsetPath);
return Long.parseLong(new String(data));
} catch (KeeperException.NoNodeException exception) {
LOG.warn("path {} does not exist in zookeeper", offsetPath);
return -1;
}
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getCommittedOffsetCount
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public long getLastSeenOffsetCount(TopicPartition topicPartition) throws Exception {
String offsetPath = getLastSeenOffsetPartitionPath(topicPartition);
try {
byte[] data = mCurator.getData().forPath(offsetPath);
return Long.parseLong(new String(data));
} catch (KeeperException.NoNodeException exception) {
LOG.warn("path {} does not exist in zookeeper", offsetPath);
return -1;
}
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getLastSeenOffsetCount
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public List<Integer> getCommittedOffsetPartitions(String topic) throws Exception {
String topicPath = getCommittedOffsetTopicPath(topic);
List<String> partitions = mCurator.getChildren().forPath(topicPath);
LinkedList<Integer> result = new LinkedList<Integer>();
for (String partitionPath : partitions) {
String[] elements = partitionPath.split("/");
String partition = elements[elements.length - 1];
result.add(Integer.valueOf(partition));
}
return result;
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getCommittedOffsetPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public List<Integer> getLastSeenOffsetPartitions(String topic) throws Exception {
String topicPath = getLastSeenOffsetTopicPath(topic);
List<String> partitions = mCurator.getChildren().forPath(topicPath);
LinkedList<Integer> result = new LinkedList<Integer>();
for (String partitionPath : partitions) {
String[] elements = partitionPath.split("/");
String partition = elements[elements.length - 1];
result.add(Integer.valueOf(partition));
}
return result;
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getLastSeenOffsetPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public List<String> getCommittedOffsetTopics() throws Exception {
String offsetPath = getCommittedOffsetGroupPath();
List<String> topics = mCurator.getChildren().forPath(offsetPath);
LinkedList<String> result = new LinkedList<String>();
for (String topicPath : topics) {
String[] elements = topicPath.split("/");
String topic = elements[elements.length - 1];
result.add(topic);
}
return result;
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getCommittedOffsetTopics
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public List<String> getLastSeenOffsetTopics() throws Exception {
String offsetPath = getLastSeenOffsetGroupPath();
List<String> topics = mCurator.getChildren().forPath(offsetPath);
LinkedList<String> result = new LinkedList<String>();
for (String topicPath : topics) {
String[] elements = topicPath.split("/");
String topic = elements[elements.length - 1];
result.add(topic);
}
return result;
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
getLastSeenOffsetTopics
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
private void createMissingParents(String path) throws Exception {
Stat stat = mCurator.checkExists().forPath(path);
if (stat == null) {
mCurator.create()
.creatingParentsIfNeeded()
.withMode(CreateMode.PERSISTENT)
.withACL(ZooDefs.Ids.OPEN_ACL_UNSAFE)
.forPath(path);
}
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
createMissingParents
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public void setCommittedOffsetCount(TopicPartition topicPartition, long count)
throws Exception {
String offsetPath = getCommittedOffsetPartitionPath(topicPartition);
LOG.info("creating missing parents for zookeeper path {}", offsetPath);
createMissingParents(offsetPath);
byte[] data = Long.toString(count).getBytes();
try {
LOG.info("setting zookeeper path {} value {}", offsetPath, count);
// -1 matches any version
mCurator.setData().forPath(offsetPath, data);
} catch (KeeperException.NoNodeException exception) {
LOG.warn("Failed to set value to path " + offsetPath, exception);
}
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
setCommittedOffsetCount
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
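A sketch of the Curator read/write round trip that the offset methods above rely on; it assumes a reachable ZooKeeper at a hypothetical address and uses a hypothetical offset path.

```java
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class OffsetRoundTripSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical connect string; assumes a ZooKeeper ensemble is reachable there.
        CuratorFramework curator = CuratorFrameworkFactory.newClient(
                "localhost:2181", new ExponentialBackoffRetry(1000, 3));
        curator.start();
        try {
            String offsetPath = "/consumers/secor_backup/offsets/test_topic/0"; // hypothetical path
            // Create the node (and any missing parents) if it does not exist, then store the offset.
            if (curator.checkExists().forPath(offsetPath) == null) {
                curator.create().creatingParentsIfNeeded().forPath(offsetPath);
            }
            curator.setData().forPath(offsetPath, Long.toString(42L).getBytes());
            // Read it back the same way getCommittedOffsetCount does.
            long offset = Long.parseLong(new String(curator.getData().forPath(offsetPath)));
            System.out.println("committed offset: " + offset);
        } finally {
            curator.close();
        }
    }
}
```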
public void setLastSeenOffsetCount(TopicPartition topicPartition, long count)
throws Exception {
String offsetPath = getLastSeenOffsetPartitionPath(topicPartition);
LOG.info("creating missing parents for zookeeper path {}", offsetPath);
createMissingParents(offsetPath);
byte[] data = Long.toString(count).getBytes();
try {
LOG.info("setting zookeeper path {} value {}", offsetPath, count);
// -1 matches any version
mCurator.setData().forPath(offsetPath, data);
} catch (KeeperException.NoNodeException exception) {
LOG.warn("Failed to set value to path " + offsetPath, exception);
}
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
setLastSeenOffsetCount
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public void deleteCommittedOffsetTopicCount(String topic) throws Exception {
List<Integer> partitions = getCommittedOffsetPartitions(topic);
for (Integer partition : partitions) {
TopicPartition topicPartition = new TopicPartition(topic, partition);
String offsetPath = getCommittedOffsetPartitionPath(topicPartition);
LOG.info("deleting path {}", offsetPath);
mCurator.delete().forPath(offsetPath);
}
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
deleteCommittedOffsetTopicCount
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public void deleteLastSeenOffsetTopicCount(String topic) throws Exception {
List<Integer> partitions = getLastSeenOffsetPartitions(topic);
for (Integer partition : partitions) {
TopicPartition topicPartition = new TopicPartition(topic, partition);
String offsetPath = getLastSeenOffsetPartitionPath(topicPartition);
LOG.info("deleting path {}", offsetPath);
mCurator.delete().forPath(offsetPath);
}
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
deleteLastSeenOffsetTopicCount
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public void deleteCommittedOffsetPartitionCount(TopicPartition topicPartition)
throws Exception {
String offsetPath = getCommittedOffsetPartitionPath(topicPartition);
LOG.info("deleting path {}", offsetPath);
mCurator.delete().forPath(offsetPath);
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
deleteCommittedOffsetPartitionCount
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public void deleteLastSeenOffsetPartitionCount(TopicPartition topicPartition)
throws Exception {
String offsetPath = getLastSeenOffsetPartitionPath(topicPartition);
LOG.info("deleting path {}", offsetPath);
mCurator.delete().forPath(offsetPath);
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
deleteLastSeenOffsetPartitionCount
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
protected void setConfig(SecorConfig config) {
this.mConfig = config;
}
|
ZookeeperConnector implements interactions with Zookeeper.
@author Pawel Garbacki (pawel@pinterest.com)
|
setConfig
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/ZookeeperConnector.java
|
Apache-2.0
|
public void start() {
Duration[] defaultLatchIntervals = {Duration.apply(1, TimeUnit.MINUTES)};
Map<String, CustomHttpHandler> handlers = mPrometheusEnabled ?
new Map.Map1<>("/prometheus", new PrometheusHandler()) : Map$.MODULE$.empty();
@SuppressWarnings("deprecation")
AdminServiceFactory adminServiceFactory = new AdminServiceFactory(
this.mPort,
20,
List$.MODULE$.<StatsFactory>empty(),
Option.<String>empty(),
List$.MODULE$.<Regex>empty(),
handlers,
JavaConversions
.asScalaBuffer(Arrays.asList(defaultLatchIntervals)).toList()
);
RuntimeEnvironment runtimeEnvironment = new RuntimeEnvironment(this);
adminServiceFactory.apply(runtimeEnvironment);
try {
Properties properties = new Properties();
properties.load(this.getClass().getResource("build.properties").openStream());
String buildRevision = properties.getProperty("build_revision", "unknown");
LOG.info("build.properties build_revision: {}",
properties.getProperty("build_revision", "unknown"));
StatsUtil.setLabel("secor.build_revision", buildRevision);
} catch (Throwable t) {
LOG.error("Failed to load properties from build.properties", t);
}
}
|
OstrichAdminService initializes export of metrics to Ostrich.
@author Pawel Garbacki (pawel@pinterest.com)
|
start
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/monitoring/OstrichAdminService.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/monitoring/OstrichAdminService.java
|
Apache-2.0
|
@Override
public void handle(HttpExchange exchange) {
Optional<PrometheusMeterRegistry> registry = Metrics.globalRegistry.getRegistries().stream()
.filter(meterRegistry -> meterRegistry instanceof PrometheusMeterRegistry)
.map(meterRegistry -> (PrometheusMeterRegistry) meterRegistry)
.findFirst();
if (registry.isPresent()) {
this.render(registry.get().scrape(), exchange, HttpStatus.SC_OK);
} else {
LOG.warn("Trying to scrape prometheus, while it is disabled, " +
"set \"secor.monitoring.metrics.collector.micrometer.prometheus.enabled\" to \"true\"");
this.render("Not Found", exchange, HttpStatus.SC_NOT_FOUND);
}
}
|
Initializes Http Endpoint for Prometheus
@author Paulius Dambrauskas (p.dambrauskas@gmail.com)
|
handle
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/common/monitoring/PrometheusHandler.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/common/monitoring/PrometheusHandler.java
|
Apache-2.0
|
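A sketch of how a Micrometer `PrometheusMeterRegistry` added to the global registry produces the text that the handler above serves; it assumes the `micrometer-registry-prometheus` dependency and records a made-up metric.

```java
import io.micrometer.core.instrument.Metrics;
import io.micrometer.prometheus.PrometheusConfig;
import io.micrometer.prometheus.PrometheusMeterRegistry;

public class PrometheusScrapeSketch {
    public static void main(String[] args) {
        // Register a Prometheus-backed registry with Micrometer's global registry.
        PrometheusMeterRegistry prometheus = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT);
        Metrics.addRegistry(prometheus);

        // Record a sample metric through the global registry (name and tag are illustrative).
        Metrics.counter("consumer.throughput_bytes", "topic", "test_topic").increment(128);

        // scrape() renders the Prometheus text exposition format that the handler above
        // writes back on the /prometheus endpoint.
        System.out.println(prometheus.scrape());
    }
}
```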
private void init() throws Exception {
mOffsetTracker = new OffsetTracker();
if (mConfig.getDeterministicUpload()) {
mDeterministicUploadPolicyTracker = new DeterministicUploadPolicyTracker(
mConfig.getMaxFileTimestampRangeMillis(), mConfig.getMaxInputPayloadSizeBytes()
);
} else {
mDeterministicUploadPolicyTracker = null;
}
mKafkaMessageIterator = KafkaMessageIteratorFactory.getIterator(mConfig.getKafkaMessageIteratorClass(), mConfig);
mMessageReader = new MessageReader(mConfig, mOffsetTracker, mKafkaMessageIterator);
FileRegistry fileRegistry = new FileRegistry(mConfig);
UploadManager uploadManager = ReflectionUtil.createUploadManager(mConfig.getUploadManagerClass(), mConfig);
mUploader = ReflectionUtil.createUploader(mConfig.getUploaderClass());
mUploader.init(mConfig, mOffsetTracker, fileRegistry, uploadManager, mMessageReader, mMetricCollector,
mDeterministicUploadPolicyTracker);
if (mKafkaMessageIterator instanceof RebalanceSubscriber) {
((RebalanceSubscriber) mKafkaMessageIterator).subscribe(new RebalanceHandler(mUploader, fileRegistry, mOffsetTracker), mConfig);
isLegacyConsumer = false;
}
mMessageWriter = new MessageWriter(mConfig, mOffsetTracker, fileRegistry, mDeterministicUploadPolicyTracker);
mMessageParser = ReflectionUtil.createMessageParser(mConfig.getMessageParserClass(), mConfig);
mMessageTransformer = ReflectionUtil.createMessageTransformer(mConfig.getMessageTransformerClass(), mConfig);
mBadMessages = 0.;
mUploadOnShutdown = mConfig.getUploadOnShutdown();
if (mUploadOnShutdown) {
if (mDeterministicUploadPolicyTracker != null) {
throw new RuntimeException("Can't set secor.upload.on.shutdown with secor.upload.deterministic!");
}
ShutdownHookRegistry.registerHook(1, new FinalUploadShutdownHook());
}
}
|
Consumer is a top-level component coordinating reading, writing, and uploading Kafka log
messages. It is implemented as a thread with the intent of running multiple consumer
concurrently.
Note that consumer is not fixed with a specific topic partition. Kafka rebalancing mechanism
allocates topic partitions to consumers dynamically to accommodate consumer population changes.
@author Pawel Garbacki (pawel@pinterest.com)
|
init
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/consumer/Consumer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/consumer/Consumer.java
|
Apache-2.0
|
@Override
public void run() {
if (mCallingSystemExit) {
// We're shutting down because a consumer thread crashed. We don't want to do a final
// upload: we just want to exit. If this particular thread was the one that crashed,
// we would deadlock if we didn't return here (the Consumer thread is blocked
// in System.exit waiting for shutdown hooks to run, and this thread would be blocked
// waiting for the Consumer thread to exit). Even if it were a different thread that crashed,
// we still want to exit the process as soon as possible: until we restart the process,
// the partition being read by the other thread won't be consumed by anyone.
return;
}
mShuttingDown = true;
try {
Consumer.this.join();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
|
Consumer is a top-level component coordinating reading, writing, and uploading Kafka log
messages. It is implemented as a thread with the intent of running multiple consumer
concurrently.
Note that consumer is not fixed with a specific topic partition. Kafka rebalancing mechanism
allocates topic partitions to consumers dynamically to accommodate consumer population changes.
@author Pawel Garbacki (pawel@pinterest.com)
|
run
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/consumer/Consumer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/consumer/Consumer.java
|
Apache-2.0
|
@Override
public void run() {
try {
try {
// init() cannot be called in the constructor since it contains logic dependent on the
// thread id.
init();
} catch (Exception e) {
throw new RuntimeException("Failed to initialize the consumer", e);
}
// check upload policy every N seconds or 10,000 messages/consumer timeouts
long checkEveryNSeconds = Math.min(10 * 60, mConfig.getMaxFileAgeSeconds() / 2);
long checkMessagesPerSecond = mConfig.getMessagesPerSecond();
long nMessages = 0;
long lastChecked = System.currentTimeMillis();
while (true) {
boolean hasMoreMessages = consumeNextMessage();
if (!hasMoreMessages) {
break;
}
if (mUploadOnShutdown && mShuttingDown) {
LOG.info("Shutting down");
break;
}
long now = System.currentTimeMillis();
if (mDeterministicUploadPolicyTracker != null ||
nMessages++ % checkMessagesPerSecond == 0 ||
(now - lastChecked) > checkEveryNSeconds * 1000) {
lastChecked = now;
checkUploadPolicy(false);
}
}
if (mDeterministicUploadPolicyTracker == null) {
LOG.info("Done reading messages; uploading what we have");
checkUploadPolicy(true);
}
LOG.info("Consumer thread done");
} catch (Throwable t) {
LOG.error("Thread failed", t);
mCallingSystemExit = true;
System.exit(1);
}
}
|
Consumer is a top-level component coordinating reading, writing, and uploading Kafka log
messages. It is implemented as a thread with the intent of running multiple consumer
concurrently.
Note that consumer is not fixed with a specific topic partition. Kafka rebalancing mechanism
allocates topic partitions to consumers dynamically to accommodate consumer population changes.
@author Pawel Garbacki (pawel@pinterest.com)
|
run
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/consumer/Consumer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/consumer/Consumer.java
|
Apache-2.0
|
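A standalone sketch of the throttled policy check in the consumer loop above (re-check every N messages or every T seconds, whichever comes first); the intervals and loop body here are made up.

```java
public class ThrottledCheckSketch {
    public static void main(String[] args) throws InterruptedException {
        final long checkEverySeconds = 5;      // hypothetical; Secor derives this from max file age
        final long checkEveryMessages = 1000;  // hypothetical message interval
        long messages = 0;
        long lastChecked = System.currentTimeMillis();

        for (int i = 0; i < 5000; i++) {       // stand-in for the consume loop
            messages++;
            long now = System.currentTimeMillis();
            if (messages % checkEveryMessages == 0
                    || now - lastChecked > checkEverySeconds * 1000) {
                lastChecked = now;
                System.out.println("checking upload policy after " + messages + " messages");
            }
            Thread.sleep(1);                   // stand-in for reading one message
        }
    }
}
```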
protected void checkUploadPolicy(boolean forceUpload) {
try {
mUploader.applyPolicy(forceUpload);
} catch (Exception e) {
throw new RuntimeException("Failed to apply upload policy", e);
}
}
|
Consumer is a top-level component coordinating reading, writing, and uploading Kafka log
messages. It is implemented as a thread with the intent of running multiple consumer
concurrently.
Note that consumer is not fixed with a specific topic partition. Kafka rebalancing mechanism
allocates topic partitions to consumers dynamically to accommodate consumer population changes.
@author Pawel Garbacki (pawel@pinterest.com)
|
checkUploadPolicy
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/consumer/Consumer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/consumer/Consumer.java
|
Apache-2.0
|
protected boolean consumeNextMessage() {
Message rawMessage = null;
try {
boolean hasNext = mMessageReader.hasNext();
if (!hasNext) {
return false;
}
rawMessage = mMessageReader.read();
} catch (LegacyConsumerTimeoutException e) {
// We wait for a new message with a timeout to periodically apply the upload policy
// even if no messages are delivered.
LOG.trace("Consumer timed out", e);
}
if (rawMessage != null) {
// Before parsing, update the offset and remove any redundant data
adjustOffsets(rawMessage);
ParsedMessage parsedMessage = null;
try {
Message transformedMessage = mMessageTransformer.transform(rawMessage);
if (transformedMessage == null) {
return true;
}
parsedMessage = mMessageParser.parse(transformedMessage);
if (parsedMessage != null) {
writeMessage(rawMessage, parsedMessage);
}
} catch (Exception e) {
handleWriteError(rawMessage, parsedMessage, e);
}
}
return true;
}
|
Consumer is a top-level component coordinating reading, writing, and uploading Kafka log
messages. It is implemented as a thread with the intent of running multiple consumer
concurrently.
Note that consumer is not fixed with a specific topic partition. Kafka rebalancing mechanism
allocates topic partitions to consumers dynamically to accommodate consumer population changes.
@author Pawel Garbacki (pawel@pinterest.com)
|
consumeNextMessage
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/consumer/Consumer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/consumer/Consumer.java
|
Apache-2.0
|
public OffsetTracker getOffsetTracker() {
return this.mOffsetTracker;
}
|
Helper to get the offset tracker (used in tests)
@return the offset tracker
|
getOffsetTracker
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/consumer/Consumer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/consumer/Consumer.java
|
Apache-2.0
|
private void adjustOffsets(Message rawMessage) {
try {
mMessageWriter.adjustOffset(rawMessage, isLegacyConsumer);
} catch (IOException e) {
throw new RuntimeException("Failed to adjust offset.", e);
}
}
|
Helper to get the offset tracker (used in tests)
@return the offset tracker
|
adjustOffsets
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/consumer/Consumer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/consumer/Consumer.java
|
Apache-2.0
|
private void writeMessage(Message rawMessage, ParsedMessage parsedMessage) throws Exception {
mMessageWriter.write(parsedMessage);
mBadMessages *= DECAY;
mMetricCollector.metric("consumer.message_size_bytes", rawMessage.getPayload().length,
rawMessage.getTopic());
mMetricCollector.increment("consumer.throughput_bytes", rawMessage.getPayload().length,
rawMessage.getTopic());
if (mDeterministicUploadPolicyTracker != null) {
mDeterministicUploadPolicyTracker.track(rawMessage);
}
}
|
Helper to get the offset tracker (used in tests)
@return the offset tracker
|
writeMessage
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/consumer/Consumer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/consumer/Consumer.java
|
Apache-2.0
|
private void handleWriteError(Message rawMessage, ParsedMessage parsedMessage, Exception exception) {
mMetricCollector.increment("consumer.message_errors.count", rawMessage.getTopic());
mBadMessages++;
if (mMaxBadMessages != -1 && mBadMessages > mMaxBadMessages) {
throw new RuntimeException("Failed to write message " + rawMessage, exception);
}
if (LOG.isTraceEnabled()) {
LOG.trace("Failed to write message raw: {}; parsed: {}", rawMessage, parsedMessage, exception);
}
}
|
Helper to get the offset tracker (used in tests)
@return the offset tracker
|
handleWriteError
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/consumer/Consumer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/consumer/Consumer.java
|
Apache-2.0
|
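A sketch of the decaying bad-message counter used by `writeMessage` and `handleWriteError` above: each success shrinks the score multiplicatively, each failure increments it, so only clustered failures cross the threshold. The decay factor and threshold here are made up.

```java
public class DecayingErrorCounterSketch {
    public static void main(String[] args) {
        final double decay = 0.9;            // hypothetical decay factor
        final double maxBadMessages = 3.0;   // hypothetical threshold
        double badMessages = 0.0;

        boolean[] outcomes = {true, false, true, true, false, false, false, false};
        for (boolean success : outcomes) {
            if (success) {
                badMessages *= decay;        // good message: shrink the score
            } else {
                badMessages++;               // bad message: bump the score
                if (badMessages > maxBadMessages) {
                    System.out.println("too many clustered failures, giving up");
                    return;
                }
            }
            System.out.printf("badMessages=%.2f%n", badMessages);
        }
    }
}
```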
public long getOffset() {
return this.mOffset;
}
|
Generic Object used to read next message from various file reader
implementations
@author Praveen Murugesan (praveen@uber.com)
|
getOffset
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/KeyValue.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/KeyValue.java
|
Apache-2.0
|
public byte[] getKafkaKey() {
return this.mKafkaKey;
}
|
Generic Object used to read next message from various file reader
implementations
@author Praveen Murugesan (praveen@uber.com)
|
getKafkaKey
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/KeyValue.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/KeyValue.java
|
Apache-2.0
|
public byte[] getValue() {
return this.mValue;
}
|
Generic Object used to read next message from various file reader
implementations
@author Praveen Murugesan (praveen@uber.com)
|
getValue
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/KeyValue.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/KeyValue.java
|
Apache-2.0
|
public long getTimestamp() {
return this.mTimestamp;
}
|
Generic Object used to read next message from various file reader
implementations
@author Praveen Murugesan (praveen@uber.com)
|
getTimestamp
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/KeyValue.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/KeyValue.java
|
Apache-2.0
|
public boolean hasKafkaKey() {
return this.mKafkaKey != null && this.mKafkaKey.length != 0;
}
|
Generic Object used to read next message from various file reader
implementations
@author Praveen Murugesan (praveen@uber.com)
|
hasKafkaKey
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/KeyValue.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/KeyValue.java
|
Apache-2.0
|
public boolean hasTimestamp(){
return this.mTimestamp != -1;
}
|
Generic Object used to read next message from various file reader
implementations
@author Praveen Murugesan (praveen@uber.com)
|
hasTimestamp
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/KeyValue.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/KeyValue.java
|
Apache-2.0
|
public List<MessageHeader> getHeaders() {
return mHeaders;
}
|
Generic Object used to read next message from various file reader
implementations
@author Praveen Murugesan (praveen@uber.com)
|
getHeaders
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/KeyValue.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/KeyValue.java
|
Apache-2.0
|
@Override
public void run() {
try {
FileUtils.deleteDirectory(this.mStagingDir);
} catch (IOException e) {
LOG.error("Failed deleting file", e);
}
}
|
Runnable used to delete staging folder content.
Deletes folders content, while keeping folder itself.
@author Paulius Dambrauskas (p.dambrauskas@gmail.com)
|
run
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/StagingDirectoryCleaner.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/StagingDirectoryCleaner.java
|
Apache-2.0
|
@Override
public FileReader BuildFileReader(LogFilePath logFilePath,
CompressionCodec codec) throws Exception {
return new JsonORCFileReader(logFilePath, codec);
}
|
ORC reader/writer implementation
@author Ashish (ashu.impetus@gmail.com)
|
BuildFileReader
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public FileWriter BuildFileWriter(LogFilePath logFilePath,
CompressionCodec codec) throws Exception {
return new JsonORCFileWriter(logFilePath, codec);
}
|
ORC reader/writer implementation
@author Ashish (ashu.impetus@gmail.com)
|
BuildFileWriter
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public KeyValue next() throws IOException {
boolean endOfBatch = false;
StringWriter sw = new StringWriter();
if (rowIndex > batch.size - 1) {
endOfBatch = !rows.nextBatch(batch);
rowIndex = 0;
}
if (endOfBatch) {
rows.close();
return null;
}
try {
JsonFieldFiller.processRow(new JSONWriter(sw), batch, schema,
rowIndex);
} catch (JSONException e) {
LOG.error("Unable to parse json {}", sw.toString());
return null;
}
rowIndex++;
return new KeyValue(offset++, sw.toString().getBytes(StandardCharsets.UTF_8));
}
|
ORC reader/writer implementation
@author Ashish (ashu.impetus@gmail.com)
|
next
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public void close() throws IOException {
rows.close();
}
|
ORC reader/writer implementation
@author Ashish (ashu.impetus@gmail.com)
|
close
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public long getLength() throws IOException {
return writer.getRawDataSize();
}
|
ORC reader/writer implementation
@author Ashish (ashu.impetus@gmail.com)
|
getLength
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public void write(KeyValue keyValue) throws IOException {
JsonElement jsonElement = gson.fromJson(new String(keyValue.getValue()), JsonElement.class);
if (jsonElement instanceof JsonObject) {
writeOne((JsonObject) jsonElement);
} else if (jsonElement instanceof JsonArray) {
// save each element in the array as a separate record
for (JsonElement arrayElement : (JsonArray) jsonElement) {
if (arrayElement instanceof JsonObject) {
writeOne((JsonObject) arrayElement);
} else {
throw new IOException("Cannot write " + keyValue + ": unsupported type " + jsonElement);
}
}
} else {
throw new IOException("Cannot write " + keyValue + ": unsupported type " + jsonElement);
}
}
|
ORC reader/writer implementation
@author Ashish (ashu.impetus@gmail.com)
|
write
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
Apache-2.0
|
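A sketch of the object-versus-array dispatch performed by the ORC `write` method above, using Gson the same way; printing stands in for the actual `writeOne` call.

```java
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;

public class JsonDispatchSketch {
    // Treat a top-level JSON array as one record per element, as the ORC writer above does.
    static void writeRecords(String json) {
        JsonElement element = new Gson().fromJson(json, JsonElement.class);
        if (element instanceof JsonObject) {
            System.out.println("record: " + element);
        } else if (element instanceof JsonArray) {
            for (JsonElement item : (JsonArray) element) {
                if (!(item instanceof JsonObject)) {
                    throw new IllegalArgumentException("unsupported array element: " + item);
                }
                System.out.println("record: " + item);
            }
        } else {
            throw new IllegalArgumentException("unsupported type: " + element);
        }
    }

    public static void main(String[] args) {
        writeRecords("{\"id\":1}");
        writeRecords("[{\"id\":2},{\"id\":3}]");
    }
}
```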
private void writeOne(JsonObject object) throws IOException {
rowIndex = batch.size++;
VectorColumnFiller.fillRow(rowIndex, converters, schema, batch, object);
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
}
|
ORC reader/writer implementation
@author Ashish (ashu.impetus@gmail.com)
|
writeOne
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public void close() throws IOException {
writer.addRowBatch(batch);
writer.close();
}
|
ORC reader/writer implementation
@author Ashish (ashu.impetus@gmail.com)
|
close
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
Apache-2.0
|
private CompressionKind resolveCompression(CompressionCodec codec) {
if (codec instanceof Lz4Codec)
return CompressionKind.LZ4;
else if (codec instanceof SnappyCodec)
return CompressionKind.SNAPPY;
// although GZip and ZLIB are not the same thing,
// there is no better-named codec for this case, so
// use the Hadoop Gzip codec to enable ORC ZLIB compression
else if (codec instanceof GzipCodec)
return CompressionKind.ZLIB;
else
return CompressionKind.NONE;
}
|
Used for returning the compression kind used in ORC
@param codec
@return
|
resolveCompression
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/JsonORCFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public FileReader BuildFileReader(LogFilePath logFilePath, CompressionCodec codec) throws Exception {
return new MessagePackSequenceFileReader(logFilePath);
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
BuildFileReader
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public FileWriter BuildFileWriter(LogFilePath logFilePath, CompressionCodec codec) throws IOException {
return new MessagePackSequenceFileWriter(logFilePath, codec);
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
BuildFileWriter
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public KeyValue next() throws IOException {
if (mReader.next(mKey, mValue)) {
try(MessageUnpacker unpacker = MessagePack.newDefaultUnpacker(mKey.getBytes())) {
int mapSize = unpacker.unpackMapHeader();
long offset = 0;
long timestamp = -1;
byte[] keyBytes = EMPTY_BYTES;
for (int i = 0; i < mapSize; i++) {
int key = unpacker.unpackInt();
switch (key) {
case KAFKA_MESSAGE_OFFSET:
offset = unpacker.unpackLong();
break;
case KAFKA_MESSAGE_TIMESTAMP:
timestamp = unpacker.unpackLong();
break;
case KAFKA_HASH_KEY:
int keySize = unpacker.unpackBinaryHeader();
keyBytes = new byte[keySize];
unpacker.readPayload(keyBytes);
break;
}
}
return new KeyValue(offset, keyBytes, Arrays.copyOfRange(mValue.getBytes(), 0, mValue.getLength()), timestamp);
}
} else {
return null;
}
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
next
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public void close() throws IOException {
this.mReader.close();
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
close
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public long getLength() throws IOException {
return this.mWriter.getLength();
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
getLength
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public void write(KeyValue keyValue) throws IOException {
byte[] kafkaKey = keyValue.hasKafkaKey() ? keyValue.getKafkaKey() : new byte[0];
long timestamp = keyValue.getTimestamp();
final int timestampLength = (keyValue.hasTimestamp()) ? 10 : 0;
// output size estimate
// 1 - map header
// 1 - message pack key
// 9 - max kafka offset
// 1 - message pack key
// 9 - kafka timestamp
// 1 - message pack key
// 5 - max (sane) kafka key size
// N - size of kafka key
// = 27 + N
ByteArrayOutputStream out = new ByteArrayOutputStream(17 + timestampLength + kafkaKey.length);
MessagePacker packer = MessagePack.newDefaultPacker(out)
.packMapHeader(numberOfFieldsMappedInHeader(keyValue))
.packInt(KAFKA_MESSAGE_OFFSET)
.packLong(keyValue.getOffset());
if (keyValue.hasTimestamp())
packer.packInt(KAFKA_MESSAGE_TIMESTAMP)
.packLong(timestamp);
if (keyValue.hasKafkaKey())
packer.packInt(KAFKA_HASH_KEY)
.packBinaryHeader(kafkaKey.length)
.writePayload(kafkaKey);
packer.close();
byte[] outBytes = out.toByteArray();
this.mKey.set(outBytes, 0, outBytes.length);
this.mValue.set(keyValue.getValue(), 0, keyValue.getValue().length);
this.mWriter.append(this.mKey, this.mValue);
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
write
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public void close() throws IOException {
this.mWriter.close();
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
close
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
Apache-2.0
|
private int numberOfFieldsMappedInHeader(KeyValue keyValue) {
int fields = 1;
if (keyValue.hasKafkaKey())
fields++;
if (keyValue.hasTimestamp())
fields++;
return fields;
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
numberOfFieldsMappedInHeader
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/MessagePackSequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public FileReader BuildFileReader(LogFilePath logFilePath, CompressionCodec codec) throws Exception {
return new ProtobufParquetFileReader(logFilePath, codec);
}
|
Implementation for reading/writing protobuf messages to/from Parquet files.
@author Michael Spector (spektom@gmail.com)
|
BuildFileReader
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public FileWriter BuildFileWriter(LogFilePath logFilePath, CompressionCodec codec) throws Exception {
return new ProtobufParquetFileWriter(logFilePath, codec);
}
|
Implementation for reading/writing protobuf messages to/from Parquet files.
@author Michael Spector (spektom@gmail.com)
|
BuildFileWriter
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public KeyValue next() throws IOException {
Builder messageBuilder = (Builder) reader.read();
if (messageBuilder != null) {
return new KeyValue(offset++, messageBuilder.build().toByteArray());
}
return null;
}
|
Implementation for reading/writing protobuf messages to/from Parquet files.
@author Michael Spector (spektom@gmail.com)
|
next
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
Apache-2.0
|
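For context, a minimal sketch of the read path that next() above wraps, using parquet-protobuf's ProtoParquetReader directly. The file path is illustrative, and the exact reader setup inside ProtobufParquetFileReader is not shown in this excerpt:

import com.google.protobuf.Message;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.proto.ProtoParquetReader;

public class ProtoParquetReadSketch {
    public static void main(String[] args) throws Exception {
        // ProtoParquetReader hands back builders; build() yields the immutable message,
        // mirroring the cast-and-build pattern in next() above.
        try (ParquetReader<Message.Builder> reader =
                 ProtoParquetReader.<Message.Builder>builder(new Path("/tmp/events.parquet")).build()) {
            Message.Builder builder;
            while ((builder = reader.read()) != null) {
                System.out.println(builder.build());
            }
        }
    }
}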
@Override
public void close() throws IOException {
reader.close();
}
|
Implementation for reading/writing protobuf messages to/from Parquet files.
@author Michael Spector (spektom@gmail.com)
|
close
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public long getLength() throws IOException {
return writer.getDataSize();
}
|
Implementation for reading/writing protobuf messages to/from Parquet files.
@author Michael Spector (spektom@gmail.com)
|
getLength
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public void write(KeyValue keyValue) throws IOException {
Message message = protobufUtil.decodeProtobufOrJsonMessage(topic, keyValue.getValue());
writer.write(message);
}
|
Implementation for reading/writing protobuf messages to/from Parquet files.
@author Michael Spector (spektom@gmail.com)
|
write
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
Apache-2.0
|
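The corresponding write path, sketched with parquet-protobuf's ProtoParquetWriter. The protobuf well-known Timestamp type stands in for the topic-specific message class that protobufUtil resolves in the factory, and the file path is illustrative:

import com.google.protobuf.Timestamp;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.proto.ProtoParquetWriter;

public class ProtoParquetWriteSketch {
    public static void main(String[] args) throws Exception {
        // Timestamp is used here only so the sketch needs no generated proto classes.
        try (ProtoParquetWriter<Timestamp> writer =
                 new ProtoParquetWriter<>(new Path("/tmp/timestamps.parquet"), Timestamp.class)) {
            writer.write(Timestamp.newBuilder().setSeconds(1700000000L).build());
        }
    }
}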
@Override
public void close() throws IOException {
writer.close();
}
|
Implementation for reading/writing protobuf messages to/from Parquet files.
@author Michael Spector (spektom@gmail.com)
|
close
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ProtobufParquetFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public FileReader BuildFileReader(LogFilePath logFilePath, CompressionCodec codec) throws Exception {
return new SequenceFileReader(logFilePath);
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
BuildFileReader
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public FileWriter BuildFileWriter(LogFilePath logFilePath, CompressionCodec codec) throws IOException {
return new SequenceFileWriter(logFilePath, codec);
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
BuildFileWriter
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public KeyValue next() throws IOException {
if (mReader.next(mKey, mValue)) {
return new KeyValue(mKey.get(), Arrays.copyOfRange(mValue.getBytes(), 0, mValue.getLength()));
} else {
return null;
}
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
next
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
Apache-2.0
|
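A self-contained sketch of the underlying Hadoop SequenceFile pattern these methods wrap: LongWritable offsets as keys and BytesWritable payloads as values, with the same copyOfRange trimming used in next() above. The factory's own writer/reader construction (and its compression handling) is not shown in this excerpt, so this assumes the simplest uncompressed case:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;

public class SequenceFileSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/messages.seq");

        // Write a couple of (offset, payload) records.
        try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
                SequenceFile.Writer.file(path),
                SequenceFile.Writer.keyClass(LongWritable.class),
                SequenceFile.Writer.valueClass(BytesWritable.class))) {
            writer.append(new LongWritable(0L), new BytesWritable("first".getBytes(StandardCharsets.UTF_8)));
            writer.append(new LongWritable(1L), new BytesWritable("second".getBytes(StandardCharsets.UTF_8)));
        }

        // Read them back, trimming the reused BytesWritable buffer to its logical length.
        LongWritable key = new LongWritable();
        BytesWritable value = new BytesWritable();
        try (SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(path))) {
            while (reader.next(key, value)) {
                byte[] payload = Arrays.copyOfRange(value.getBytes(), 0, value.getLength());
                System.out.println(key.get() + " -> " + new String(payload, StandardCharsets.UTF_8));
            }
        }
    }
}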
@Override
public void close() throws IOException {
this.mReader.close();
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
close
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
Apache-2.0
|