code
stringlengths 23
201k
| docstring
stringlengths 17
96.2k
| func_name
stringlengths 0
235
| language
stringclasses 1
value | repo
stringlengths 8
72
| path
stringlengths 11
317
| url
stringlengths 57
377
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
@Override
public long getLength() throws IOException {
    // Report how many bytes the wrapped sequence-file writer has produced.
    long bytesWritten = mWriter.getLength();
    return bytesWritten;
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
getLength
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public void write(KeyValue keyValue) throws IOException {
    // The Kafka offset becomes the sequence-file key and the raw message
    // bytes become the value.
    mKey.set(keyValue.getOffset());
    byte[] payload = keyValue.getValue();
    mValue.set(payload, 0, payload.length);
    mWriter.append(mKey, mValue);
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
write
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public void close() throws IOException {
    // Log before closing so the file path is recorded even when close()
    // throws; previously the log line was unreachable on failure.
    LOG.info("Closing sequence file writer: {}", fsPath);
    this.mWriter.close();
}
|
Sequence file reader writer implementation
@author Praveen Murugesan (praveen@uber.com)
|
close
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/SequenceFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public FileReader BuildFileReader(LogFilePath logFilePath, CompressionCodec codec) throws Exception {
    // Construct a thrift-backed Parquet reader for the given log file.
    FileReader thriftReader = new ThriftParquetFileReader(logFilePath, codec);
    return thriftReader;
}
|
Adapted from
com.pinterest.secor.io.impl.ProtobufParquetFileReaderWriterFactory
Implementation for reading/writing thrift messages to/from Parquet files.
|
BuildFileReader
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public FileWriter BuildFileWriter(LogFilePath logFilePath, CompressionCodec codec) throws Exception {
    // Construct a thrift-backed Parquet writer for the given log file.
    FileWriter thriftWriter = new ThriftParquetFileWriter(logFilePath, codec);
    return thriftWriter;
}
|
Adapted from
com.pinterest.secor.io.impl.ProtobufParquetFileReaderWriterFactory
Implementation for reading/writing thrift messages to/from Parquet files.
|
BuildFileWriter
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
Apache-2.0
|
@SuppressWarnings("rawtypes")
@Override
public KeyValue next() throws IOException {
    // Pull the next thrift record from the Parquet reader; null signals EOF.
    TBase msg = reader.read();
    if (msg == null) {
        return null;
    }
    try {
        // Re-encode the thrift struct to raw bytes and pair it with a
        // monotonically increasing synthetic offset.
        return new KeyValue(offset++, thriftUtil.encodeMessage(msg));
    } catch (TException | InstantiationException | IllegalAccessException e) {
        // Collapse the three identical catch blocks into one multi-catch,
        // preserving the cause; callers only expect IOException from next().
        throw new IOException("cannot write message", e);
    }
}
|
Adapted from
com.pinterest.secor.io.impl.ProtobufParquetFileReaderWriterFactory
Implementation for reading/writing thrift messages to/from Parquet files.
|
next
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public void close() throws IOException {
    // Release the underlying Parquet reader and any resources it holds.
    reader.close();
}
|
Adapted from
com.pinterest.secor.io.impl.ProtobufParquetFileReaderWriterFactory
Implementation for reading/writing thrift messages to/from Parquet files.
|
close
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public long getLength() throws IOException {
    // The Parquet writer exposes its current size via getDataSize().
    long dataSize = writer.getDataSize();
    return dataSize;
}
|
Adapted from
com.pinterest.secor.io.impl.ProtobufParquetFileReaderWriterFactory
Implementation for reading/writing thrift messages to/from Parquet files.
|
getLength
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
Apache-2.0
|
@SuppressWarnings("unchecked")
@Override
public void write(KeyValue keyValue) throws IOException {
    try {
        // Decode the raw Kafka bytes into the topic's thrift type, then hand
        // the struct to the Parquet writer.
        Object decoded = thriftUtil.decodeMessage(topic, keyValue.getValue());
        writer.write(decoded);
    } catch (Exception e) {
        // Wrap decode and write failures alike in the IOException the
        // FileWriter contract exposes, keeping the original cause attached.
        throw new IOException("cannot write message", e);
    }
}
|
Adapted from
com.pinterest.secor.io.impl.ProtobufParquetFileReaderWriterFactory
Implementation for reading/writing thrift messages to/from Parquet files.
|
write
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
Apache-2.0
|
@Override
public void close() throws IOException {
    // Close the underlying Parquet writer.
    writer.close();
}
|
Adapted from
com.pinterest.secor.io.impl.ProtobufParquetFileReaderWriterFactory
Implementation for reading/writing thrift messages to/from Parquet files.
|
close
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/io/impl/ThriftParquetFileReaderWriterFactory.java
|
Apache-2.0
|
public static void main(String[] args) {
    // This entry point takes no positional arguments; configuration comes in
    // via the -Dconfig / -Dlog4j.configuration system properties.
    if (args.length != 0) {
        System.err.println("Usage: java -Dconfig=<secor_properties> " +
                           "-Dlog4j.configuration=<log4j_properties> ConsumerMain");
        return;
    }
    try {
        SecorConfig config = SecorConfig.load();
        // Ensure locally staged messages are wiped on shutdown.
        String stagingDirectoryPath = config.getLocalPath() + '/' + IdUtil.getLocalMessageDir();
        ShutdownHookRegistry.registerHook(10, new StagingDirectoryCleaner(stagingDirectoryPath));
        MetricCollector metricCollector = ReflectionUtil.createMetricCollector(config.getMetricsCollectorClass());
        metricCollector.initialize(config);
        OstrichAdminService ostrichService = new OstrichAdminService(config);
        ostrichService.start();
        FileUtil.configure(config);
        // Clean up stale local logs from previous runs before consuming.
        LogFileDeleter logFileDeleter = new LogFileDeleter(config);
        logFileDeleter.deleteOldLogs();
        RateLimitUtil.configure(config);
        LOG.info("starting {} consumer threads", config.getConsumerThreads());
        // Diamond operator: the element type is already spelled on the left.
        LinkedList<Consumer> consumers = new LinkedList<>();
        for (int i = 0; i < config.getConsumerThreads(); ++i) {
            Consumer consumer = new Consumer(config, metricCollector);
            consumers.add(consumer);
            consumer.start();
        }
        // Block until every consumer thread exits.
        for (Consumer consumer : consumers) {
            consumer.join();
        }
    } catch (Throwable t) {
        // Any failure (including InterruptedException from join) is fatal.
        LOG.error("Consumer failed", t);
        System.exit(1);
    }
}
|
Secor consumer. See
https://docs.google.com/a/pinterest.com/document/d/1RHeH79O0e1WzsxumE24MIYqJFnRoRzQ3c74Wq3Q4R40/edit
for detailed design.
Run:
$ cd optimus/secor
$ mvn package
$ cd target
$ java -ea -Dlog4j.configuration=log4j.dev.properties -Dconfig=secor.dev.backup.properties \
-cp "secor-0.1-SNAPSHOT.jar:lib/*" com.pinterest.secor.main.ConsumerMain
@author Pawel Garbacki (pawel@pinterest.com)
|
main
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/main/ConsumerMain.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/main/ConsumerMain.java
|
Apache-2.0
|
private static CommandLine parseArgs(String[] args) throws ParseException {
    // Defines the CLI: -f/--file <sequence_file_name> plus the optional
    // -o/--print_offsets_only flag, then parses argv with the GNU parser.
    Options cliOptions = new Options();
    cliOptions.addOption(OptionBuilder.withLongOpt("file")
            .withDescription("sequence file to read")
            .hasArg()
            .withArgName("<sequence_file_name>")
            .withType(String.class)
            .create("f"));
    cliOptions.addOption("o", "print_offsets_only", false,
            "whether to print only offsets " + "ignoring the message payload");
    return new GnuParser().parse(cliOptions, args);
}
|
Log file printer main.
Run:
$ cd optimus/secor
$ mvn package
$ cd target
$ java -ea -Dlog4j.configuration=log4j.dev.properties -Dconfig=secor.dev.backup.properties \
-cp "secor-0.1-SNAPSHOT.jar:lib/*" com.pinterest.secor.main.LogFilePrinterMain -f \
s3n://bucket/path
@author Pawel Garbacki (pawel@pinterest.com)
|
parseArgs
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/main/LogFilePrinterMain.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/main/LogFilePrinterMain.java
|
Apache-2.0
|
public static void main(String[] args) {
    try {
        // Parse CLI flags, load Secor config, then print the requested file.
        CommandLine commandLine = parseArgs(args);
        SecorConfig config = SecorConfig.load();
        FileUtil.configure(config);
        boolean offsetsOnly = commandLine.hasOption("print_offsets_only");
        LogFilePrinter printer = new LogFilePrinter(offsetsOnly);
        printer.printFile(commandLine.getOptionValue("file"));
    } catch (Throwable t) {
        // Any failure is fatal for this one-shot tool.
        LOG.error("Log file printer failed", t);
        System.exit(1);
    }
}
|
Log file printer main.
Run:
$ cd optimus/secor
$ mvn package
$ cd target
$ java -ea -Dlog4j.configuration=log4j.dev.properties -Dconfig=secor.dev.backup.properties \
-cp "secor-0.1-SNAPSHOT.jar:lib/*" com.pinterest.secor.main.LogFilePrinterMain -f \
s3n://bucket/path
@author Pawel Garbacki (pawel@pinterest.com)
|
main
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/main/LogFilePrinterMain.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/main/LogFilePrinterMain.java
|
Apache-2.0
|
private static CommandLine parseArgs(String[] args) throws ParseException {
    // Defines the verifier CLI: topic, optional offset range, expected
    // message count, and the sequential-offset check flag.
    Options cliOptions = new Options();
    cliOptions.addOption(OptionBuilder.withLongOpt("topic")
            .withDescription("kafka topic name")
            .hasArg()
            .withArgName("<topic>")
            .withType(String.class)
            .create("t"));
    cliOptions.addOption(OptionBuilder.withLongOpt("start_offset")
            .withDescription("offset identifying the first set of files to check")
            .hasArg()
            .withArgName("<offset>")
            .withType(Long.class)
            .create("s"));
    cliOptions.addOption(OptionBuilder.withLongOpt("end_offset")
            .withDescription("offset identifying the last set of files to check")
            .hasArg()
            .withArgName("<offset>")
            .withType(Long.class)
            .create("e"));
    cliOptions.addOption(OptionBuilder.withLongOpt("messages")
            .withDescription("expected number of messages")
            .hasArg()
            .withArgName("<num_messages>")
            .withType(Number.class)
            .create("m"));
    cliOptions.addOption("q", "sequence_offsets", false,
            "whether to verify that offsets " +
            "increase sequentially. Requires loading all offsets in a snapshot " +
            "to memory so use cautiously");
    return new GnuParser().parse(cliOptions, args);
}
|
Log file verifier main.
Run:
$ cd optimus/secor
$ mvn package
$ cd target
$ java -ea -Dlog4j.configuration=log4j.dev.properties -Dconfig=secor.dev.backup.properties \
-cp "secor-0.1-SNAPSHOT.jar:lib/*" com.pinterest.secor.main.LogFileVerifierMain -t \
topic -q
@author Pawel Garbacki (pawel@pinterest.com)
|
parseArgs
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/main/LogFileVerifierMain.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/main/LogFileVerifierMain.java
|
Apache-2.0
|
public static void main(String[] args) {
    try {
        CommandLine commandLine = parseArgs(args);
        SecorConfig config = SecorConfig.load();
        FileUtil.configure(config);
        LogFileVerifier verifier = new LogFileVerifier(config,
            commandLine.getOptionValue("topic"));
        // Defaults cover the full offset range; -2 is the "from the start"
        // sentinel passed through to the verifier.
        long startOffset = -2;
        long endOffset = Long.MAX_VALUE;
        if (commandLine.hasOption("start_offset")) {
            startOffset = Long.parseLong(commandLine.getOptionValue("start_offset"));
        }
        // Bug fix: honor --end_offset even when --start_offset is absent.
        // Previously this check was nested inside the start_offset branch,
        // so a lone --end_offset was silently ignored.
        if (commandLine.hasOption("end_offset")) {
            endOffset = Long.parseLong(commandLine.getOptionValue("end_offset"));
        }
        // -1 means "don't check the message count".
        int numMessages = -1;
        if (commandLine.hasOption("messages")) {
            numMessages = ((Number) commandLine.getParsedOptionValue("messages")).intValue();
        }
        verifier.verifyCounts(startOffset, endOffset, numMessages);
        if (commandLine.hasOption("sequence_offsets")) {
            verifier.verifySequences(startOffset, endOffset);
        }
        System.out.println("verification succeeded");
    } catch (Throwable t) {
        LOG.error("Log file verifier failed", t);
        System.exit(1);
    }
}
|
Log file verifier main.
Run:
$ cd optimus/secor
$ mvn package
$ cd target
$ java -ea -Dlog4j.configuration=log4j.dev.properties -Dconfig=secor.dev.backup.properties \
-cp "secor-0.1-SNAPSHOT.jar:lib/*" com.pinterest.secor.main.LogFileVerifierMain -t \
topic -q
@author Pawel Garbacki (pawel@pinterest.com)
|
main
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/main/LogFileVerifierMain.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/main/LogFileVerifierMain.java
|
Apache-2.0
|
public static void main(String[] args) {
    // Load the config, configure file-system helpers, then run one pass of
    // partition finalization.
    try {
        SecorConfig config = SecorConfig.load();
        FileUtil.configure(config);
        new PartitionFinalizer(config).finalizePartitions();
    } catch (Throwable t) {
        LOG.error("Partition finalizer failed", t);
        System.exit(1);
    }
}
|
Partition finalizer main.
Run:
$ cd optimus/secor
$ mvn package
$ cd target
$ java -ea -Dlog4j.configuration=log4j.dev.properties -Dconfig=secor.dev.backup.properties \
-cp "secor-0.1-SNAPSHOT.jar:lib/*" com.pinterest.secor.main.PartitionFinalizerMain
@author Pawel Garbacki (pawel@pinterest.com)
|
main
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/main/PartitionFinalizerMain.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/main/PartitionFinalizerMain.java
|
Apache-2.0
|
private static void loop(ProgressMonitor progressMonitor, long interval) {
    // Anonymous classes can only capture (effectively) final locals.
    final ProgressMonitor monitor = progressMonitor;
    Runnable exportTask = new Runnable() {
        public void run() {
            try {
                monitor.exportStats();
            } catch (Throwable t) {
                // Swallow everything: an uncaught throwable would suppress
                // all subsequent scheduled runs of this task.
                LOG.error("Progress monitor failed", t);
            }
        }
    };
    // Single-threaded scheduler: export immediately, then every `interval`
    // seconds. The non-daemon scheduler thread keeps the JVM alive.
    ScheduledExecutorService exportScheduler = Executors.newScheduledThreadPool(1);
    exportScheduler.scheduleAtFixedRate(exportTask, 0, interval, TimeUnit.SECONDS);
}
|
Progress monitor main.
Run:
$ cd optimus/secor
$ mvn package
$ cd target
$ java -ea -Dlog4j.configuration=log4j.dev.properties -Dconfig=secor.dev.backup.properties \
-cp "secor-0.1-SNAPSHOT.jar:lib/*" com.pinterest.secor.main.ProgressMonitorMain
@author Pawel Garbacki (pawel@pinterest.com)
|
loop
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/main/ProgressMonitorMain.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/main/ProgressMonitorMain.java
|
Apache-2.0
|
public void run() {
    try {
        // Export one stats snapshot.
        monitor.exportStats();
    } catch (Throwable t) {
        // Swallow everything: under ScheduledExecutorService an uncaught
        // throwable suppresses all future runs of the task.
        LOG.error("Progress monitor failed", t);
    }
}
|
Progress monitor main.
Run:
$ cd optimus/secor
$ mvn package
$ cd target
$ java -ea -Dlog4j.configuration=log4j.dev.properties -Dconfig=secor.dev.backup.properties \
-cp "secor-0.1-SNAPSHOT.jar:lib/*" com.pinterest.secor.main.ProgressMonitorMain
@author Pawel Garbacki (pawel@pinterest.com)
|
run
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/main/ProgressMonitorMain.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/main/ProgressMonitorMain.java
|
Apache-2.0
|
public static void main(String[] args) {
    try {
        SecorConfig config = SecorConfig.load();
        ProgressMonitor progressMonitor = new ProgressMonitor(config);
        long intervalSeconds = config.getMonitoringIntervalSeconds();
        // A non-positive interval means "export once and exit"; a positive
        // interval starts the periodic export loop.
        if (intervalSeconds <= 0) {
            progressMonitor.exportStats();
        } else {
            loop(progressMonitor, intervalSeconds);
        }
    } catch (Throwable t) {
        LOG.error("Progress monitor failed", t);
        System.exit(1);
    }
}
|
Progress monitor main.
Run:
$ cd optimus/secor
$ mvn package
$ cd target
$ java -ea -Dlog4j.configuration=log4j.dev.properties -Dconfig=secor.dev.backup.properties \
-cp "secor-0.1-SNAPSHOT.jar:lib/*" com.pinterest.secor.main.ProgressMonitorMain
@author Pawel Garbacki (pawel@pinterest.com)
|
main
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/main/ProgressMonitorMain.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/main/ProgressMonitorMain.java
|
Apache-2.0
|
private static CommandLine parseArgs(String[] args) throws ParseException {
    // Defines the producer CLI: topic, message/producer counts, producer
    // type, broker address, and an optional timestamp shift.
    Options cliOptions = new Options();
    cliOptions.addOption(OptionBuilder.withLongOpt("topic")
            .withDescription("topic to post to")
            .hasArg()
            .withArgName("<topic>")
            .withType(String.class)
            .create("t"));
    cliOptions.addOption(OptionBuilder.withLongOpt("messages")
            .withDescription("number of messages per producer to post")
            .hasArg()
            .withArgName("<num_messages>")
            .withType(Number.class)
            .create("m"));
    cliOptions.addOption(OptionBuilder.withLongOpt("producers")
            .withDescription("number of producer threads")
            .hasArg()
            .withArgName("<num_producer_threads>")
            .withType(Number.class)
            .create("p"));
    cliOptions.addOption(OptionBuilder.withLongOpt("type")
            .withDescription("type of producer - [json, binary]")
            .hasArg()
            .withArgName("<type>")
            .withType(String.class)
            .create("type"));
    cliOptions.addOption(OptionBuilder.withLongOpt("broker")
            .withDescription("broker string, e.g. localhost:9092")
            .hasArg()
            .withArgName("<broker>")
            .withType(String.class)
            .create("broker"));
    cliOptions.addOption(OptionBuilder.withLongOpt("timeshift")
            .withDescription("message timestamp adjustment in seconds, it will be deducted" +
                             " from current time")
            .hasArg()
            .withArgName("<timeshift>")
            .withType(Number.class)
            .create("timeshift"));
    return new GnuParser().parse(cliOptions, args);
}
|
Test log message producer main.
Run:
$ cd optimus/secor
$ mvn package
$ cd target
$ java -ea -Dlog4j.configuration=log4j.dev.properties -Dconfig=secor.dev.backup.properties \
-cp "secor-0.1-SNAPSHOT.jar:lib/*" com.pinterest.secor.main.TestLogMessageProducerMain \
-t topic -m num_messages -p num_producer_threads
@author Pawel Garbacki (pawel@pinterest.com)
|
parseArgs
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/main/TestLogMessageProducerMain.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/main/TestLogMessageProducerMain.java
|
Apache-2.0
|
public static void main(String[] args) {
    try {
        CommandLine commandLine = parseArgs(args);
        String topic = commandLine.getOptionValue("topic");
        // Fail fast with a clear message when the required numeric options
        // are missing; previously a missing -m or -p surfaced as a bare
        // NullPointerException (only timeshift was null-checked).
        Number messagesNumber = (Number) commandLine.getParsedOptionValue("messages");
        Number producersNumber = (Number) commandLine.getParsedOptionValue("producers");
        if (messagesNumber == null || producersNumber == null) {
            throw new IllegalArgumentException(
                "both -m/--messages and -p/--producers are required");
        }
        int messages = messagesNumber.intValue();
        int producers = producersNumber.intValue();
        String broker = commandLine.getOptionValue("broker");
        String type = commandLine.getOptionValue("type");
        // timeshift is optional and defaults to no adjustment.
        Number timeshiftNumber = (Number) commandLine.getParsedOptionValue("timeshift");
        int timeshift = timeshiftNumber == null ? 0 : timeshiftNumber.intValue();
        // Each producer thread posts `messages` messages to `topic`.
        for (int i = 0; i < producers; ++i) {
            TestLogMessageProducer producer = new TestLogMessageProducer(
                    topic, messages, type, broker, timeshift);
            producer.start();
        }
    } catch (Throwable t) {
        LOG.error("Log message producer failed", t);
        System.exit(1);
    }
}
|
Test log message producer main.
Run:
$ cd optimus/secor
$ mvn package
$ cd target
$ java -ea -Dlog4j.configuration=log4j.dev.properties -Dconfig=secor.dev.backup.properties \
-cp "secor-0.1-SNAPSHOT.jar:lib/*" com.pinterest.secor.main.TestLogMessageProducerMain \
-t topic -m num_messages -p num_producer_threads
@author Pawel Garbacki (pawel@pinterest.com)
|
main
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/main/TestLogMessageProducerMain.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/main/TestLogMessageProducerMain.java
|
Apache-2.0
|
private String bytesToString(byte[] bytes, boolean truncate) {
    // Decode arbitrary binary safely: REPLACE on malformed/unmappable input
    // guarantees no exception, unlike the unspecified `new String(bytes)`.
    CharsetDecoder decoder = Charset.defaultCharset()
            .newDecoder()
            .onMalformedInput(CodingErrorAction.REPLACE)
            .onUnmappableCharacter(CodingErrorAction.REPLACE);
    String decoded;
    try {
        decoded = decoder.decode(ByteBuffer.wrap(bytes)).toString();
    } catch (CharacterCodingException e) {
        // Unreachable with REPLACE configured, but the API forces the catch.
        throw new RuntimeException(e);
    }
    // Cap long binary payloads when asked, marking the cut explicitly.
    if (!truncate || decoded.length() <= TRUNCATED_STRING_MAX_LEN) {
        return decoded;
    }
    return decoded.substring(0, TRUNCATED_STRING_MAX_LEN) + "[...]";
}
|
Message key and payload may be arbitrary binary strings, so we should make sure we don't throw
when logging them by using a CharsetDecoder which replaces bad data. (While in practice `new String(bytes)`
does the same thing, the documentation for that method leaves that behavior unspecified.)
Additionally, in contexts where Message.toString() will be logged at a high level (including exception
messages), we truncate long keys and payloads, which may be very long binary data.
|
bytesToString
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/Message.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/Message.java
|
Apache-2.0
|
protected String fieldsToString(boolean truncate) {
    // Render all message fields; key and payload go through bytesToString
    // so binary data never throws, optionally truncated for log contexts.
    StringBuilder sb = new StringBuilder();
    sb.append("topic='").append(mTopic).append('\'');
    sb.append(", kafkaPartition=").append(mKafkaPartition);
    sb.append(", offset=").append(mOffset);
    sb.append(", kafkaKey=").append(bytesToString(mKafkaKey, truncate));
    sb.append(", payload=").append(bytesToString(mPayload, truncate));
    sb.append(", timestamp=").append(mTimestamp);
    sb.append(", headers=").append(mHeaders);
    return sb.toString();
}
|
Message key and payload may be arbitrary binary strings, so we should make sure we don't throw
when logging them by using a CharsetDecoder which replaces bad data. (While in practice `new String(bytes)`
does the same thing, the documentation for that method leaves that behavior unspecified.)
Additionally, in contexts where Message.toString() will be logged at a high level (including exception
messages), we truncate long keys and payloads, which may be very long binary data.
|
fieldsToString
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/Message.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/Message.java
|
Apache-2.0
|
@Override
public String toString() {
    // Full, untruncated rendering; keys/payloads may be long binary data.
    return "Message{" + fieldsToString(false) + '}';
}
|
Message key and payload may be arbitrary binary strings, so we should make sure we don't throw
when logging them by using a CharsetDecoder which replaces bad data. (While in practice `new String(bytes)`
does the same thing, the documentation for that method leaves that behavior unspecified.)
Additionally, in contexts where Message.toString() will be logged at a high level (including exception
messages), we truncate long keys and payloads, which may be very long binary data.
|
toString
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/Message.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/Message.java
|
Apache-2.0
|
public String getTopic() {
    // Kafka topic this message was consumed from.
    return mTopic;
}
|
Message key and payload may be arbitrary binary strings, so we should make sure we don't throw
when logging them by using a CharsetDecoder which replaces bad data. (While in practice `new String(bytes)`
does the same thing, the documentation for that method leaves that behavior unspecified.)
Additionally, in contexts where Message.toString() will be logged at a high level (including exception
messages), we truncate long keys and payloads, which may be very long binary data.
|
getTopic
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/Message.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/Message.java
|
Apache-2.0
|
public int getKafkaPartition() {
    // Kafka partition number this message came from.
    return mKafkaPartition;
}
|
Message key and payload may be arbitrary binary strings, so we should make sure we don't throw
when logging them by using a CharsetDecoder which replaces bad data. (While in practice `new String(bytes)`
does the same thing, the documentation for that method leaves that behavior unspecified.)
Additionally, in contexts where Message.toString() will be logged at a high level (including exception
messages), we truncate long keys and payloads, which may be very long binary data.
|
getKafkaPartition
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/Message.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/Message.java
|
Apache-2.0
|
public long getOffset() {
    // Kafka offset of this message within its partition.
    return mOffset;
}
|
Message key and payload may be arbitrary binary strings, so we should make sure we don't throw
when logging them by using a CharsetDecoder which replaces bad data. (While in practice `new String(bytes)`
does the same thing, the documentation for that method leaves that behavior unspecified.)
Additionally, in contexts where Message.toString() will be logged at a high level (including exception
messages), we truncate long keys and payloads, which may be very long binary data.
|
getOffset
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/Message.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/Message.java
|
Apache-2.0
|
public byte[] getKafkaKey() {
    // Raw Kafka message key bytes.
    // NOTE(review): returns the internal array without a defensive copy, so
    // callers can mutate this message's key in place -- confirm intended.
    return mKafkaKey;
}
|
Message key and payload may be arbitrary binary strings, so we should make sure we don't throw
when logging them by using a CharsetDecoder which replaces bad data. (While in practice `new String(bytes)`
does the same thing, the documentation for that method leaves that behavior unspecified.)
Additionally, in contexts where Message.toString() will be logged at a high level (including exception
messages), we truncate long keys and payloads, which may be very long binary data.
|
getKafkaKey
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/Message.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/Message.java
|
Apache-2.0
|
public byte[] getPayload() {
    // Raw Kafka message payload bytes.
    // NOTE(review): returns the internal array without a defensive copy, so
    // callers can mutate this message's payload in place -- confirm intended.
    return mPayload;
}
|
Message key and payload may be arbitrary binary strings, so we should make sure we don't throw
when logging them by using a CharsetDecoder which replaces bad data. (While in practice `new String(bytes)`
does the same thing, the documentation for that method leaves that behavior unspecified.)
Additionally, in contexts where Message.toString() will be logged at a high level (including exception
messages), we truncate long keys and payloads, which may be very long binary data.
|
getPayload
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/Message.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/Message.java
|
Apache-2.0
|
public long getTimestamp() {
    // Message timestamp; units/epoch are set by the producer of mTimestamp
    // (presumably milliseconds since epoch -- confirm against callers).
    return mTimestamp;
}
|
Message key and payload may be arbitrary binary strings, so we should make sure we don't throw
when logging them by using a CharsetDecoder which replaces bad data. (While in practice `new String(bytes)`
does the same thing, the documentation for that method leaves that behavior unspecified.)
Additionally, in contexts where Message.toString() will be logged at a high level (including exception
messages), we truncate long keys and payloads, which may be very long binary data.
|
getTimestamp
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/Message.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/Message.java
|
Apache-2.0
|
public List<MessageHeader> getHeaders(){
    // Kafka record headers attached to this message.
    // NOTE(review): the internal list is returned directly (not wrapped
    // unmodifiable), so callers can mutate it -- confirm intended.
    return mHeaders;
}
|
Message key and payload may be arbitrary binary strings, so we should make sure we don't throw
when logging them by using a CharsetDecoder which replaces bad data. (While in practice `new String(bytes)`
does the same thing, the documentation for that method leaves that behavior unspecified.)
Additionally, in contexts where Message.toString() will be logged at a high level (including exception
messages), we truncate long keys and payloads, which may be very long binary data.
|
getHeaders
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/Message.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/Message.java
|
Apache-2.0
|
@Override
public String toString() {
    // Untruncated rendering of the base message fields plus the extracted
    // partition path components.
    StringBuilder sb = new StringBuilder("ParsedMessage{");
    sb.append(fieldsToString(false));
    sb.append(", mPartitions=").append(Arrays.toString(mPartitions));
    sb.append('}');
    return sb.toString();
}
|
Parsed message is a Kafka message that has been processed by the parser that extracted its
partitions.
@author Pawel Garbacki (pawel@pinterest.com)
|
toString
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/ParsedMessage.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/ParsedMessage.java
|
Apache-2.0
|
public String toTruncatedString() {
    // Same shape as toString() but with key/payload truncated -- suitable
    // for high-level log lines and exception messages.
    StringBuilder sb = new StringBuilder("ParsedMessage{");
    sb.append(fieldsToString(true));
    sb.append(", mPartitions=").append(Arrays.toString(mPartitions));
    sb.append('}');
    return sb.toString();
}
|
Parsed message is a Kafka message that has been processed by the parser that extracted its
partitions.
@author Pawel Garbacki (pawel@pinterest.com)
|
toTruncatedString
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/ParsedMessage.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/ParsedMessage.java
|
Apache-2.0
|
public String[] getPartitions() {
    // Partition path components extracted by the message parser.
    // NOTE(review): internal array exposed without a copy -- confirm intended.
    return mPartitions;
}
|
Parsed message is a Kafka message that has been processed by the parser that extracted its
partitions.
@author Pawel Garbacki (pawel@pinterest.com)
|
getPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/message/ParsedMessage.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/message/ParsedMessage.java
|
Apache-2.0
|
@Override
public void initialize(SecorConfig config) {
    mConfig = config;
    // Attach one Micrometer registry per backend enabled in the config;
    // every attached registry also gets the standard JVM/system meters.
    if (config.getMicroMeterCollectorStatsdEnabled()) {
        MeterRegistry statsd = new StatsdMeterRegistry(StatsdConfig.DEFAULT, Clock.SYSTEM);
        Metrics.addRegistry(statsd);
        registerSystemMetrics(statsd);
    }
    if (config.getMicroMeterCollectorJmxEnabled()) {
        MeterRegistry jmx = new JmxMeterRegistry(JmxConfig.DEFAULT, Clock.SYSTEM);
        Metrics.addRegistry(jmx);
        registerSystemMetrics(jmx);
    }
    if (config.getMicroMeterCollectorPrometheusEnabled()) {
        MeterRegistry prometheus = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT);
        Metrics.addRegistry(prometheus);
        registerSystemMetrics(prometheus);
    }
}
|
MicroMeter meters can integrate with many different metrics backends
(StatsD/Prometheus/Graphite/JMX etc, see https://micrometer.io/docs)
|
initialize
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
Apache-2.0
|
@Override
public void increment(String label, String topic) {
    // Bump the topic-tagged counter by one; the meter is created lazily on
    // first use by the global registry.
    Iterable<Tag> tags = Collections.singletonList(Tag.of("topic", topic));
    Metrics.counter(label, tags).increment();
}
|
MicroMeter meters can integrate with many different metrics backends
(StatsD/Prometheus/Graphite/JMX etc, see https://micrometer.io/docs)
|
increment
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
Apache-2.0
|
@Override
public void increment(String label, int delta, String topic) {
    // Bump the topic-tagged counter by `delta`.
    Iterable<Tag> tags = Collections.singletonList(Tag.of("topic", topic));
    Metrics.counter(label, tags).increment(delta);
}
|
MicroMeter meters can integrate with many different metrics backends
(StatsD/Prometheus/Graphite/JMX etc, see https://micrometer.io/docs)
|
increment
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
Apache-2.0
|
@Override
public void metric(String label, double value, String topic) {
    // Generic metric values are reported as gauges in this collector.
    gauge(label, value, topic);
}
|
MicroMeter meters can integrate with many different metrics backends
(StatsD/Prometheus/Graphite/JMX etc, see https://micrometer.io/docs)
|
metric
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
Apache-2.0
|
@Override
public void gauge(String label, double value, String topic) {
    // Micrometer gauges sample a live reference, so the latest value per
    // (label, topic) pair is stored in a cache the gauge callback reads from.
    final String cacheKey = label + "_" + topic;
    boolean isNewKey = !mGaugeCache.containsKey(cacheKey);
    if (isNewKey && mGaugeCache.size() >= mConfig.getMicroMeterCacheSize()) {
        // Refusing to grow keeps memory bounded, at the cost of dropping
        // updates for gauge keys beyond the configured limit.
        LOG.error("Gauge cache size reached maximum, this may result in inaccurate metrics, "
                + "you can increase cache size by changing "
                + "\"secor.monitoring.metrics.collector.micrometer.cache.size\" property.");
        return;
    }
    mGaugeCache.put(cacheKey, value);
    Metrics.gauge(label, Collections.singletonList(Tag.of("topic", topic)),
            mGaugeCache, cache -> cache.get(cacheKey));
}
|
MicroMeter meters can integrate with many different metrics backends
(StatsD/Prometheus/Graphite/JMX etc, see https://micrometer.io/docs)
|
gauge
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
Apache-2.0
|
private void registerSystemMetrics(MeterRegistry registry) {
    // Bind JVM memory/GC/thread plus host CPU metrics to the given registry
    // so every configured backend also reports process health.
    new JvmGcMetrics().bindTo(registry);
    new JvmMemoryMetrics().bindTo(registry);
    new JvmThreadMetrics().bindTo(registry);
    new ProcessorMetrics().bindTo(registry);
}
|
MicroMeter meters can integrate with many different metrics backends
(StatsD/Prometheus/Graphite/JMX etc, see https://micrometer.io/docs)
|
registerSystemMetrics
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/monitoring/MicroMeterMetricCollector.java
|
Apache-2.0
|
@Override
public long extractTimestampMillis(final Message message) {
    // Deserializes the Avro payload and reads the configured timestamp field,
    // interpreting it as an ISO-8601 date-time string; returns epoch millis,
    // or 0 when the payload cannot be deserialized.
    try {
        GenericRecord record = schemaRegistry.deserialize(message.getTopic(), message.getPayload());
        if (record != null) {
            Object fieldValue = record.get(mConfig.getMessageTimestampName());
            if (fieldValue != null) {
                // JAXB's DatatypeConverter parses the ISO-8601 lexical form.
                Date dateFormat = DatatypeConverter.parseDateTime(fieldValue.toString()).getTime();
                return dateFormat.getTime();
            } else if (m_timestampRequired) {
                // Not a SerializationException, so this propagates to the caller.
                throw new RuntimeException("Missing timestamp field for message: " + message);
            }
        } else {
            // Null record from the registry is treated as a hard error too.
            throw new RuntimeException("Record is empty: " + message);
        }
    } catch (SerializationException e) {
        // Undecodable payloads fall through to the 0 sentinel below.
        LOG.error("Failed to parse record", e);
    }
    return 0;
}
|
AvroMessageParser extracts timestamp field (specified by 'message.timestamp.name')
from AVRO data and partitions data by date.
with support for ISO8601 date format
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/AvroIso8601MessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/AvroIso8601MessageParser.java
|
Apache-2.0
|
@Override
public long extractTimestampMillis(final Message message) {
    // Deserializes the Avro payload and converts the configured numeric
    // timestamp field to epoch milliseconds via toMillis(); returns 0 for
    // undecodable payloads when a timestamp is not required.
    //
    // BUGFIX: the original threw the "missing timestamp" RuntimeException
    // inside the try block, where the broad catch (Exception) immediately
    // swallowed it and returned 0, silently defeating m_timestampRequired.
    // The required-check also only fired when the whole record was null,
    // never when just the field was absent. The check now runs after the
    // catch, covering both cases, so the exception actually propagates.
    try {
        GenericRecord record = schemaRegistry.deserialize(message.getTopic(), message.getPayload());
        if (record != null) {
            Object fieldValue = record.get(mConfig.getMessageTimestampName());
            if (fieldValue != null) {
                return toMillis(Double.valueOf(fieldValue.toString()).longValue());
            }
        }
    } catch (Exception e) {
        // Undecodable or malformed payloads fall back to the 0 sentinel.
        LOG.error("Failed to parse record", e);
        return 0;
    }
    if (m_timestampRequired) {
        throw new RuntimeException("Missing timestamp field for message: " + message);
    }
    return 0;
}
|
AvroMessageParser extracts timestamp field (specified by 'message.timestamp.name')
from AVRO data and partitions data by date.
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/AvroMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/AvroMessageParser.java
|
Apache-2.0
|
static String usingFieldPrefix(SecorConfig config) {
    // Default first-level partition prefix is "<splitFieldName>=", Hive style;
    // overridable via "partitioner.granularity.field.prefix".
    final String fallback = config.getMessageSplitFieldName() + "=";
    return config.getString("partitioner.granularity.field.prefix", fallback);
}
|
AvroMessageParser extracts timestamp field (specified by 'message.timestamp.name')
and a custom field specified by ''message.split.field.name'.
from AVRO data and partitions data by date and custom field.
By default the first partition will be the fieldValue=
If you want to set it to something else set 'partitioner.granularity.field.prefix'
This class was heavily based off SplitByFieldMessageParser (which supports JSON). Like
that other parser this parser doesn't support finalization of partitions.
|
usingFieldPrefix
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
Apache-2.0
|
@Override
public long extractTimestampMillis(Message message) throws Exception {
    // Timestamp extraction alone is meaningless for this parser: the split
    // field and the timestamp are combined, so callers must use extractPartitions.
    throw new UnsupportedOperationException("Unsupported, use extractPartitions method instead");
}
|
AvroMessageParser extracts timestamp field (specified by 'message.timestamp.name')
and a custom field specified by ''message.split.field.name'.
from AVRO data and partitions data by date and custom field.
By default the first partition will be the fieldValue=
If you want to set it to something else set 'partitioner.granularity.field.prefix'
This class was heavily based off SplitByFieldMessageParser (which supports JSON). Like
that other parser this parser doesn't support finalization of partitions.
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
Apache-2.0
|
@Override
public String[] extractPartitions(Message message) throws Exception {
    // Decode the Avro payload; a null record means the payload was unusable.
    GenericRecord record = schemaRegistryClient.deserialize(message.getTopic(), message.getPayload());
    if (record == null) {
        throw new RuntimeException("Failed to parse message as Avro object");
    }
    // First partition dimension is the split-field value, the rest are
    // time-based partitions derived from the record's timestamp.
    String[] head = new String[]{mFieldPrefix + extractEventType(record)};
    String[] tail = generatePartitions(extractTimestampMillis(record), mUsingHourly, mUsingMinutely);
    return (String[]) ArrayUtils.addAll(head, tail);
}
|
AvroMessageParser extracts timestamp field (specified by 'message.timestamp.name')
and a custom field specified by ''message.split.field.name'.
from AVRO data and partitions data by date and custom field.
By default the first partition will be the fieldValue=
If you want to set it to something else set 'partitioner.granularity.field.prefix'
This class was heavily based off SplitByFieldMessageParser (which supports JSON). Like
that other parser this parser doesn't support finalization of partitions.
|
extractPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
Apache-2.0
|
@Override
public String[] getFinalizedUptoPartitions(List<Message> lastMessages,
                                           List<Message> committedMessages) throws Exception {
    // Finalization makes no sense here: the set of split-field values is
    // open-ended, so no partition can ever be declared complete.
    throw new UnsupportedOperationException("Partition finalization is not supported");
}
|
AvroMessageParser extracts timestamp field (specified by 'message.timestamp.name')
and a custom field specified by ''message.split.field.name'.
from AVRO data and partitions data by date and custom field.
By default the first partition will be the fieldValue=
If you want to set it to something else set 'partitioner.granularity.field.prefix'
This class was heavily based off SplitByFieldMessageParser (which supports JSON). Like
that other parser this parser doesn't support finalization of partitions.
|
getFinalizedUptoPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
Apache-2.0
|
@Override
public String[] getPreviousPartitions(String[] partitions) throws Exception {
    // Walking partitions backwards is part of finalization, which this
    // split-by-field parser does not support.
    throw new UnsupportedOperationException("Partition finalization is not supported");
}
|
AvroMessageParser extracts timestamp field (specified by 'message.timestamp.name')
and a custom field specified by ''message.split.field.name'.
from AVRO data and partitions data by date and custom field.
By default the first partition will be the fieldValue=
If you want to set it to something else set 'partitioner.granularity.field.prefix'
This class was heavily based off SplitByFieldMessageParser (which supports JSON). Like
that other parser this parser doesn't support finalization of partitions.
|
getPreviousPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
Apache-2.0
|
protected String extractEventType(GenericRecord record) {
    // The split field is mandatory; fail loudly if a record lacks it.
    Object splitValue = record.get(mSplitFieldName);
    if (splitValue == null) {
        throw new RuntimeException("Could not find key " + mSplitFieldName + " in Avro message");
    }
    return splitValue.toString();
}
|
AvroMessageParser extracts timestamp field (specified by 'message.timestamp.name')
and a custom field specified by ''message.split.field.name'.
from AVRO data and partitions data by date and custom field.
By default the first partition will be the fieldValue=
If you want to set it to something else set 'partitioner.granularity.field.prefix'
This class was heavily based off SplitByFieldMessageParser (which supports JSON). Like
that other parser this parser doesn't support finalization of partitions.
|
extractEventType
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
Apache-2.0
|
protected long extractTimestampMillis(GenericRecord record) {
    // Converts the configured numeric timestamp field of an Avro record to
    // epoch milliseconds via toMillis(); returns 0 when no timestamp is
    // available and one is not required.
    //
    // BUGFIX: the original built the error message with record.toString(),
    // but that branch is only reachable when record == null, so it always
    // threw NullPointerException instead of the intended RuntimeException.
    // Plain string concatenation renders null safely as "null".
    try {
        if (record != null) {
            Object fieldValue = record.get(mConfig.getMessageTimestampName());
            if (fieldValue != null) {
                return toMillis(Double.valueOf(fieldValue.toString()).longValue());
            }
        } else if (m_timestampRequired) {
            throw new RuntimeException("Missing timestamp field for message: " + record);
        }
    } catch (SerializationException e) {
        // Deserialization problems degrade to the 0 sentinel.
        LOG.error("Failed to parse record", e);
    }
    return 0;
}
|
AvroMessageParser extracts timestamp field (specified by 'message.timestamp.name')
and a custom field specified by ''message.split.field.name'.
from AVRO data and partitions data by date and custom field.
By default the first partition will be the fieldValue=
If you want to set it to something else set 'partitioner.granularity.field.prefix'
This class was heavily based off SplitByFieldMessageParser (which supports JSON). Like
that other parser this parser doesn't support finalization of partitions.
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/AvroSplitByFieldMessageParser.java
|
Apache-2.0
|
@Override
public String[] extractPartitions(Message message) throws Exception {
    // Bucket the offset into fixed-size ranges: floor(offset / size) * size.
    final long rangeSize = mConfig.getOffsetsPerPartition();
    final long rangeStart = (message.getOffset() / rangeSize) * rangeSize;
    // Prepend the wall-clock date path so files land under today's partition.
    String[] datePartition = generatePartitions(new Date().getTime(), mUsingHourly, mUsingMinutely);
    return new String[] {StringUtils.join(datePartition, '/'), offsetPrefix + rangeStart};
}
|
Offset message parser groups messages based on the offset ranges.
@author Ahsan Nabi Dar (ahsan@wego.com)
|
extractPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/DailyOffsetMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/DailyOffsetMessageParser.java
|
Apache-2.0
|
@Override
public long extractTimestampMillis(final Message message) {
    // Partitioning is driven by arrival time, not message content.
    return System.currentTimeMillis();
}
|
Offset message parser groups messages based on the offset ranges.
@author Ahsan Nabi Dar (ahsan@wego.com)
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/DailyOffsetMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/DailyOffsetMessageParser.java
|
Apache-2.0
|
@Override
public String[] extractPartitions(Message message) {
    // Produces a single date partition ("<dt prefix> + formatted date") from
    // the configured timestamp field; falls back to defaultDate when the
    // payload is not JSON, the field is absent, or the value cannot be parsed
    // with the configured input pattern.
    JSONObject jsonObject = (JSONObject) JSONValue.parse(message.getPayload());
    String result[] = { defaultDate };
    if (jsonObject != null) {
        Object fieldValue = getJsonFieldValue(jsonObject);
        if (fieldValue != null && inputPattern != null) {
            try {
                // inputFormatter/outputFormatter convert between the input
                // pattern and the partition's output representation.
                Date dateFormat = inputFormatter.parse(fieldValue.toString());
                result[0] = mDtPrefix + outputFormatter.format(dateFormat);
            } catch (Exception e) {
                // Unparseable values are logged and the default date is kept.
                LOG.warn("Impossible to convert date = {} with the input pattern = {}. Using date default = {}",
                        fieldValue.toString(), inputPattern.toString(), result[0]);
            }
        }
    }
    return result;
}
|
DateMessageParser extracts the timestamp field (specified by 'message.timestamp.name')
and the date pattern (specified by 'message.timestamp.input.pattern')
@see java.text.SimpleDateFormat
@author Lucas Zago (lucaszago@gmail.com)
|
extractPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/DateMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/DateMessageParser.java
|
Apache-2.0
|
@Override
public long extractTimestampMillis(final Message message) {
    // Parses the payload as JSON and interprets the configured timestamp
    // field as an ISO-8601 date-time string; returns epoch milliseconds, or
    // 0 when the timestamp is absent/invalid and not required.
    JSONObject jsonObject = (JSONObject) JSONValue.parse(message.getPayload());
    Object fieldValue = jsonObject != null ? getJsonFieldValue(jsonObject) : null;
    if (m_timestampRequired && fieldValue == null) {
        throw new RuntimeException("Missing timestamp field for message: " + message);
    }
    if (fieldValue != null) {
        try {
            // JAXB's DatatypeConverter parses the ISO-8601 lexical form.
            Date dateFormat = DatatypeConverter.parseDateTime(fieldValue.toString()).getTime();
            return dateFormat.getTime();
        } catch (IllegalArgumentException ex) {
            // Malformed timestamps are fatal only when one is required.
            if (m_timestampRequired){
                throw new RuntimeException("Bad timestamp field for message: " + message);
            }
        }
    }
    return 0;
}
|
Iso8601MessageParser extracts timestamp field (specified by 'message.timestamp.name')
@author Jurriaan Pruis (email@jurriaanpruis.nl)
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/Iso8601MessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/Iso8601MessageParser.java
|
Apache-2.0
|
@Override
public long extractTimestampMillis(final Message message) {
    // Reads the configured numeric timestamp field out of a JSON payload and
    // normalizes it to epoch milliseconds via toMillis().
    JSONObject json = (JSONObject) JSONValue.parse(message.getPayload());
    if (json == null) {
        // NOTE(review): the required-timestamp check only fires for
        // unparseable payloads; a parsed object missing the field yields 0.
        if (m_timestampRequired) {
            throw new RuntimeException("Missing timestamp field for message: " + message);
        }
        return 0;
    }
    Object fieldValue = getJsonFieldValue(json);
    if (fieldValue == null) {
        return 0;
    }
    return toMillis(Double.valueOf(fieldValue.toString()).longValue());
}
|
JsonMessageParser extracts timestamp field (specified by 'message.timestamp.name')
from JSON data and partitions data by date.
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/JsonMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/JsonMessageParser.java
|
Apache-2.0
|
@Override
public long extractTimestampMillis(Message message) throws Exception {
    // Deserialize the MessagePack payload into a generic field map.
    HashMap<String, Object> fields = (HashMap<String, Object>) mMessagePackObjectMapper.readValue(
            message.getPayload(), mTypeReference);
    Object rawTimestamp = fields.get(mConfig.getMessageTimestampName());
    // Accept numeric, numeric-string, or boxed-Long timestamp values.
    final long rawValue;
    if (rawTimestamp instanceof Number) {
        rawValue = ((Number) rawTimestamp).longValue();
    } else if (rawTimestamp instanceof String) {
        rawValue = Long.parseLong((String) rawTimestamp);
    } else {
        rawValue = (Long) rawTimestamp;
    }
    return toMillis(rawValue);
}
|
MessagePack timestamped message parser.
Requires a second or ms timestamp.
Does not support message.timestamp.input.pattern.
@author Zack Dever (zack@rd.io)
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/MessagePackParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/MessagePackParser.java
|
Apache-2.0
|
static String usingOffsetPrefix(SecorConfig config) {
    // Reads the configured prefix used for offset-based partition names.
    return config.getString("secor.offsets.prefix");
}
|
Message parser extracts partitions from messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
usingOffsetPrefix
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/MessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/MessageParser.java
|
Apache-2.0
|
public ParsedMessage parse(Message message) throws Exception {
    // A parsed message is the original message enriched with its partitions.
    return new ParsedMessage(message.getTopic(),
                             message.getKafkaPartition(),
                             message.getOffset(),
                             message.getKafkaKey(),
                             message.getPayload(),
                             extractPartitions(message),
                             message.getTimestamp(),
                             message.getHeaders());
}
|
Message parser extracts partitions from messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
parse
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/MessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/MessageParser.java
|
Apache-2.0
|
public Object getJsonFieldValue(JSONObject jsonObject) {
    // Looks up the timestamp value in the JSON object. When mNestedFields is
    // set (presumably the components of a dotted timestamp name — confirm in
    // the constructor), the object tree is walked one level per component;
    // otherwise the configured name is used as a flat key. Returns null when
    // any path component is absent.
    Object fieldValue = null;
    if (mNestedFields != null) {
        Object finalValue = null;
        for (int i=0; i < mNestedFields.length; i++) {
            if (!jsonObject.containsKey(mNestedFields[i])) {
                LOG.warn("Could not find key {} in message", mConfig.getMessageTimestampName());
                break;
            }
            if (i < (mNestedFields.length -1)) {
                // Intermediate component: descend into the nested object.
                // NOTE(review): assumes intermediate values are JSON objects;
                // a scalar here would raise ClassCastException.
                jsonObject = (JSONObject) jsonObject.get(mNestedFields[i]);
            } else {
                // Last component: this is the timestamp value itself.
                finalValue = jsonObject.get(mNestedFields[i]);
            }
        }
        fieldValue = finalValue;
    } else {
        fieldValue = jsonObject.get(mConfig.getMessageTimestampName());
    }
    return fieldValue;
}
|
Message parser extracts partitions from messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
getJsonFieldValue
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/MessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/MessageParser.java
|
Apache-2.0
|
@Override
public String[] extractPartitions(Message message) throws Exception {
    // Round the offset down to the start of its fixed-size range so all
    // messages in the same range share one partition.
    final long rangeSize = mConfig.getOffsetsPerPartition();
    final long rangeStart = (message.getOffset() / rangeSize) * rangeSize;
    return new String[] {offsetPrefix + rangeStart};
}
|
Offset message parser groups messages based on the offset ranges.
@author Pawel Garbacki (pawel@pinterest.com)
|
extractPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/OffsetMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/OffsetMessageParser.java
|
Apache-2.0
|
@Override
public String[] extractPartitions(Message message) throws Exception {
    // Partition purely by current wall-clock time, joined into one path.
    String[] timeParts = generatePartitions(new Date().getTime(), mUsingHourly, mUsingMinutely);
    return new String[] {StringUtils.join(timeParts, '/')};
}
|
Offset message parser groups messages based on the offset ranges.
@author Ahsan Nabi Dar (ahsan@wego.com)
|
extractPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/PartitionedMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/PartitionedMessageParser.java
|
Apache-2.0
|
@Override
public long extractTimestampMillis(final Message message) {
    // Arrival time, not message content, drives partitioning.
    return System.currentTimeMillis();
}
|
Offset message parser groups messages based on the offset ranges.
@author Ahsan Nabi Dar (ahsan@wego.com)
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/PartitionedMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/PartitionedMessageParser.java
|
Apache-2.0
|
private String[] getFinalizedUptoPartitions(String topic) throws Exception {
    // Collects, for every Kafka partition of the topic, the last message and
    // the last committed message, then asks the message parser which date
    // partitions can safely be finalized. May return null (callers check).
    final int numPartitions = mKafkaClient.getNumPartitions(topic);
    List<Message> lastMessages = new ArrayList<Message>(numPartitions);
    List<Message> committedMessages = new ArrayList<Message>(numPartitions);
    for (int partition = 0; partition < numPartitions; ++partition) {
        TopicPartition topicPartition = new TopicPartition(topic, partition);
        Message lastMessage = mKafkaClient.getLastMessage(topicPartition);
        Message committedMessage = mKafkaClient.getCommittedMessage(topicPartition);
        if (lastMessage == null || committedMessage == null) {
            // This will happen if no messages have been posted to the given topic partition.
            // Such partitions are skipped rather than blocking finalization.
            LOG.error("For topic {} partition {}, lastMessage: {}, committed: {}",
                    topicPartition.getTopic(), topicPartition.getPartition(),
                    lastMessage, committedMessage);
            continue;
        }
        lastMessages.add(lastMessage);
        committedMessages.add(committedMessage);
    }
    return mMessageParser.getFinalizedUptoPartitions(lastMessages, committedMessages);
}
|
Partition finalizer writes _SUCCESS files to date partitions that very likely won't be receiving
any new messages. It also adds those partitions to Hive.
@author Pawel Garbacki (pawel@pinterest.com)
|
getFinalizedUptoPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/PartitionFinalizer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/PartitionFinalizer.java
|
Apache-2.0
|
private void finalizePartitionsUpTo(String topic, String[] uptoPartitions) throws Exception {
    // Writes _SUCCESS marker files into every existing partition folder that
    // precedes (but does not include) uptoPartitions, looking back at most
    // mLookbackPeriods steps and stopping at the first already-finalized
    // folder. Hive/Qubole registration is attempted only for partitions at
    // the same dimensionality as uptoPartitions (the finest granularity).
    String prefix = FileUtil.getPrefix(topic, mConfig);
    LOG.info("Finalize up to (but not include) {}, dim: {}",
            uptoPartitions, uptoPartitions.length);
    String[] previous = mMessageParser.getPreviousPartitions(uptoPartitions);
    Stack<String[]> toBeFinalized = new Stack<String[]>();
    // Walk backwards to collect all partitions which are previous to the upTo partition
    // Do not include the upTo partition
    // Stop at the first partition which already have the SUCCESS file
    for (int i = 0; i < mLookbackPeriods; i++) {
        LOG.info("Looking for partition: " + Arrays.toString(previous));
        // LogFilePath is only used here to derive the partition directory;
        // kafkaPartition/offset are irrelevant and passed as 0.
        LogFilePath logFilePath = new LogFilePath(prefix, topic, previous,
                mConfig.getGeneration(), 0, 0, mFileExtension);
        if (FileUtil.s3PathPrefixIsAltered(logFilePath.getLogFilePath(), mConfig)) {
            logFilePath = logFilePath.withPrefix(FileUtil.getS3AlternativePrefix(mConfig));
        }
        String logFileDir = logFilePath.getLogFileDir();
        if (FileUtil.exists(logFileDir)) {
            String successFilePath = logFileDir + "/_SUCCESS";
            if (FileUtil.exists(successFilePath)) {
                LOG.info(
                        "SuccessFile exist already, short circuit return. " + successFilePath);
                break;
            }
            LOG.info("Folder {} exists and ready to be finalized.", logFileDir);
            toBeFinalized.push(previous);
        } else {
            LOG.info("Folder {} doesn't exist, skip", logFileDir);
        }
        previous = mMessageParser.getPreviousPartitions(previous);
    }
    LOG.info("To be finalized partitions: {}", toBeFinalized);
    if (toBeFinalized.isEmpty()) {
        LOG.warn("There is no partitions to be finalized.");
        return;
    }
    // Now walk forward the collected partitions to do the finalization
    // Note we are deliberately walking backwards and then forwards to make sure we don't
    // end up in a situation that a later date partition is finalized and then the system
    // crashes (which creates unfinalized partition folders in between)
    while (!toBeFinalized.isEmpty()) {
        String[] current = toBeFinalized.pop();
        LOG.info("Finalizing partition: " + Arrays.toString(current));
        // We only perform hive registration on the last dimension of the partition array
        // i.e. only do hive registration for the hourly folder, but not for the daily
        if (uptoPartitions.length == current.length) {
            try {
                StringBuilder sb = new StringBuilder();
                for (int i = 0; i < current.length; i++) {
                    String par = current[i];
                    // We expect the partition array in the form of key=value if
                    // they need to go through hive registration
                    String[] parts = par.split("=");
                    assert parts.length == 2 : "wrong partition format: " + par;
                    if (i > 0) {
                        sb.append(",");
                    }
                    sb.append(parts[0]);
                    sb.append("='");
                    sb.append(parts[1]);
                    sb.append("'");
                }
                LOG.info("Hive partition string: " + sb);
                String hiveTableName = mConfig.getHiveTableName(topic);
                LOG.info("Hive table name from config: {}", hiveTableName);
                if (hiveTableName == null) {
                    // Fall back to hivePrefix + topic when no explicit table
                    // name is configured for this topic.
                    String hivePrefix = null;
                    try {
                        hivePrefix = mConfig.getHivePrefix();
                        hiveTableName = hivePrefix + topic;
                        LOG.info("Hive table name from prefix: {}", hiveTableName);
                    } catch (RuntimeException ex) {
                        LOG.warn("HivePrefix is not defined. Skip hive registration");
                    }
                }
                if (hiveTableName != null && mConfig.getQuboleEnabled()) {
                    mQuboleClient.addPartition(hiveTableName, sb.toString());
                }
            } catch (Exception e) {
                // Registration failure for one partition must not block the
                // remaining partitions from being finalized.
                LOG.error("failed to finalize topic " + topic, e);
                continue;
            }
        }
        // Generate the SUCCESS file at the end
        LogFilePath logFilePath = new LogFilePath(prefix, topic, current,
                mConfig.getGeneration(), 0, 0, mFileExtension);
        if (FileUtil.s3PathPrefixIsAltered(logFilePath.getLogFilePath(), mConfig)) {
            logFilePath = logFilePath.withPrefix(FileUtil.getS3AlternativePrefix(mConfig));
            LOG.info("Will finalize alternative s3 logFilePath {}", logFilePath);
        }
        String logFileDir = logFilePath.getLogFileDir();
        String successFilePath = logFileDir + "/_SUCCESS";
        LOG.info("touching file {}", successFilePath);
        FileUtil.touch(successFilePath);
    }
}
|
Partition finalizer writes _SUCCESS files to date partitions that very likely won't be receiving
any new messages. It also adds those partitions to Hive.
@author Pawel Garbacki (pawel@pinterest.com)
|
finalizePartitionsUpTo
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/PartitionFinalizer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/PartitionFinalizer.java
|
Apache-2.0
|
public void finalizePartitions() throws Exception {
    // Only topics with committed offsets in ZooKeeper are candidates, and
    // only those matching the configured topic filter are finalized.
    for (String topic : mZookeeperConnector.getCommittedOffsetTopics()) {
        if (!topic.matches(mConfig.getKafkaTopicFilter())) {
            LOG.info("skipping topic {}", topic);
            continue;
        }
        LOG.info("finalizing topic {}", topic);
        String[] partitions = getFinalizedUptoPartitions(topic);
        LOG.info("finalized timestamp for topic {} is {}", topic, partitions);
        if (partitions != null) {
            finalizePartitionsUpTo(topic, partitions);
        }
    }
}
|
Partition finalizer writes _SUCCESS files to date partitions that very likely won't be receiving
any new messages. It also adds those partitions to Hive.
@author Pawel Garbacki (pawel@pinterest.com)
|
finalizePartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/PartitionFinalizer.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/PartitionFinalizer.java
|
Apache-2.0
|
@Override
public long extractTimestampMillis(final Message message) throws IOException {
    // Delegate to the payload-level extractor; the topic selects how the
    // protobuf payload is decoded.
    return extractTimestampMillis(message.getTopic(), message.getPayload());
}
|
Protocol buffer message timestamp extractor
If <code>secor.protobuf.message.class</code> is not set assumes that the very
first <code>uint64</code> field in a message is the timestamp. Otherwise,
uses <code>message.timestamp.name</code> as a path to get to the timestamp
field within protobuf message.
@author Liam Stewart (liam.stewart@gmail.com)
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/ProtobufMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/ProtobufMessageParser.java
|
Apache-2.0
|
public long extractTimestampMillis(String topic, final byte[] bytes) throws IOException {
    // Two extraction strategies:
    //  1. If a timestamp field path is configured, decode the full protobuf
    //     (or JSON) message and walk the nested path to the timestamp field.
    //  2. Otherwise, read the raw wire format and assume the very first field
    //     is a uint64 timestamp.
    if (timestampFieldPath != null) {
        com.google.protobuf.Message decodedMessage = protobufUtil.decodeProtobufOrJsonMessage(topic,
                bytes);
        int i = 0;
        // Descend through all intermediate message fields of the path.
        for (; i < timestampFieldPath.length - 1; ++i) {
            decodedMessage = (com.google.protobuf.Message) decodedMessage
                    .getField(decodedMessage.getDescriptorForType().findFieldByName(timestampFieldPath[i]))
;
        }
        Object timestampObject = decodedMessage
                .getField(decodedMessage.getDescriptorForType().findFieldByName(timestampFieldPath[i]));
        if (timestampObject instanceof com.google.protobuf.Timestamp){
            // google.protobuf.Timestamp (seconds + nanos) converts directly.
            return Timestamps.toMillis((com.google.protobuf.Timestamp) timestampObject);
        }else {
            // Plain long values are normalized via toMillis().
            return toMillis((Long) timestampObject);
        }
    } else {
        // Assume that the timestamp field is the first field, is required,
        // and is a uint64.
        CodedInputStream input = CodedInputStream.newInstance(bytes);
        // Don't really care about the tag, but need to read it to get, to
        // the payload.
        input.readTag();
        return toMillis(input.readUInt64());
    }
}
|
Protocol buffer message timestamp extractor
If <code>secor.protobuf.message.class</code> is not set assumes that the very
first <code>uint64</code> field in a message is the timestamp. Otherwise,
uses <code>message.timestamp.name</code> as a path to get to the timestamp
field within protobuf message.
@author Liam Stewart (liam.stewart@gmail.com)
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/ProtobufMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/ProtobufMessageParser.java
|
Apache-2.0
|
private Map makeRequest(URL url, String body) throws IOException {
    // Issues an authenticated JSON request to the Qubole API: GET when body
    // is null, POST otherwise. Returns the parsed JSON response map; throws
    // RuntimeException when the response is not a map or reports an error.
    //
    // BUGFIX: the response reader was never closed, and the connection was
    // only disconnected on IOException (leaking it on success and on the
    // RuntimeException paths). Both are now released unconditionally.
    HttpURLConnection connection = null;
    try {
        connection = (HttpURLConnection) url.openConnection();
        connection.setRequestProperty("X-AUTH-TOKEN", mApiToken);
        connection.setRequestProperty("Content-Type", "application/json");
        connection.setRequestProperty("Accepts", "application/json");
        connection.setRequestProperty("Accept", "*/*");
        if (body != null) {
            connection.setRequestMethod("POST");
            connection.setRequestProperty("Content-Length",
                    Integer.toString(body.getBytes().length));
        }
        connection.setUseCaches(false);
        connection.setDoInput(true);
        connection.setDoOutput(true);
        if (body != null) {
            // Send request.
            DataOutputStream dataOutputStream = new DataOutputStream(
                    connection.getOutputStream());
            try {
                dataOutputStream.writeBytes(body);
                dataOutputStream.flush();
            } finally {
                dataOutputStream.close();
            }
        }
        // Get Response.
        Object responseObj;
        BufferedReader reader = new BufferedReader(
                new InputStreamReader(connection.getInputStream()));
        try {
            responseObj = JSONValue.parse(reader);
        } finally {
            reader.close();
        }
        if (!(responseObj instanceof Map)) {
            throw new RuntimeException("command " + url + " body " + body + " unexpected " +
                    responseObj);
        }
        Map response = (Map)responseObj;
        if (response.get("status").equals("error")) {
            throw new RuntimeException("command " + url + " with body " + body + " failed " +
                    JSONObject.toJSONString(response));
        }
        return response;
    } finally {
        if (connection != null) {
            connection.disconnect();
        }
    }
}
|
Qubole client encapsulates communication with a Qubole cluster.
@author Pawel Garbacki (pawel@pinterest.com)
|
makeRequest
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/QuboleClient.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/QuboleClient.java
|
Apache-2.0
|
private int query(String query) throws IOException {
    // Submit a Hive query to Qubole and return the command id for polling.
    JSONObject payload = new JSONObject();
    payload.put("query", query);
    Map response = makeRequest(new URL("https://api.qubole.com/api/v1.2/commands"),
            payload.toString());
    return (Integer) response.get("id");
}
|
Qubole client encapsulates communication with a Qubole cluster.
@author Pawel Garbacki (pawel@pinterest.com)
|
query
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/QuboleClient.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/QuboleClient.java
|
Apache-2.0
|
/**
 * Polls the Qubole command status endpoint until the command reports "done".
 *
 * @param commandId id of a previously submitted Qubole command.
 * @param timeout maximum time in milliseconds to wait for completion.
 * @throws IOException if the command does not complete within the timeout or a request fails.
 * @throws InterruptedException if the polling sleep is interrupted.
 */
private void waitForCompletion(int commandId, long timeout) throws IOException, InterruptedException {
    URL url = new URL("https://api.qubole.com/api/v1.2/commands/" + commandId);
    long endTime = System.currentTimeMillis() + timeout;
    while (System.currentTimeMillis() < endTime) {
        Map response = makeRequest(url, null);
        if (response.get("status").equals("done")) {
            return;
        }
        System.out.println("waiting 3 seconds for results of query " + commandId +
                           ". Current status " + response.get("status"));
        Thread.sleep(3000);
    }
    // Fixed: the message previously read "commandId<id>" with no separating space.
    throw new IOException("Qubole commandId " + commandId + " failed to return within timeout.");
}
|
Qubole client encapsulates communication with a Qubole cluster.
@author Pawel Garbacki (pawel@pinterest.com)
|
waitForCompletion
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/QuboleClient.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/QuboleClient.java
|
Apache-2.0
|
/**
 * Registers a Hive partition for the given table via Qubole and blocks until the
 * ALTER TABLE command completes (or the wait limit is hit).
 *
 * @param table fully-qualified Hive table name.
 * @param partition partition spec text, e.g. {@code dt='2020-01-01'}.
 */
public void addPartition(String table, String partition) throws IOException,
        InterruptedException {
    final String statement = "ALTER TABLE " + table + " ADD IF NOT EXISTS PARTITION (" +
            partition + ")";
    waitForCompletion(query(statement), MAX_QUBOLE_WAIT_TIME_MS);
}
|
Qubole client encapsulates communication with a Qubole cluster.
@author Pawel Garbacki (pawel@pinterest.com)
|
addPartition
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/QuboleClient.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/QuboleClient.java
|
Apache-2.0
|
/**
 * Extracts the timestamp from a free-text payload using the configured regex
 * (group 1 of mTsPattern) and converts it to milliseconds.
 *
 * @throws NumberFormatException if no timestamp match is found in the payload.
 */
@Override
public long extractTimestampMillis(final Message message) {
    final String payload = new String(message.getPayload());
    final Matcher matcher = mTsPattern.matcher(payload);
    if (matcher.find()) {
        final String timestampText = matcher.group(1);
        if (timestampText != null) {
            return toMillis(Long.parseLong(timestampText));
        }
    }
    throw new NumberFormatException("Cannot find timestamp field in: " + payload);
}
|
RegexMessageParser extracts timestamp field (specified by 'message.timestamp.input.pattern')
The pattern specifies the regular exp to extract the timestamp field from a free-text line.
* @author Henry Cai (hcai@pinterest.com)
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/RegexMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/RegexMessageParser.java
|
Apache-2.0
|
/**
 * Not supported by this parser: the timestamp is extracted together with the event
 * type inside {@code extractPartitions}, so a standalone timestamp entry point
 * is deliberately disabled.
 *
 * @throws UnsupportedOperationException always.
 */
@Override
public long extractTimestampMillis(Message message) throws Exception {
    throw new UnsupportedOperationException("Unsupported, use extractPartitions method instead");
}
|
SplitByFieldMessageParser extracts event type field (specified by 'message.split.field.name')
and timestamp field (specified by 'message.timestamp.name')
from JSON data and splits data into multiple outputs by event type and then partitions each output by date.
Caution: this parser doesn't support finalization of partitions.
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
Apache-2.0
|
/**
 * Parses the message payload as JSON and builds the output partition path:
 * the event type (split field) first, followed by the date partitions derived
 * from the message timestamp.
 *
 * @throws RuntimeException if the payload is not a JSON object.
 */
@Override
public String[] extractPartitions(Message message) throws Exception {
    final JSONObject json = (JSONObject) JSONValue.parse(message.getPayload());
    if (json == null) {
        throw new RuntimeException("Failed to parse message as Json object");
    }
    final String[] eventTypePartition = new String[]{extractEventType(json)};
    final String[] datePartitions =
            generatePartitions(extractTimestampMillis(json), mUsingHourly, mUsingMinutely);
    return (String[]) ArrayUtils.addAll(eventTypePartition, datePartitions);
}
|
SplitByFieldMessageParser extracts event type field (specified by 'message.split.field.name')
and timestamp field (specified by 'message.timestamp.name')
from JSON data and splits data into multiple outputs by event type and then partitions each output by date.
Caution: this parser doesn't support finalization of partitions.
|
extractPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
Apache-2.0
|
/**
 * Partition finalization is not supported by this parser (documented on the class),
 * because output is split per event type and there is no single partition sequence
 * to finalize.
 *
 * @throws UnsupportedOperationException always.
 */
@Override
public String[] getFinalizedUptoPartitions(List<Message> lastMessages,
                                           List<Message> committedMessages) throws Exception {
    throw new UnsupportedOperationException("Partition finalization is not supported");
}
|
SplitByFieldMessageParser extracts event type field (specified by 'message.split.field.name')
and timestamp field (specified by 'message.timestamp.name')
from JSON data and splits data into multiple outputs by event type and then partitions each output by date.
Caution: this parser doesn't support finalization of partitions.
|
getFinalizedUptoPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
Apache-2.0
|
/**
 * Partition finalization is not supported by this parser; see class-level note.
 *
 * @throws UnsupportedOperationException always.
 */
@Override
public String[] getPreviousPartitions(String[] partitions) throws Exception {
    throw new UnsupportedOperationException("Partition finalization is not supported");
}
|
SplitByFieldMessageParser extracts event type field (specified by 'message.split.field.name')
and timestamp field (specified by 'message.timestamp.name')
from JSON data and splits data into multiple outputs by event type and then partitions each output by date.
Caution: this parser doesn't support finalization of partitions.
|
getPreviousPartitions
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
Apache-2.0
|
/**
 * Reads the configured split field from the JSON message and returns its string form.
 *
 * @param jsonObject parsed message payload.
 * @return string value of the split field, used as the first output partition.
 * @throws RuntimeException if the split field key is absent.
 */
protected String extractEventType(JSONObject jsonObject) {
    if (!jsonObject.containsKey(mSplitFieldName)) {
        throw new RuntimeException("Could not find key " + mSplitFieldName + " in Json message");
    }
    // NOTE: a present key with a null value still NPEs on toString() below.
    return jsonObject.get(mSplitFieldName).toString();
}
|
SplitByFieldMessageParser extracts event type field (specified by 'message.split.field.name')
and timestamp field (specified by 'message.timestamp.name')
from JSON data and splits data into multiple outputs by event type and then partitions each output by date.
Caution: this parser doesn't support finalization of partitions.
|
extractEventType
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
Apache-2.0
|
/**
 * Extracts the timestamp field from the JSON message and converts it to milliseconds.
 * The field is parsed as a double first so both integral and fractional encodings
 * (e.g. "1405970352" and "1405970352.123") are accepted, then truncated to a long.
 *
 * @param jsonObject parsed message payload.
 * @return timestamp in milliseconds.
 * @throws RuntimeException if the timestamp field is absent.
 */
protected long extractTimestampMillis(JSONObject jsonObject) {
    Object fieldValue = getJsonFieldValue(jsonObject);
    if (fieldValue == null) {
        throw new RuntimeException("Failed to extract timestamp from the message");
    }
    // parseDouble avoids the boxed intermediate of Double.valueOf(...).longValue().
    return toMillis((long) Double.parseDouble(fieldValue.toString()));
}
|
SplitByFieldMessageParser extracts event type field (specified by 'message.split.field.name')
and timestamp field (specified by 'message.timestamp.name')
from JSON data and splits data into multiple outputs by event type and then partitions each output by date.
Caution: this parser doesn't support finalization of partitions.
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/SplitByFieldMessageParser.java
|
Apache-2.0
|
/** Returns the Thrift field id of the timestamp field within the message struct. */
@Override
public short getThriftFieldId() {
    return mFieldId;
}
|
Thrift message parser extracts date partitions from thrift messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
getThriftFieldId
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/ThriftMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/ThriftMessageParser.java
|
Apache-2.0
|
/** Returns the configured name of the timestamp field. */
@Override
public String getFieldName() {
    return mFieldName;
}
|
Thrift message parser extracts date partitions from thrift messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
getFieldName
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/ThriftMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/ThriftMessageParser.java
|
Apache-2.0
|
/**
 * Partially deserializes only the timestamp field from the Thrift payload,
 * reading it as i32 or i64 depending on the configured timestamp type, and
 * converts the value to milliseconds.
 */
@Override
public long extractTimestampMillis(final Message message) throws TException {
    final byte[] payload = message.getPayload();
    final long rawTimestamp = "i32".equals(mTimestampType)
            ? (long) mDeserializer.partialDeserializeI32(payload, mThriftPath)
            : mDeserializer.partialDeserializeI64(payload, mThriftPath);
    return toMillis(rawTimestamp);
}
|
Thrift message parser extracts date partitions from thrift messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
extractTimestampMillis
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/parser/ThriftMessageParser.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/parser/ThriftMessageParser.java
|
Apache-2.0
|
/**
 * Records the current access time (seconds) for the given topic partition and
 * evicts entries that have not been touched within the forget window.
 *
 * @param topicPartition the partition that was just read from.
 */
private void updateAccessTime(TopicPartition topicPartition) {
    long now = System.currentTimeMillis() / 1000L;
    mLastAccessTime.put(topicPartition, now);
    // Typed iterator replaces the previous raw Iterator/Map.Entry usage
    // (the map is keyed by TopicPartition with Long second-resolution values).
    Iterator<Map.Entry<TopicPartition, Long>> iterator =
            mLastAccessTime.entrySet().iterator();
    while (iterator.hasNext()) {
        Map.Entry<TopicPartition, Long> entry = iterator.next();
        if (now - entry.getValue() > mTopicPartitionForgetSeconds) {
            iterator.remove();
        }
    }
}
|
Message reader consumes raw Kafka messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
updateAccessTime
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/reader/MessageReader.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/reader/MessageReader.java
|
Apache-2.0
|
/**
 * Publishes the space-separated list of currently-tracked topic/partition pairs
 * as the "secor.topic_partitions" stats label.
 */
private void exportStats() {
    // StringBuilder: local, single-threaded use does not need StringBuffer's locking.
    StringBuilder topicPartitions = new StringBuilder();
    for (TopicPartition topicPartition : mLastAccessTime.keySet()) {
        if (topicPartitions.length() > 0) {
            topicPartitions.append(' ');
        }
        // Append pieces directly instead of concatenating into a temporary String.
        topicPartitions.append(topicPartition.getTopic())
                       .append('/')
                       .append(topicPartition.getPartition());
    }
    StatsUtil.setLabel("secor.topic_partitions", topicPartitions.toString());
}
|
Message reader consumes raw Kafka messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
exportStats
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/reader/MessageReader.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/reader/MessageReader.java
|
Apache-2.0
|
/** Returns true if the underlying Kafka iterator has another message available. */
public boolean hasNext() {
    return mKafkaMessageIterator.hasNext();
}
|
Message reader consumes raw Kafka messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
hasNext
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/reader/MessageReader.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/reader/MessageReader.java
|
Apache-2.0
|
/**
 * Reads the next raw message from Kafka, applying rate limiting and skipping
 * messages whose offsets precede the committed offset count for their partition.
 *
 * @return the next message, or null if the iterator yielded null or the message
 *         was already committed.
 */
public Message read() {
    assert hasNext();
    // mNMessages cycles through [0, mCheckMessagesPerSecond); the rate limiter and
    // stats export fire once per cycle, i.e. once every mCheckMessagesPerSecond reads.
    mNMessages = (mNMessages + 1) % mCheckMessagesPerSecond;
    // Simplified from (mNMessages % mCheckMessagesPerSecond == 0): the value is
    // already reduced modulo mCheckMessagesPerSecond, so the extra % was redundant.
    if (mNMessages == 0) {
        RateLimitUtil.acquire(mCheckMessagesPerSecond);
    }
    Message message = mKafkaMessageIterator.next();
    if (message == null) {
        return null;
    }
    TopicPartition topicPartition = new TopicPartition(message.getTopic(),
                                                       message.getKafkaPartition());
    updateAccessTime(topicPartition);
    // Skip already committed messages.
    long committedOffsetCount = mOffsetTracker.getTrueCommittedOffsetCount(topicPartition);
    LOG.debug("read message {}", message);
    if (mNMessages == 0) {
        exportStats();
    }
    if (message.getOffset() < committedOffsetCount) {
        LOG.debug("skipping message {} because its offset precedes committed offset count {}",
                  message, committedOffsetCount);
        return null;
    }
    return message;
}
|
Message reader consumes raw Kafka messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
read
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/reader/MessageReader.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/reader/MessageReader.java
|
Apache-2.0
|
/** Commits the given offset for the topic partition via the underlying Kafka iterator. */
public void commit(TopicPartition topicPartition, long offset) {
    mKafkaMessageIterator.commit(topicPartition, offset);
}
|
Message reader consumes raw Kafka messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
commit
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/reader/MessageReader.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/reader/MessageReader.java
|
Apache-2.0
|
/**
 * Returns the offset count committed in Kafka for the given topic partition.
 * (The misspelled "Commited" name is kept for caller compatibility.)
 */
public long getCommitedOffsetCount(TopicPartition topicPartition) {
    return mKafkaMessageIterator.getKafkaCommitedOffsetCount(topicPartition);
}
|
Message reader consumes raw Kafka messages.
@author Pawel Garbacki (pawel@pinterest.com)
|
getCommitedOffsetCount
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/reader/MessageReader.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/reader/MessageReader.java
|
Apache-2.0
|
/**
 * Removes local consumer log directories whose most recent (recursive)
 * modification time is older than the configured delete age. A non-positive
 * configured age disables deletion entirely.
 */
public void deleteOldLogs() throws Exception {
    if (mConfig.getLocalLogDeleteAgeHours() <= 0) {
        return;
    }
    final long deleteAgeMs = mConfig.getLocalLogDeleteAgeHours() * 60L * 60L * 1000L;
    SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");
    format.setTimeZone(mConfig.getTimeZone());
    for (String consumerDir : FileUtil.list(mConfig.getLocalPath())) {
        final long modifiedAtMs = FileUtil.getModificationTimeMsRecursive(consumerDir);
        final String modifiedAtStr = format.format(modifiedAtMs);
        LOG.info("Consumer log dir {} last modified at {}", consumerDir , modifiedAtStr);
        if (System.currentTimeMillis() - modifiedAtMs > deleteAgeMs) {
            LOG.info("Deleting directory {} last modified at {}", consumerDir, modifiedAtStr);
            FileUtil.delete(consumerDir);
        }
    }
}
|
Log file deleter removes old message log files stored locally.
@author Pawel Garbacki (pawel@pinterest.com)
|
deleteOldLogs
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/LogFileDeleter.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/LogFileDeleter.java
|
Apache-2.0
|
/**
 * Prints the contents of a sequence file to stdout: either offsets only
 * (when mPrintOffsetsOnly is set) or "offset: payload" per record.
 *
 * @param path path of the sequence file to print.
 */
public void printFile(String path) throws Exception {
    FileSystem fileSystem = FileUtil.getFileSystem(path);
    Path fsPath = new Path(path);
    SequenceFile.Reader reader = new SequenceFile.Reader(fileSystem, fsPath,
                                                         new Configuration());
    try {
        LongWritable key = (LongWritable) reader.getKeyClass().newInstance();
        BytesWritable value = (BytesWritable) reader.getValueClass().newInstance();
        System.out.println("reading file " + path);
        while (reader.next(key, value)) {
            if (mPrintOffsetsOnly) {
                System.out.println(Long.toString(key.get()));
            } else {
                // BytesWritable's backing array may be padded; copy the valid prefix.
                byte[] nonPaddedBytes = new byte[value.getLength()];
                System.arraycopy(value.getBytes(), 0, nonPaddedBytes, 0, value.getLength());
                System.out.println(Long.toString(key.get()) + ": " + new String(nonPaddedBytes));
            }
        }
    } finally {
        // Fixed a resource leak: the reader was previously never closed.
        reader.close();
    }
}
|
Log file printer displays the content of a log file.
@author Pawel Garbacki (pawel@pinterest.com)
|
printFile
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/LogFilePrinter.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/LogFilePrinter.java
|
Apache-2.0
|
/** Returns the storage path prefix for this verifier's topic: "&lt;prefix&gt;/&lt;topic&gt;". */
private String getTopicPrefix() throws IOException {
    return FileUtil.getPrefix(mTopic, mConfig) + "/" + mTopic;
}
|
Log file verifier checks the consistency of log files.
@author Pawel Garbacki (pawel@pinterest.com)
|
getTopicPrefix
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
Apache-2.0
|
/**
 * Walks all files under the topic prefix and indexes them into
 * mTopicPartitionToOffsetToFiles as topic partition -> offset -> set of log files.
 * "_SUCCESS" marker files are skipped.
 */
private void populateTopicPartitionToOffsetToFiles() throws IOException {
    final String prefix = FileUtil.getPrefix(mTopic, mConfig);
    for (String path : FileUtil.listRecursively(getTopicPrefix())) {
        if (path.endsWith("/_SUCCESS")) {
            continue;  // marker files carry no message data
        }
        LogFilePath logFilePath = new LogFilePath(prefix, path);
        TopicPartition topicPartition = new TopicPartition(logFilePath.getTopic(),
                logFilePath.getKafkaPartition());
        SortedMap<Long, HashSet<LogFilePath>> offsetToFiles =
                mTopicPartitionToOffsetToFiles.get(topicPartition);
        if (offsetToFiles == null) {
            offsetToFiles = new TreeMap<Long, HashSet<LogFilePath>>();
            mTopicPartitionToOffsetToFiles.put(topicPartition, offsetToFiles);
        }
        final long offset = logFilePath.getOffset();
        HashSet<LogFilePath> filesAtOffset = offsetToFiles.get(offset);
        if (filesAtOffset == null) {
            filesAtOffset = new HashSet<LogFilePath>();
            offsetToFiles.put(offset, filesAtOffset);
        }
        filesAtOffset.add(logFilePath);
    }
}
|
Log file verifier checks the consistency of log files.
@author Pawel Garbacki (pawel@pinterest.com)
|
populateTopicPartitionToOffsetToFiles
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
Apache-2.0
|
/**
 * Narrows each topic partition's offset->files map to the window implied by
 * [fromOffset, toOffset]: firstOffset is the largest file offset not exceeding
 * fromOffset (or the first offset seen), so the file containing fromOffset is kept.
 */
private void filterOffsets(long fromOffset, long toOffset) {
    Iterator iterator = mTopicPartitionToOffsetToFiles.entrySet().iterator();
    while (iterator.hasNext()) {
        // -2 is the "unset" sentinel used throughout this class.
        long firstOffset = -2;
        long lastOffset = Long.MAX_VALUE;
        Map.Entry entry = (Map.Entry) iterator.next();
        SortedMap<Long, HashSet<LogFilePath>> offsetToFiles =
            (SortedMap<Long, HashSet<LogFilePath>>) entry.getValue();
        for (long offset : offsetToFiles.keySet()) {
            if (offset <= fromOffset || firstOffset == -2) {
                firstOffset = offset;
            }
            // NOTE(review): this only narrows lastOffset when toOffset is exactly
            // Long.MAX_VALUE, in which case offset >= toOffset is almost never true.
            // Looks like the condition may have been intended as
            // (offset >= toOffset && lastOffset == Long.MAX_VALUE) — verify intent.
            if (offset >= toOffset && toOffset == Long.MAX_VALUE) {
                lastOffset = offset;
            }
        }
        if (firstOffset != -2) {
            TopicPartition topicPartition = (TopicPartition) entry.getKey();
            // subMap is [firstOffset, lastOffset): entries from the covering file onward.
            offsetToFiles = offsetToFiles.subMap(firstOffset, lastOffset);
            mTopicPartitionToOffsetToFiles.put(topicPartition, offsetToFiles);
        }
    }
}
|
Log file verifier checks the consistency of log files.
@author Pawel Garbacki (pawel@pinterest.com)
|
filterOffsets
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
Apache-2.0
|
/**
 * Counts the records in the given log file.
 *
 * @param logFilePath file to count records in.
 * @return number of records read from the file.
 */
private int getMessageCount(LogFilePath logFilePath) throws Exception {
    FileReader reader = createFileReader(logFilePath);
    try {
        int count = 0;
        while (reader.next() != null) {
            count++;
        }
        return count;
    } finally {
        // Close even if next() throws; previously the reader leaked on error.
        reader.close();
    }
}
|
Log file verifier checks the consistency of log files.
@author Pawel Garbacki (pawel@pinterest.com)
|
getMessageCount
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
Apache-2.0
|
/**
 * Verifies that per-file message counts are consistent with adjacent file
 * offsets (the gap between consecutive offsets must equal the earlier file's
 * message count), and optionally that the aggregate count matches numMessages.
 *
 * @param fromOffset lower bound of the offset window to verify.
 * @param toOffset upper bound of the offset window to verify.
 * @param numMessages expected total message count, or -1 to skip that check.
 * @throws RuntimeException if counts and offsets disagree.
 */
public void verifyCounts(long fromOffset, long toOffset, int numMessages) throws Exception {
    populateTopicPartitionToOffsetToFiles();
    filterOffsets(fromOffset, toOffset);
    Iterator iterator = mTopicPartitionToOffsetToFiles.entrySet().iterator();
    int aggregateMessageCount = 0;
    while (iterator.hasNext()) {
        // -2 is the "unset" sentinel used throughout this class.
        long previousOffset = -2L;
        long previousMessageCount = -2L;
        Map.Entry entry = (Map.Entry) iterator.next();
        SortedMap<Long, HashSet<LogFilePath>> offsetToFiles =
            (SortedMap<Long, HashSet<LogFilePath>>) entry.getValue();
        for (HashSet<LogFilePath> logFilePaths : offsetToFiles.values()) {
            int messageCount = 0;
            long offset = -2;
            for (LogFilePath logFilePath : logFilePaths) {
                // All files bucketed at the same key must share one offset.
                // NOTE(review): the assert message prints `offset` twice; the
                // second operand was presumably meant to aid debugging only.
                assert offset == -2 || offset == logFilePath.getOffset():
                    Long.toString(offset) + " || " + offset + " == " + logFilePath.getOffset();
                messageCount += getMessageCount(logFilePath);
                offset = logFilePath.getOffset();
            }
            if (previousOffset != -2 && offset - previousOffset != previousMessageCount) {
                TopicPartition topicPartition = (TopicPartition) entry.getKey();
                throw new RuntimeException("Message count of " + previousMessageCount +
                                           " in topic " + topicPartition.getTopic() +
                                           " partition " + topicPartition.getPartition() +
                                           " does not agree with adjacent offsets " +
                                           previousOffset + " and " + offset);
            }
            previousOffset = offset;
            previousMessageCount = messageCount;
            aggregateMessageCount += messageCount;
        }
    }
    if (numMessages != -1 && aggregateMessageCount != numMessages) {
        throw new RuntimeException("Message count " + aggregateMessageCount +
                                   " does not agree with the expected count " + numMessages);
    }
}
|
Log file verifier checks the consistency of log files.
@author Pawel Garbacki (pawel@pinterest.com)
|
verifyCounts
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
Apache-2.0
|
/**
 * Adds every record offset in the given log file to the provided set.
 *
 * @param logFilePath file whose record offsets are collected.
 * @param offsets accumulator set; must not already contain any of the file's offsets.
 * @throws RuntimeException if a duplicate offset is encountered.
 */
private void getOffsets(LogFilePath logFilePath, Set<Long> offsets) throws Exception {
    FileReader reader = createFileReader(logFilePath);
    try {
        KeyValue record;
        while ((record = reader.next()) != null) {
            if (!offsets.add(record.getOffset())) {
                throw new RuntimeException("duplicate key " + record.getOffset() +
                        " found in file " + logFilePath.getLogFilePath());
            }
        }
    } finally {
        // Close even on the duplicate-key path; previously the reader leaked there.
        reader.close();
    }
}
|
Log file verifier checks the consistency of log files.
@author Pawel Garbacki (pawel@pinterest.com)
|
getOffsets
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
Apache-2.0
|
/**
 * Verifies that, per topic partition, the record offsets across all files in the
 * window form a contiguous sequence with no gaps and no duplicates.
 *
 * @param fromOffset lower bound of the offset window to verify.
 * @param toOffset upper bound of the offset window to verify.
 */
public void verifySequences(long fromOffset, long toOffset) throws Exception {
    populateTopicPartitionToOffsetToFiles();
    filterOffsets(fromOffset, toOffset);
    Iterator iterator = mTopicPartitionToOffsetToFiles.entrySet().iterator();
    while (iterator.hasNext()) {
        TreeSet<Long> offsets = new TreeSet<Long>();
        Map.Entry entry = (Map.Entry) iterator.next();
        TopicPartition topicPartition = (TopicPartition) entry.getKey();
        SortedMap<Long, HashSet<LogFilePath>> offsetToFiles =
            (SortedMap<Long, HashSet<LogFilePath>>) entry.getValue();
        for (HashSet<LogFilePath> logFilePaths : offsetToFiles.values()) {
            for (LogFilePath logFilePath : logFilePaths) {
                getOffsets(logFilePath, offsets);
            }
        }
        long lastOffset = -2;  // -2 is the "unset" sentinel used throughout this class.
        for (Long offset : offsets) {
            if (lastOffset != -2) {
                // Fixed assert message: it previously printed `offset` on both sides
                // instead of the preceding offset.
                assert lastOffset + 1 == offset: Long.toString(lastOffset) + " + 1 == " + offset +
                    " for topic " + topicPartition.getTopic() + " partition " +
                    topicPartition.getPartition();
            }
            lastOffset = offset;
        }
    }
}
|
Log file verifier checks the consistency of log files.
@author Pawel Garbacki (pawel@pinterest.com)
|
verifySequences
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
Apache-2.0
|
/**
 * Builds a file reader for the given log file using the configured
 * reader/writer factory, with an optional compression codec.
 *
 * @param logFilePath file to open for reading.
 * @return a reader positioned at the start of the file.
 * @throws Exception if the codec or reader cannot be instantiated.
 */
private FileReader createFileReader(LogFilePath logFilePath) throws Exception {
    final String codecName = mConfig.getCompressionCodec();
    CompressionCodec codec = null;
    if (codecName != null && !codecName.isEmpty()) {
        codec = CompressionUtil.createCompressionCodec(codecName);
    }
    return ReflectionUtil.createFileReader(
            mConfig.getFileReaderWriterFactory(),
            logFilePath,
            codec,
            mConfig);
}
|
Helper to create a file reader writer from config
@param logFilePath
@return
@throws Exception
|
createFileReader
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/LogFileVerifier.java
|
Apache-2.0
|
/**
 * POSTs the given JSON body to the configured openTSDB "/api/put?details"
 * endpoint and raises if any datapoint failed to be stored.
 *
 * @param body JSON-encoded datapoint payload; null sends a body-less request.
 * @throws IOException if the HTTP exchange fails.
 * @throws RuntimeException if the response is malformed or reports failures.
 */
private void makeRequest(String body) throws IOException {
    URL url = new URL("http://" + mConfig.getTsdbHostport() + "/api/put?details");
    HttpURLConnection connection = null;
    try {
        connection = (HttpURLConnection) url.openConnection();
        connection.setRequestProperty("Content-Type", "application/json");
        connection.setRequestProperty("Accepts", "application/json");
        connection.setRequestProperty("Accept", "*/*");
        if (body != null) {
            connection.setRequestMethod("POST");
            connection.setRequestProperty("Content-Length",
                Integer.toString(body.getBytes().length));
        }
        connection.setUseCaches(false);
        connection.setDoInput(true);
        connection.setDoOutput(true);
        if (body != null) {
            // Send request.
            DataOutputStream dataOutputStream = new DataOutputStream(
                connection.getOutputStream());
            try {
                dataOutputStream.writeBytes(body);
                dataOutputStream.flush();
            } finally {
                dataOutputStream.close();
            }
        }
        // Get Response.
        InputStream inputStream = connection.getInputStream();
        BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream));
        try {
            // Validate the parse result before casting, matching the defensive
            // check done in QuboleClient.makeRequest; previously a non-object
            // response caused an opaque ClassCastException/NPE.
            Object parsed = JSONValue.parse(reader);
            if (!(parsed instanceof Map)) {
                throw new RuntimeException("url " + url + " with body " + body +
                    " returned unexpected response " + parsed);
            }
            Map response = (Map) parsed;
            if (!response.get("failed").equals(0)) {
                throw new RuntimeException("url " + url + " with body " + body + " failed " +
                                           JSONObject.toJSONString(response));
            }
        } finally {
            // Fixed a resource leak: the response reader was never closed.
            reader.close();
        }
    } catch (IOException exception) {
        if (connection != null) {
            connection.disconnect();
        }
        throw exception;
    }
}
|
Progress monitor exports offset lags per topic partition.
@author Pawel Garbacki (pawel@pinterest.com)
|
makeRequest
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/ProgressMonitor.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/ProgressMonitor.java
|
Apache-2.0
|
/** Serializes the stat and posts it to the configured openTSDB endpoint. */
private void exportToTsdb(Stat stat)
        throws IOException {
    LOG.info("exporting metric to tsdb {}", stat);
    makeRequest(stat.toString());
}
|
Progress monitor exports offset lags per topic partition.
@author Pawel Garbacki (pawel@pinterest.com)
|
exportToTsdb
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/ProgressMonitor.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/ProgressMonitor.java
|
Apache-2.0
|
/**
 * Collects the current offset-lag stats, logs them, and exports them to
 * whichever backends are configured (openTSDB and/or statsD).
 */
public void exportStats() throws Exception {
    List<Stat> stats = getStats();
    LOG.info("Stats: {}", JSONArray.toJSONString(stats));
    final String tsdbHostport = mConfig.getTsdbHostport();
    // Export to openTSDB when an endpoint is configured.
    if (tsdbHostport != null && !tsdbHostport.isEmpty()) {
        for (Stat stat : stats) {
            exportToTsdb(stat);
        }
    }
    // Export to statsD when a client was configured.
    if (mStatsDClient != null) {
        exportToStatsD(stats);
    }
}
|
Progress monitor exports offset lags per topic partition.
@author Pawel Garbacki (pawel@pinterest.com)
|
exportStats
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/ProgressMonitor.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/ProgressMonitor.java
|
Apache-2.0
|
/**
 * Publishes each stat as a gauge to the statsD client. With dogstatsd tags
 * enabled, tags are sent as "key:value" pairs alongside the bare metric name;
 * otherwise the metric name is built as [group.]metric.topic.partition.
 */
private void exportToStatsD(List<Stat> stats) {
    for (Stat stat : stats) {
        @SuppressWarnings("unchecked")
        Map<String, String> tags = (Map<String, String>) stat.get(Stat.STAT_KEYS.TAGS.getName());
        String metric = (String) stat.get(Stat.STAT_KEYS.METRIC.getName());
        long value = Long.parseLong((String) stat.get(Stat.STAT_KEYS.VALUE.getName()));
        if (mConfig.getStatsdDogstatdsTagsEnabled()) {
            String[] formattedTags = new String[tags.size()];
            int index = 0;
            for (Map.Entry<String, String> tag : tags.entrySet()) {
                formattedTags[index++] = tag.getKey() + ':' + tag.getValue();
            }
            mStatsDClient.recordGaugeValue(metric, value, formattedTags);
        } else {
            StringBuilder nameBuilder = new StringBuilder();
            if (mConfig.getStatsDPrefixWithConsumerGroup()) {
                nameBuilder.append(tags.get(Stat.STAT_KEYS.GROUP.getName())).append(PERIOD);
            }
            nameBuilder.append(metric)
                       .append(PERIOD)
                       .append(tags.get(Stat.STAT_KEYS.TOPIC.getName()))
                       .append(PERIOD)
                       .append(tags.get(Stat.STAT_KEYS.PARTITION.getName()));
            mStatsDClient.recordGaugeValue(nameBuilder.toString(), value);
        }
    }
}
|
Helper to publish stats to statsD client
|
exportToStatsD
|
java
|
pinterest/secor
|
src/main/java/com/pinterest/secor/tools/ProgressMonitor.java
|
https://github.com/pinterest/secor/blob/master/src/main/java/com/pinterest/secor/tools/ProgressMonitor.java
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.