package com.johnguo.beam_test.read;

import java.util.ArrayList;
import java.util.List;

import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.kafka.KafkaIO;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.values.KV;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

import com.johnguo.beam_test.read.consumer.ConsumerFactoryFn;

public class KafkaDemoBeam {
	/**
	 * Builds and runs a Beam pipeline that reads {@code (Integer, String)} records
	 * from the Kafka topic {@code "test"} on {@code flink-master:9092}, using a
	 * custom consumer factory ({@link ConsumerFactoryFn}, which supplies a mock
	 * consumer for testing), and prints each key/value pair to stdout.
	 *
	 * <p>Blocks until the pipeline finishes ({@code waitUntilFinish()}); for an
	 * unbounded Kafka source this runs until the job is cancelled.
	 *
	 * @param args standard Beam pipeline options (e.g. {@code --runner=...});
	 *             validated by {@code withValidation()}
	 */
	public static void main(String... args) {
		PipelineOptions options = PipelineOptionsFactory.fromArgs(args).withValidation().create();
		Pipeline p = Pipeline.create(options);

		// Single-topic subscription; kept as a mutable list so more topics can be added.
		List<String> topics = new ArrayList<>();
		topics.add("test");

		KafkaIO.Read<Integer, String> reader = KafkaIO.<Integer, String>read()
				.withBootstrapServers("flink-master:9092")
				.withTopics(topics)
				// Custom factory: returns a (mock) consumer instead of a real KafkaConsumer,
				// so the pipeline can be exercised without a live broker.
				.withConsumerFactoryFn(new ConsumerFactoryFn())
				.withKeyDeserializer(IntegerDeserializer.class)
				.withValueDeserializer(StringDeserializer.class);

		// withoutMetadata() drops Kafka record metadata, yielding a
		// PCollection<KV<Integer, String>> of just keys and values.
		p.apply(reader.withoutMetadata()).apply(ParDo.of(new DoFn<KV<Integer, String>, String>() {
			private static final long serialVersionUID = 1L;

			@ProcessElement
			public void processElement(ProcessContext c) {
				// Demo sink: print each element. Prefer a logger in production code.
				System.out.println(c.element());
			}
		}));

		p.run().waitUntilFinish();
	}
}

// NOTE(review): The commented-out class below appears to be a reference copy of a
// MockConsumer-based factory (it looks adapted from Beam's KafkaIO test utilities —
// confirm). The active implementation used above lives in
// com.johnguo.beam_test.read.consumer.ConsumerFactoryFn; this copy is retained for
// documentation only.
// class ConsumerFactoryFn implements SerializableFunction<Map<String, Object>,
// Consumer<byte[], byte[]>> {
// private final List<String> topics;
// private final int partitionsPerTopic;
// private final int numElements;
// private final OffsetResetStrategy offsetResetStrategy;
//
// public ConsumerFactoryFn(List<String> topics, int partitionsPerTopic, int
// numElements,
// OffsetResetStrategy offsetResetStrategy) {
// this.topics = topics;
// this.partitionsPerTopic = partitionsPerTopic;
// this.numElements = numElements;
// this.offsetResetStrategy = offsetResetStrategy;
// }
//
// @Override
// public Consumer<byte[], byte[]> apply(Map<String, Object> config) {
// return mkMockConsumer(topics, partitionsPerTopic, numElements,
// offsetResetStrategy);
// }
//
// private static MockConsumer<byte[], byte[]> mkMockConsumer(List<String>
// topics, int partitionsPerTopic,
// int numElements, OffsetResetStrategy offsetResetStrategy) {
//
// final List<TopicPartition> partitions = new ArrayList<>();
// final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new
// HashMap<>();
// Map<String, List<PartitionInfo>> partitionMap = new HashMap<>();
//
// for (String topic : topics) {
// List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
// for (int i = 0; i < partitionsPerTopic; i++) {
// TopicPartition tp = new TopicPartition(topic, i);
// partitions.add(tp);
// partIds.add(new PartitionInfo(topic, i, null, null, null));
// records.put(tp, new ArrayList<ConsumerRecord<byte[], byte[]>>());
// }
// partitionMap.put(topic, partIds);
// }
//
// int numPartitions = partitions.size();
// long[] offsets = new long[numPartitions];
//
// for (int i = 0; i < numElements; i++) {
// int pIdx = i % numPartitions;
// TopicPartition tp = partitions.get(pIdx);
//
// records.get(tp)
// .add(new ConsumerRecord<>(tp.topic(), tp.partition(), offsets[pIdx]++,
// ByteBuffer.wrap(new byte[4]).putInt(i).array(), // key
// // is
// // 4
// // byte
// // record
// // id
// ByteBuffer.wrap(new byte[8]).putLong(i).array())); // value
// // is
// // 8
// // byte
// // record
// // id
// }
//
// // This is updated when reader assigns partitions.
// final AtomicReference<List<TopicPartition>> assignedPartitions = new
// AtomicReference<>(
// Collections.<TopicPartition>emptyList());
//
// final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[],
// byte[]>(offsetResetStrategy) {
// // override assign() in order to set offset limits & to save
// // assigned partitions.
// // remove keyword '@Override' here, it can work with Kafka client
// // 0.9 and 0.10 as:
// // 1. SpEL can find this function, either input is List or
// // Collection;
// // 2. List extends Collection, so super.assign() could find either
// // assign(List)
// // or assign(Collection).
// public void assign(final List<TopicPartition> assigned) {
// super.assign(assigned);
// assignedPartitions.set(ImmutableList.copyOf(assigned));
// for (TopicPartition tp : assigned) {
// updateBeginningOffsets(ImmutableMap.of(tp, 0L));
// updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size()));
// }
// }
// };
//
// for (String topic : topics) {
// consumer.updatePartitions(topic, partitionMap.get(topic));
// }
//
// // MockConsumer does not maintain any relationship between partition
// // seek position and the
// // records added. e.g. if we add 10 records to a partition and then seek
// // to end of the
// // partition, MockConsumer is still going to return the 10 records in
// // next poll. It is
// // our responsibility to make sure currently enqueued records sync with
// // partition offsets.
// // The following task will be called inside each invocation to
// // MockConsumer.poll().
// // We enqueue only the records with the offset >= partition's current
// // position.
// Runnable recordEnqueueTask = new Runnable() {
// @Override
// public void run() {
// // add all the records with offset >= current partition
// // position.
// for (TopicPartition tp : assignedPartitions.get()) {
// long curPos = consumer.position(tp);
// for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) {
// if (r.offset() >= curPos) {
// consumer.addRecord(r);
// }
// }
// }
// consumer.schedulePollTask(this);
// }
// };
//
// consumer.schedulePollTask(recordEnqueueTask);
// return consumer;
// }
// }