package dacp.etl.kafka.hdfs.tools;

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord; 

/**
 * Command-line tool that copies up to {@code max} records from one Kafka topic
 * to another. Intended for carving a test-sized sample out of a production topic.
 *
 * <p>Usage: {@code SplitSomeToTestTopic <fromTopic> <toTopic> [maxRecords] [bootstrapServers]}
 */
public class SplitSomeToTestTopic {
	/** Default comma-separated Kafka broker list; can be overridden via args[3]. */
	public static String bootServers = "hbbdc-codis-01:6667,hbbdc-codis-02:6667,hbbdc-codis-03:6667,hbbdc-codis-04:6667,hbbdc-codis-05:6667,hbbdc-codis-06:6667,hbbdc-codis-07:6667";

	/**
	 * Entry point: consumes records from {@code args[0]} and republishes their
	 * values to {@code args[1]} with a 1-based sequence number as the key,
	 * stopping once {@code max} records have been copied.
	 *
	 * @param args fromTopic, toTopic, optional max record count (default 10,000,000),
	 *             optional bootstrap server list
	 * @throws InterruptedException if the idle-backoff sleep is interrupted
	 */
	public static void main(String[] args) throws InterruptedException {
		if (args.length < 2) {
			System.err.println("param error");
			System.exit(-1);
		}
		String fromTopicName = args[0];
		String toTopicName = args[1];

		long max = 10000000L;
		if (args.length >= 3) {
			max = Long.parseLong(args[2]);
		}
		if (args.length >= 4) {
			bootServers = args[3];
		}

		KafkaProducer<String, String> producer = getProducer(toTopicName);
		KafkaConsumer<String, String> consumer = getConsumer(fromTopicName);

		System.out.println("from  : " + fromTopicName);
		System.out.println("to    : " + toTopicName);
		System.out.println("size  : " + max);
		System.out.println("server: " + bootServers);

		System.out.println("begin ...");
		long cnt = 0L;
		long begin = System.currentTimeMillis();
		// try/finally so both clients are released even if poll/send throws
		// (previously they leaked on any exception).
		try {
			// Terminate as soon as `max` records have been sent. The old
			// `cnt > max` test only fired after one EXTRA record arrived, so
			// the tool hung forever if the source topic stopped at exactly max.
			while (cnt < max) {
				ConsumerRecords<String, String> records = consumer.poll(100);
				System.out.println("records.size=" + records.count() + " : " + cnt);
				for (ConsumerRecord<String, String> record : records) {
					cnt++;
					// Key is the 1-based copy sequence number, as before.
					producer.send(new ProducerRecord<String, String>(toTopicName, Long.toString(cnt), record.value()));
					if (cnt >= max) {
						break;
					}
				}
				// Back off when the topic is idle to avoid a tight poll loop.
				if (records.count() == 0 && cnt < max) {
					Thread.sleep(5000);
				}
			}
		} finally {
			consumer.close();
			// close() flushes any buffered sends before returning.
			producer.close();
		}
		long end = System.currentTimeMillis();
		System.out.println("Times: " + (end - begin) + " ms");
	}

	/**
	 * Builds a string-deserializing consumer subscribed to {@code topicName}.
	 * Auto-commit is enabled and offsets reset to "latest", so only records
	 * produced after startup (for a new group) are copied.
	 *
	 * @param topicName topic to subscribe to
	 * @return a subscribed {@link KafkaConsumer}; caller is responsible for closing it
	 */
	public static KafkaConsumer<String, String> getConsumer(String topicName) {
		Properties props = new Properties();
		props.put("bootstrap.servers", bootServers);
		props.put("group.id", "test-2");
		props.put("enable.auto.commit", "true");
		props.put("auto.commit.interval.ms", "1000");
		props.put("auto.offset.reset", "latest");
		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

		KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
		consumer.subscribe(Arrays.asList(topicName));
		return consumer;
	}

	/**
	 * Builds a string-serializing producer.
	 *
	 * @param topicName unused; retained for signature compatibility with existing
	 *                  callers (the target topic is supplied per-record instead)
	 * @return a {@link KafkaProducer}; caller is responsible for closing it
	 */
	public static KafkaProducer<String, String> getProducer(String topicName) {
		Properties props = new Properties();
		props.put("bootstrap.servers", bootServers);
		// Wait for full ISR acknowledgement of each send.
		props.put("acks", "all");
		// NOTE(review): retries=0 means a transient broker error silently drops
		// the record; consider raising if at-least-once copying matters.
		props.put("retries", 0);
		props.put("batch.size", 16384);
		props.put("linger.ms", 1);
		// Total memory available to the producer for buffering (32 MiB).
		props.put("buffer.memory", 33554432);
		props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		return new KafkaProducer<String, String>(props);
	}

}
