package org.eking.bigdata.kafka;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.AuthorizationException;
import org.apache.kafka.common.errors.OutOfOrderSequenceException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import kafka.producer.ProducerClosedException;

public class KafkaTest {
	static final Logger logger = LoggerFactory.getLogger(KafkaTest.class);

	/**
	 * Entry point: runs one of the producer/consumer demos below.
	 * Uncomment the call you want to exercise.
	 */
	public static void main(String[] args) {
		TestProduct();
//		TranProduct();
//		testConsumer();
//		TestOfConsumer();
		System.out.println("run complete ");
		System.exit(0);
	}

	/**
	 * Sends five plain (non-transactional) string records to topic "kafkatst2"
	 * and flushes them before the producer is closed.
	 */
	static void TestProduct() {
		Properties props = new Properties();
		props.put("bootstrap.servers", "10.70.94.93:9092");
		props.put("client.id", "lele-test");
		// acks=1: wait only for the partition leader's acknowledgement.
		props.put("acks", "1");
		props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		System.out.println("send begin 12");

		String sendTopic = "kafkatst2";
		// try-with-resources: the producer is closed even if send()/flush() throws.
		try (Producer<String, String> producer = new KafkaProducer<>(props)) {
			System.out.println("send :" + sendTopic);
			for (int i = 0; i < 5; i++) {
				producer.send(new ProducerRecord<>(sendTopic, "counts", "11111 222 3333"));
			}
			producer.flush();
			System.out.println("send complete ");
		}
	}

	/**
	 * Sends nine records to topic "streams" inside a single Kafka transaction.
	 * A producer configured with transactional.id MUST call initTransactions()
	 * before any send — with the transaction calls commented out (as before),
	 * every send() failed with IllegalStateException.
	 */
	static void TranProduct() {
		Properties props = new Properties();
		props.put("bootstrap.servers", "10.71.200.109:9092");
		props.put("transactional.id", "100010101010");
		// Serializers are supplied to the constructor, so the corresponding
		// key/value.serializer properties are not duplicated here.
		KafkaProducer<String, String> producer =
				new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());

		// Registers the transactional.id with the broker and fences any
		// previous producer instance using the same id.
		producer.initTransactions();
		try {
			producer.beginTransaction();
			for (int i = 1; i < 10; i++) {
				producer.send(new ProducerRecord<>("streams", "counts", "aaaaaa" + i));
			}
			producer.commitTransaction();
		} catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
			// Unrecoverable: another producer fenced us, sequencing broke, or
			// we lack ACLs. Closing the producer is the only option.
			logger.error("fatal transactional error, closing producer", e);
		} catch (KafkaException e) {
			// Any other Kafka error: abort so none of the records become visible.
			logger.error("aborting transaction", e);
			producer.abortTransaction();
		} finally {
			// Single close for every path (the old code closed twice on error).
			System.out.println("close begin ");
			producer.close();
		}
		System.out.println("send complete ");
	}

	/**
	 * Auto-committing consumer: subscribes to "test" and "kafkatst" and prints
	 * every record forever. auto.offset.reset=earliest makes a brand-new group
	 * start from the beginning of the log ("from.beginning" is not a valid
	 * Kafka consumer property and was silently ignored).
	 */
	static void testConsumer() {
		Properties props = new Properties();
		props.put("bootstrap.servers", "10.70.94.93:9092");
		props.put("group.id", "test1");
		props.put("enable.auto.commit", "true");
		props.put("auto.offset.reset", "earliest");
		props.put("auto.commit.interval.ms", "1000");
		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
		consumer.subscribe(Arrays.asList("test", "kafkatst"));
		try {
			while (true) {
				ConsumerRecords<String, String> records = consumer.poll(100);
				for (ConsumerRecord<String, String> record : records) {
					System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(),
							record.value());
				}
			}
		} finally {
			// Matches TestOfConsumer: release sockets and group membership on exit.
			consumer.close();
		}
	}

	/**
	 * Manual-commit consumer: processes records partition by partition and
	 * commits the offset of the last processed record + 1 for each partition.
	 */
	static void TestOfConsumer() {
		Properties props = new Properties();
		props.put("bootstrap.servers", "10.71.200.109:9092");
		props.put("group.id", "test1");
		// Offsets are committed manually below, so auto-commit must be OFF;
		// a background auto-commit would race with the explicit commitSync().
		props.put("enable.auto.commit", "false");
		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

		KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
		consumer.subscribe(Arrays.asList("test", "bar"));
		try {
			while (true) {
				ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
				for (TopicPartition partition : records.partitions()) {
					List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
					for (ConsumerRecord<String, String> record : partitionRecords) {
						System.out.println(record.offset() + ": " + record.value());
					}
					// The committed offset is the NEXT record to read, hence +1.
					long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
					consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
				}
			}
		} finally {
			consumer.close();
		}
	}

}