package net.wicp.tams.common.others.kafka;

import java.lang.reflect.ParameterizedType;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

import org.apache.commons.collections.CollectionUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import lombok.extern.slf4j.Slf4j;
import net.wicp.tams.common.Conf;
import net.wicp.tams.common.Result;
import net.wicp.tams.common.apiext.ReflectAssist;
import net.wicp.tams.common.others.constant.SeekPosition;

@Slf4j
/**
 * Runs a group of Kafka consumer threads for one topic and hands batches of
 * records to a business callback ({@link IConsumer}). The number of threads is
 * sized so that, across {@code hosts} machines, the group covers every
 * partition of the topic. Commits offsets synchronously only when the callback
 * reports success, so failed batches are redelivered.
 *
 * <p>Thread-safety: call {@link #seekPotion} before {@link #start()} only —
 * NOTE(review): {@code KafkaConsumer} is not thread-safe, so seeking from the
 * caller's thread after the consumer threads are running is unsafe; confirm
 * callers respect this.
 */
@Slf4j
public abstract class KafkaConsumerGroup<T> {
	/** One runnable per KafkaConsumer; filled in the constructor, started by start(). */
	private List<KafkaConsumerGroupThread> consumerThreadList = new ArrayList<KafkaConsumerGroupThread>();
	private String groupId;
	private String topic;
	/** Business callback invoked with each accumulated batch of records. */
	private IConsumer<T> doConsumer;
	/** Max records per batch handed to doConsumer (common.others.kafka.consumer.batch.num). */
	private int batchNum = Integer.parseInt(Conf.get("common.others.kafka.consumer.batch.num"));
	/** Poll timeout in ms (common.others.kafka.consumer.batch.timeout). */
	private long timeout = Long.parseLong(Conf.get("common.others.kafka.consumer.batch.timeout"));
	/** Cooperative shutdown flag: stop() clears it and each loop exits after its next poll. */
	private volatile boolean running = true;

	/**
	 * Creates one consumer thread per this host's share of the topic's partitions.
	 *
	 * @param groupId    kafka consumer group id
	 * @param topic      topic to subscribe to
	 * @param doConsumer business callback for each batch
	 * @param hosts      number of hosts sharing this group; must be &gt; 0
	 * @throws IllegalArgumentException if {@code hosts} is not positive (previously
	 *                                  this surfaced as a divide-by-zero)
	 */
	public KafkaConsumerGroup(String groupId, String topic, IConsumer<T> doConsumer, int hosts) {
		if (hosts <= 0) {
			throw new IllegalArgumentException("hosts must be > 0, got: " + hosts);
		}
		this.groupId = groupId;
		this.topic = topic;
		this.doConsumer = doConsumer;
		@SuppressWarnings("unchecked")
		Class<T> classz = ReflectAssist.getSuperClassGenricType(this.getClass());
		// NOTE(review): the producer is only used to count partitions; it looks like a
		// cached singleton from KafkaAssitInst, so it is deliberately not closed here —
		// confirm it is indeed shared before ever closing it.
		KafkaProducer<String, T> kafkaProducer = KafkaAssitInst.getInst().getKafkaProducer(classz);
		List<PartitionInfo> partitions = kafkaProducer.partitionsFor(topic);
		log.info("topic======{},partitions size====={}", topic, partitions.size());
		// ceil(partitions / hosts): consumers this host must run so the group covers all partitions
		int consumerNum = partitions.size() / hosts + (partitions.size() % hosts > 0 ? 1 : 0);
		log.info("consumerNum====={}", consumerNum);
		for (int i = 0; i < consumerNum; i++) {
			KafkaConsumerGroupThread consumerThread = new KafkaConsumerGroupThread();
			consumerThreadList.add(consumerThread);
		}
	}

	/**
	 * Convenience constructor using the group id configured under
	 * {@code common.others.kafka.consumer.group.id}.
	 */
	public KafkaConsumerGroup(String topic, IConsumer<T> doConsumer, int hosts) {
		this(Conf.get("common.others.kafka.consumer.group.id"), topic, doConsumer, hosts);
	}

	/**
	 * Repositions every consumer in the group. Call before {@link #start()};
	 * see the class-level thread-safety note.
	 *
	 * @param seekPosition where to seek (begin/end/user/no)
	 * @param position     explicit offset, only used when seekPosition is {@code user}
	 */
	public void seekPotion(SeekPosition seekPosition, Long position) {
		for (KafkaConsumerGroupThread kafkaConsumer : consumerThreadList) {
			kafkaConsumer.seekPotion(seekPosition, position);
		}
	}

	/**
	 * Resolves the concrete type argument {@code T} from the direct subclass's
	 * generic superclass declaration (used to pick the value deserializer).
	 */
	@SuppressWarnings("unchecked")
	public Class<T> getTClass() {
		Class<T> tClass = (Class<T>) ((ParameterizedType) this.getClass().getGenericSuperclass())
				.getActualTypeArguments()[0];
		return tClass;
	}

	/** Starts one daemon-less thread per consumer; each runs until {@link #stop()}. */
	public void start() {
		for (KafkaConsumerGroupThread item : consumerThreadList) {
			Thread thread = new Thread(item);
			thread.start();
		}
	}

	/**
	 * Requests a cooperative shutdown: each consumer loop exits after its current
	 * poll cycle (at most one poll timeout later) and closes its KafkaConsumer.
	 * Safe to call more than once. Previously there was no way to stop the
	 * threads or release the consumers' network resources.
	 */
	public void stop() {
		this.running = false;
	}

	/** One polling loop bound to one KafkaConsumer instance. */
	private class KafkaConsumerGroupThread implements Runnable {
		private KafkaConsumer<String, T> kafkaConsumer;

		public KafkaConsumerGroupThread(SeekPosition seekPosition, Long position) {
			Properties props = KafkaTools.getProps(false);
			props.put("group.id", groupId);
			props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
			props.put("value.deserializer", KafkaTools.getValueProp(getTClass(), false));
			// +1 so a single poll can fill a full batch and still leave data to trigger the flush
			// condition — NOTE(review): confirm this off-by-one is intentional.
			props.put("max.poll.records", batchNum + 1);
			log.info("kafka consumer 参数:");
			for (Object propele : props.keySet()) {
				log.info("{}:{}", propele, props.get(propele));
			}
			this.kafkaConsumer = new KafkaConsumer<String, T>(props);
			this.kafkaConsumer.subscribe(Arrays.asList(topic));
			seekPotion(seekPosition, position);
		}

		/**
		 * Seeks this consumer's assigned partitions according to {@code seekPosition}.
		 * No-op when the topic has no partitions or position is {@code no}.
		 */
		public void seekPotion(SeekPosition seekPosition, Long position) {
			List<PartitionInfo> partitions = kafkaConsumer.partitionsFor(topic);
			if (CollectionUtils.isNotEmpty(partitions)) {
				if (seekPosition != SeekPosition.no) {
					List<TopicPartition> ptlist = new ArrayList<>();
					for (PartitionInfo partitionInfo : partitions) {
						ptlist.add(new TopicPartition(topic, partitionInfo.partition()));
					}
					// poll(0) presumably forces partition assignment so seek() is legal — TODO confirm
					kafkaConsumer.poll(0);
					switch (seekPosition) {
					case begin:
						kafkaConsumer.seekToBeginning(ptlist);
						break;
					case end:
						kafkaConsumer.seekToEnd(ptlist);
						break;
					case user:
						// only honor an explicit, positive user offset
						if (position != null && position > 0) {
							for (TopicPartition topicPartition : ptlist) {
								kafkaConsumer.seek(topicPartition, position);
							}
						}
						break;
					default:
						break;
					}
				}
			}
		}

		/** Default: subscribe at the committed position, no seeking. */
		public KafkaConsumerGroupThread() {
			this(SeekPosition.no, null);
		}

		@Override
		public void run() {
			List<ConsumerRecord<String, T>> buffer = new ArrayList<>();
			// flush a partial batch once it has waited roughly three poll timeouts
			long maxTime = timeout * 3;
			long startTime = System.currentTimeMillis();
			try {
				while (running) {
					ConsumerRecords<String, T> consumerRecords = kafkaConsumer.poll(timeout);
					for (ConsumerRecord<String, T> consumerRecord : consumerRecords) {
						buffer.add(consumerRecord);
					}
					long time2 = System.currentTimeMillis();
					// flush when the batch is full, or a non-empty partial batch has waited long enough
					if (buffer.size() >= batchNum || (time2 - startTime > maxTime && !buffer.isEmpty())) {
						Result doWithRecord = null;
						try {
							doWithRecord = doConsumer.doWithRecords(buffer);
						} catch (Throwable e) {
							log.error("业务处理失败", e);
							doWithRecord = Result.getError(e.getMessage());
						} finally {
							// NOTE(review): this logs only the records of the LAST poll, not the
							// whole buffer (errorlog takes ConsumerRecords) — confirm intent.
							KafkaTools.errorlog(consumerRecords, doWithRecord, log);
						}
						// null guard: a callback returning null used to NPE and kill the thread;
						// treat it as failure (no commit) so the batch is redelivered
						if (doWithRecord != null && doWithRecord.isSuc()) {
							try {
								kafkaConsumer.commitSync();
							} catch (Throwable e) {
								log.error("commit error", e);
							}
						}
						log.info("from kafka server,the time:{},records:{}", time2 - startTime, buffer.size());
						buffer.clear();
						startTime = System.currentTimeMillis();
					}
				}
			} finally {
				// release this consumer's sockets/buffers on shutdown (previously never closed)
				kafkaConsumer.close();
			}
		}
	}
}
