package net.wicp.tams.common.others.kafka;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.commons.lang3.ArrayUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;

import net.wicp.tams.common.Conf;
import net.wicp.tams.common.Result;
import net.wicp.tams.common.apiext.StringUtil;

/**
 * Static helpers for working with Kafka: lazily-built producer/consumer
 * {@link Properties} loaded from {@code Conf}, serializer class lookup,
 * batch error logging and topic-partition listing.
 *
 * <p>Thread-safety: the two Properties caches are built at most once under a
 * shared lock and published via volatile fields; callers always receive a
 * defensive clone.
 */
public abstract class KafkaTools {
	/**
	 * Lock guarding lazy initialization of the two caches. The previous code
	 * synchronized on {@code new Object()}, which locks a fresh monitor per
	 * call and therefore provides no mutual exclusion at all.
	 */
	private static final Object INIT_LOCK = new Object();
	// volatile so a fully-built Properties is safely published to other threads
	private static volatile Properties propProducer = null;
	private static volatile Properties propConsumer = null;

	/**
	 * Returns a clone of the cached producer or consumer configuration,
	 * building it from {@code Conf} on first use.
	 *
	 * @param isProducer {@code true} for producer config, {@code false} for consumer
	 * @return a defensive copy of the cached Properties (callers may mutate it freely)
	 */
	public static Properties getProps(boolean isProducer) {
		if ((isProducer && propProducer == null) || (!isProducer && propConsumer == null)) {
			synchronized (INIT_LOCK) {
				// Double-check inside the lock so the build runs at most once
				// even when several threads raced past the first check.
				if ((isProducer && propProducer == null) || (!isProducer && propConsumer == null)) {
					Properties built = buildProps(isProducer);
					if (isProducer) {
						propProducer = built;
					} else {
						propConsumer = built;
					}
				}
			}
		}
		return isProducer ? (Properties) propProducer.clone() : (Properties) propConsumer.clone();
	}

	/**
	 * Builds the Properties for the requested role from {@code Conf}, coercing
	 * known numeric keys to their expected boxed types (Kafka client config
	 * accepts typed Object values).
	 *
	 * @param isProducer selects the {@code producer.} or {@code consumer.} prefix
	 * @return a freshly built, never-null Properties instance
	 */
	private static Properties buildProps(boolean isProducer) {
		Properties temp = new Properties();
		// Keys that must be stored as Integer/Long rather than String.
		Map<String, String> nameToType = new HashMap<>();
		// producer settings
		nameToType.put("retries", "integer");
		nameToType.put("max.block.ms", "long");
		nameToType.put("max.in.flight.requests.per.connection", "integer");
		nameToType.put("linger.ms", "integer");
		nameToType.put("batch.size", "integer");
		nameToType.put("buffer.memory", "integer");
		nameToType.put("max.request.size", "integer");
		nameToType.put("receive.buffer.bytes", "integer");
		nameToType.put("request.timeout.ms", "integer");
		nameToType.put("send.buffer.bytes", "integer");
		nameToType.put("connections.max.idle.ms", "integer");
		// consumer settings
		nameToType.put("batch.timeout", "integer");
		nameToType.put("auto.commit.interval.ms", "integer");
		nameToType.put("session.timeout.ms", "integer");
		nameToType.put("heartbeat.interval.ms", "integer");

		Map<String, String> paramsMap = Conf
				.getPre("common.others.kafka." + (isProducer ? "producer." : "consumer."), true);
		// common.* entries override role-specific ones (putAll semantics preserved)
		paramsMap.putAll(Conf.getPre("common.others.kafka.common.", true));
		for (Map.Entry<String, String> entry : paramsMap.entrySet()) {
			String key = entry.getKey();
			String value = entry.getValue();
			if (!StringUtil.isNotNull(value)) {
				continue; // skip blank config values
			}
			String type = nameToType.get(key);
			if (type == null) {
				// Unknown key: pass the raw string through unchanged.
				temp.put(key, value);
			} else if ("integer".equals(type)) {
				temp.put(key, Integer.parseInt(value));
			} else if ("long".equals(type)) {
				// tolerate a trailing l/L suffix in the configured value
				temp.put(key, Long.parseLong(value.replace("l", "").replace("L", "")));
			}
			// any other declared type is intentionally dropped (matches prior behavior)
		}
		return temp;
	}

	/**
	 * Maps a value class to the matching Kafka (de)serializer class name.
	 *
	 * @param classz     the value type; only {@code String} and {@code byte[]} are supported
	 * @param isProducer {@code true} for a serializer, {@code false} for a deserializer
	 * @return fully-qualified (de)serializer class name
	 * @throws IllegalArgumentException if {@code classz} is neither String nor byte[]
	 */
	public static String getValueProp(Class<?> classz, boolean isProducer) {
		if ("java.lang.String".equals(classz.getName())) {
			return isProducer ? "org.apache.kafka.common.serialization.StringSerializer"
					: "org.apache.kafka.common.serialization.StringDeserializer";
		} else if ("[B".equals(classz.getName())) { // "[B" is the JVM name of byte[]
			return isProducer ? "org.apache.kafka.common.serialization.ByteArraySerializer"
					: "org.apache.kafka.common.serialization.ByteArrayDeserializer";
		} else {
			throw new IllegalArgumentException("不支持的类型");
		}
	}

	/**
	 * Logs failure details for a consumed batch when {@code doWithRecord}
	 * reports failure. If the Result carries no per-record objects, every
	 * record of the batch is logged; otherwise only records whose slot in
	 * {@code retObjs()} is non-null (the error reason) are logged.
	 *
	 * @param consumerRecords the batch that was processed
	 * @param doWithRecord    the processing result; no-op when successful
	 * @param log             target logger
	 */
	public static <T> void errorlog(ConsumerRecords<String, T> consumerRecords, Result doWithRecord, Logger log) {
		if (!doWithRecord.isSuc()) {
			int i = 0;
			for (ConsumerRecord<String, T> item : consumerRecords) {
				if (ArrayUtils.isEmpty(doWithRecord.retObjs())) {
					log.error("处理出错了,打印此批次: topic:[{}],partition:[{}],offset:[{}]", item.topic(), item.partition(),
							item.offset());
				} else if (doWithRecord.retObjs().length > i && doWithRecord.retObjs()[i] != null) {
					log.error("处理出错了,出错记录: topic:[{}],partition:[{}],offset:[{}] ，出错原因【{}】", item.topic(),
							item.partition(), item.offset(), doWithRecord.retObjs()[i]);
				}
				i++;
			}
		}
	}

	/**
	 * Lists all partitions of a topic as {@link TopicPartition} objects.
	 *
	 * <p>NOTE(review): uses the shared producer from {@code KafkaAssitInst} to
	 * fetch metadata; the producer is intentionally not closed here since it is
	 * presumably a long-lived shared instance — confirm against KafkaAssitInst.
	 *
	 * @param topic topic name to inspect
	 * @return one TopicPartition per partition of the topic
	 */
	public static List<TopicPartition> getTopicPartition(String topic) {
		KafkaProducer<String, byte[]> kafkaProducer = KafkaAssitInst.getInst().getKafkaProducer(byte[].class);
		List<PartitionInfo> partitionsFor = kafkaProducer.partitionsFor(topic);
		List<TopicPartition> retlist = new ArrayList<>(partitionsFor.size());
		for (PartitionInfo partitionInfo : partitionsFor) {
			retlist.add(new TopicPartition(topic, partitionInfo.partition()));
		}
		return retlist;
	}
}
