package com.tanjor.kafka.producer;

import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;

import com.alibaba.fastjson.JSON;
import com.tanjor.kafka.constant.KafkaMesConstant;

/**
 * kafkaProducer模板
 * 	使用此模板发送消息
 * @author wangb
 *
 */
public class KafkaServiceImpl implements KafkaService{

	@Autowired
	private KafkaTemplate<String, String> kafkaTemplate;
	
	/**
	 * kafka发送消息模板
	 * @param topic 主题
	 * @param value	messageValue
	 */
	@Override
	public Map<String, Object> sendMsg(String topic, Object value) {
		// TODO Auto-generated method stub
		ListenableFuture<SendResult<String, String>> result = kafkaTemplate.send(topic, value.toString());
		Map<String,Object> res = checkProRecord(result);
		return res;
	}

	/**
	 * kafka发送消息模板
	 * @param topic 主题
	 * @param value	messageValue
	 * @param partitionNum 分区数 如果是否使用分区为0,分区数必须大于0
	 */
	@Override
	public Map<String, Object> sendMsg(String topic, Object value, Integer partitionNum) {
		// TODO Auto-generated method stub
		return this.sendMsg(topic, value,partitionNum, "tanjor");
	}	

	/**
	 * kafka发送消息模板
	 * @param topic 主题
	 * @param value	messageValue
	 * @param partitionNum 分区数 如果是否使用分区为0,分区数必须大于0
	 * @param key : bbc app erp...
	 */
	public Map<String,Object> sendMsg(String topic, Object value,Integer partitionNum, String key){
		String keyStr = key+"-"+value.hashCode();
		String valueString = JSON.toJSONString(value);
					
		int partitionIndex = getPartitionIndex(key, partitionNum);
		ListenableFuture<SendResult<String, String>> result = kafkaTemplate.send(topic, partitionIndex, keyStr, valueString);
		Map<String,Object> res = checkProRecord(result);
		return res;		
	}

	/**
	 * 根据key值获取分区索引
	 * @param key
	 * @param partitionNum
	 * @return
	 */
	private int getPartitionIndex(String key, int partitionNum){
		if (key == null) {
			Random random = new Random();
			return random.nextInt(partitionNum);
		}
		else {
			int result = Math.abs(key.hashCode())%partitionNum;
			return result;
		}
	}
	
	/**
	 * 检查发送返回结果record
	 * @param res
	 * @return
	 */
	@SuppressWarnings("rawtypes")
	private Map<String,Object> checkProRecord(ListenableFuture<SendResult<String, String>> res){
		Map<String,Object> m = new HashMap<String,Object>();
		if(res!=null){
			try {
				SendResult r = res.get();//检查result结果集
				/*检查recordMetadata的offset数据，不检查producerRecord*/
				Long offsetIndex = r.getRecordMetadata().offset();
				if(offsetIndex!=null && offsetIndex>=0){
					m.put("code", KafkaMesConstant.SUCCESS_CODE);
					m.put("message", KafkaMesConstant.SUCCESS_MESSAGE);
					return m;
				}else{
					m.put("code", KafkaMesConstant.KAFKA_NO_OFFSET_CODE);
					m.put("message", KafkaMesConstant.KAFKA_NO_OFFSET_MES);
					return m;
				}
			} catch (InterruptedException e) {
				e.printStackTrace();
				m.put("code", KafkaMesConstant.KAFKA_SEND_ERROR_CODE);
				m.put("message", KafkaMesConstant.KAFKA_SEND_ERROR_MES);
				return m;
			} catch (ExecutionException e) {
				e.printStackTrace();
				m.put("code", KafkaMesConstant.KAFKA_SEND_ERROR_CODE);
				m.put("message", KafkaMesConstant.KAFKA_SEND_ERROR_MES);
				return m;
			}
		}else{
			m.put("code", KafkaMesConstant.KAFKA_NO_RESULT_CODE);
			m.put("message", KafkaMesConstant.KAFKA_NO_RESULT_MES);
			return m;
		}
	}

}
