package com.moli.iov.producer;
import com.alibaba.fastjson.JSON;
import com.moli.iov.util.KafkaMesConstant;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import javax.annotation.Resource;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;

/**
 * Kafka producer template.
 * <p>
 * Offers two sending styles: an ad-hoc raw {@link KafkaProducer} built from
 * properties loaded by {@code kafkaProducerUtil}, and the Spring-injected
 * {@link KafkaTemplate} (synchronous and asynchronous variants). Partition
 * selection is done locally from the key's hash (random when the key is null).
 *
 * @author wangb
 */
public class KafkaProducerServer{

    @Resource
    private KafkaProducerUtil kafkaProducerUtil;

    private KafkaTemplate<String, Object> kafkaTemplate;

    protected final Logger logger=LoggerFactory.getLogger("KafkaProducerServer");

    /**
     * Asynchronously sends one record through a raw (non-Spring) {@link KafkaProducer}
     * configured from the properties resolved via {@code springBaseXmlPath}.
     * <p>
     * NOTE(review): a new producer is created and closed per call, which is expensive;
     * consider caching one producer per configuration if this is on a hot path.
     *
     * @param springBaseXmlPath path handed to {@code kafkaProducerUtil} to resolve producer properties
     * @param topic             destination topic
     * @param key               message key; may be null (a random partition is then chosen)
     * @param value             message payload
     */
    public void originalProducerAsynchronousSend(String springBaseXmlPath,String topic, String key, Object value){
        kafkaProducerUtil.setStringBaseXMLPath(springBaseXmlPath);
        Properties props = kafkaProducerUtil.getProps();
        int partitionIndex = getPartitionIndex(key, topic);
        // try-with-resources guarantees the producer is closed even if send() throws;
        // KafkaProducer.close() flushes buffered records, so the async send still completes.
        try (KafkaProducer<String, Object> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<String, Object>(topic, partitionIndex, key, value), new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // pass the throwable as the last argument (no "{}" placeholder)
                        // so SLF4J logs the full stack trace
                        logger.error("异步发送异常", e);
                    } else {
                        logger.info("异步发送成功：topic={},partition={},offset={}",
                            recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                    }
                }
            });
        }
    }




    /**
     * Sends a message through the injected {@link KafkaTemplate} and blocks
     * until the broker acknowledges (or the send fails).
     *
     * @param topic destination topic
     * @param key   message key used for partition selection; may be null
     * @param value message payload
     * @return a map with "code" and "message" entries taken from {@link KafkaMesConstant}
     */
    public Map<String,Object> sndMesForTemplate(String topic, String key, Object value){
        int partitionIndex = getPartitionIndex(key, topic);
        ListenableFuture<SendResult<String, Object>> result =
            kafkaTemplate.send(topic, partitionIndex, key, value);
        return checkProRecord(result, partitionIndex, topic);
    }

    /**
     * Fire-and-forget send through the injected {@link KafkaTemplate};
     * the result future is intentionally not inspected.
     *
     * @param topic destination topic
     * @param key   message key used for partition selection; may be null
     * @param value message payload
     */
    public void asynchronousSendMesForTemplate(String topic, String key, Object value){
        int partitionIndex = getPartitionIndex(key, topic);
        logger.info("kafka send >>>> topic:{},part:{},key:{},value:{}",topic,partitionIndex,key,value);
        kafkaTemplate.send(topic, partitionIndex, key, value);
    }

    /**
     * Resolves the partition index for a key: hash-based when the key is
     * present, random otherwise. Falls back to partition 0 on any error
     * (e.g. topic metadata unavailable).
     *
     * @param key   message key; may be null
     * @param topic topic whose partition count is consulted
     * @return a partition index in {@code [0, partitionCount)}, or 0 on failure
     */
    private int getPartitionIndex(String key,String topic){
        try {
            List<PartitionInfo> partitions = kafkaTemplate.partitionsFor(topic);
            int partitionCount = partitions.size();
            if (key == null) {
                // no key: spread messages randomly across partitions
                return ThreadLocalRandom.current().nextInt(partitionCount);
            }
            // floorMod always returns a non-negative index; the previous
            // Math.abs(hashCode()) % n was negative for Integer.MIN_VALUE
            // because Math.abs(Integer.MIN_VALUE) == Integer.MIN_VALUE.
            return Math.floorMod(key.hashCode(), partitionCount);
        } catch (Exception e) {
            logger.error("failed to resolve partition index, topic:{}, falling back to 0", topic, e);
            return 0;
        }
    }

    /**
     * Waits for the send result and translates it into a code/message map.
     * Only the record metadata offset is checked; the producer record itself
     * is not inspected.
     *
     * @param res            future returned by {@code KafkaTemplate.send}; may be null
     * @param partitionIndex partition the record was sent to (for diagnostics)
     * @param topic          destination topic (for diagnostics)
     * @return a map with "code" and "message" entries taken from {@link KafkaMesConstant}
     */
    private Map<String,Object> checkProRecord(ListenableFuture<SendResult<String,Object>> res,Integer partitionIndex,String topic){
        Map<String, Object> m = new HashMap<String, Object>(4);
        if (res == null) {
            loggerPrint(topic, partitionIndex);
            m.put("code", KafkaMesConstant.KAFKA_NO_RESULT_CODE);
            m.put("message", KafkaMesConstant.KAFKA_NO_RESULT_MES);
            return m;
        }
        try {
            SendResult<String, Object> r = res.get(); // blocks until the broker acks
            // offset() returns a primitive long, so only the sign needs checking
            long offsetIndex = r.getRecordMetadata().offset();
            if (offsetIndex >= 0) {
                m.put("code", KafkaMesConstant.SUCCESS_CODE);
                m.put("message", KafkaMesConstant.SUCCESS_MES);
            } else {
                loggerPrint(topic, partitionIndex);
                m.put("code", KafkaMesConstant.KAFKA_NO_OFFSET_CODE);
                m.put("message", KafkaMesConstant.KAFKA_NO_OFFSET_MES);
            }
        } catch (InterruptedException e) {
            // restore the interrupt flag so callers can observe the interruption
            Thread.currentThread().interrupt();
            logger.error("kafka发送失败", e);
            loggerPrint(topic, partitionIndex);
            m.put("code", KafkaMesConstant.KAFKA_SEND_ERROR_CODE);
            m.put("message", KafkaMesConstant.KAFKA_SEND_ERROR_MES);
        } catch (Exception e) {
            logger.error("kafka发送失败", e);
            loggerPrint(topic, partitionIndex);
            m.put("code", KafkaMesConstant.KAFKA_SEND_ERROR_CODE);
            m.put("message", KafkaMesConstant.KAFKA_SEND_ERROR_MES);
        }
        return m;
    }

    /**
     * Logs the available partitions of a topic together with the partition
     * the failed/suspect send targeted, as a diagnostic aid.
     *
     * @param topic          topic to describe
     * @param partitionIndex partition index the send targeted
     */
    public void  loggerPrint(String topic,Integer partitionIndex){
        List<PartitionInfo> list= kafkaTemplate.partitionsFor(topic);
        logger.error("topic------{}的可用分区数量是{}可用分区列表{}------发送目标分区号-------{}",topic,list.size(),JSON.toJSONString(list),partitionIndex);
    }

    public KafkaTemplate<String, Object> getKafkaTemplate() {
        return kafkaTemplate;
    }

    public void setKafkaTemplate(
        KafkaTemplate<String, Object> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }
}
