package com.mtiiot.mq.service;



import com.mtiiot.mq.esdao.BaseElasticsearch;
import com.mtiiot.mq.utils.Contants;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

import org.springframework.beans.factory.InitializingBean;

import java.nio.charset.StandardCharsets;
import java.util.*;

/**
 * Created by Administrator on 2016/8/11.
 */
public class KafkaClientTask implements InitializingBean{
    private BaseElasticsearch baseElasticsearch;
    private Properties properties; // 配置文件；
    private Map<String,String> topicMap; // 保存主题和消费者的线程数
    private LogService1 logService1;
    private String cid;
    private String gid;
    /**
     * kafka 出队列数据处理接口
     */
    private KafkaConsumerService1 kafkaConsumerService1;

    private ElasticsearchService elasticsearchService = new ElasticsearchService();


    public KafkaClientTask(){}

    private  String gId(int i, int j) {
        return gid + String.format("%04d", new Object[]{Integer.valueOf(i)}) + "-" + String.format("%04d", new Object[]{Integer.valueOf(j)});
    }

    private  String cId(int i, int j) {
        return cid + String.format("%04d", new Object[]{Integer.valueOf(i)}) + "-" + String.format("%04d", new Object[]{Integer.valueOf(j)});
    }



    public void consume(String topic,String groupId,String clientId,int threadPerTopic) {
        Properties props = this.properties;
        props.put("group.id",groupId);
        props.put("consumer.id", clientId);
        ConsumerConfig config = new ConsumerConfig(props);
        // ConsumerConnector: Consumer的连接器,这里基于ZK实现,是ZookeeperConsumerConnector
        ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);
        HashMap topicCountMap = new HashMap();
        topicCountMap.put(topic, Integer.valueOf(threadPerTopic));
        // 指定要消费的topic和线程数,返回每个topic对应的KafkaStream列表,每个线程对应一个KafkaStream.
        Map streams = connector.createMessageStreams(topicCountMap);
        Iterator var7 = ((List) streams.get(topic)).iterator();
        while (var7.hasNext()) {
            // KafkaStream: 消息流,每个消费者线程都对应了一个消息流,消息会放入消息流的阻塞队列中
            KafkaStream stream = (KafkaStream) var7.next();
            (new KafkaClientTask.MyStreamThread(stream,topic,groupId,clientId)).start();
        }
    }

    @Override
    public void afterPropertiesSet() throws Exception {

    }

    public LogService1 getLogService1() {
        return logService1;
    }

    public void setLogService1(LogService1 logService1) {
        this.logService1 = logService1;
    }

    public void setCid(String cid) {
        this.cid = cid;
    }

    public String getCid(){
        return cid;
    }

    public String getGid() {
        return gid;
    }

    public void setGid(String gid) {
        this.gid = gid;
    }

    public KafkaConsumerService1 getKafkaConsumerService1() {
        return kafkaConsumerService1;
    }

    public void setKafkaConsumerService1(KafkaConsumerService1 kafkaConsumerService1) {
        this.kafkaConsumerService1 = kafkaConsumerService1;
    }


    private class MyStreamThread extends Thread {
        private KafkaStream<byte[], byte[]> stream;
        private String topicName = ""; // 主题；
        private String groupId; // 组ID；
        private String clientId; // 消费者id;

        public MyStreamThread(KafkaStream stream,String topicName,String groupId,String clientId) {
            this.stream = stream;
            this.topicName = topicName;
            this.groupId = groupId;
            this.clientId = clientId;
        }

        public void run() {
            // ConsumerIterator: 消费者迭代器,只有迭代器开始迭代获取数据时,才会返回给消费者
            ConsumerIterator streamIterator = this.stream.iterator();
            while (streamIterator.hasNext()) {
                MessageAndMetadata message = streamIterator.next();
                String topic = message.topic();
                int partition = message.partition();
                long offset = message.offset();
                String key = new String((byte[]) message.key());
                String msg = new String((byte[]) message.message());
                if(topic.equals(topicName)) {
                    String content = "groupId:" +groupId
                            +",consumerid:" + clientId
                            + ", thread : " + Thread.currentThread().getName()
                            + ", topic : " + topic + ", partition : " + partition
                            + ", offset : " + offset + " , key : " + key
                            + " , mess : " + msg;
                    /*String time = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date());
                    System.out.println("time="+time+",数据:"+content);
                    // 调用后台处理方法输出
                    try {
                        if (msg.contains(".json")){
                            BaseService.sendData(topicName,key,msg);
                        }
                    } catch (Exception e){

                    }*/

                    System.out.println("数据:"+content);
                    if ("metadatainfo".equals(topicName)) {
                        // 处理元数据
                        /*JSONObject jsonObject = JSONObject.fromObject(msg);
                        String id = jsonObject.getString("id");
                        // 入到es,建立索引
                        try {
                            baseElasticsearch.createIndexById(Contants.INDEX_METADATA,key,msg,id);
                        } catch (Exception e) {
                            logService1.sendLog(new MetadataInfo(Contants.SYS_ID_FS, Contants.FSTASK_HOST, Contants.MODULE_FSTASK, "KafkaClientTask", "execute"), LogLevel.ERROR, " kafka error!!", e.getMessage());
                        }*/
                    } else if ("N_BX_TES_KAFKA".equals(topic)){
                        // 处理ipd数据
                        // 2016-11-17
                        try {
                            kafkaConsumerService1.consumeMessage(topic,key,msg);
                        } catch (Exception e) {
                            logService1.sendLog(new MetadataInfo(Contants.SYS_ID_FS, Contants.FSTASK_HOST, Contants.MODULE_FSTASK, "KafkaClientTask", "execute"), LogLevel.ERROR, " kafka error!!", e.getMessage());
                        }
                    } else {
                        // 处理业务数据
                        String[] keyArr = key.split("%");
                        try {
                            // 入库
                            BaseService.sendData(topicName, keyArr[1], msg);
                        } catch (Exception e) {
                            logService1.sendLog(new MetadataInfo(Contants.SYS_ID_FS, Contants.FSTASK_HOST, Contants.MODULE_FSTASK, "KafkaClientTask", "execute"), LogLevel.ERROR, " kafka error!!", e.getMessage());
                        }
                    }

                    // 测试es数据
                    /*if ("N_YQ_TES_RPFB".equals(topicName)){
                        System.out.println(content);
                        elasticsearchService.handleEsData(key,msg);
                    }*/

                }
            }
        }
    }

    public void execute() {
        byte i = 1;
        short j = 163;
        for (Map.Entry<String,String> map : topicMap.entrySet()){
            String topic = map.getKey();
            int threadCount = Integer.valueOf(map.getValue());
            consume(topic,gId(i,j),cId(i,j),threadCount);
            i++;
        }
    }

    public void setProperties(Properties properties) {
        this.properties = properties;
    }

    public Properties getProperties(){
        return properties;
    }

    public Map<String, String> getTopicMap() {
        return topicMap;
    }

    public void setTopicMap(Map<String, String> topicMap) {
        this.topicMap = topicMap;
    }

    public BaseElasticsearch getBaseElasticsearch() {
        return baseElasticsearch;
    }

    public void setBaseElasticsearch(BaseElasticsearch baseElasticsearch) {
        this.baseElasticsearch = baseElasticsearch;
    }
}
