package com.suning.sawp.service.impl.bi;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;

import com.google.common.collect.Lists;
import com.google.common.collect.Queues;
import com.google.gson.reflect.TypeToken;
import com.suning.sawp.constants.ConfigConstants;
import com.suning.sawp.intf.bi.BiKafkaRealTimeSaleService;
import com.suning.sawp.service.util.GsonUtils;
import com.suning.sawp.service.util.SCMConfigUtil;

/**
 * Consumes real-time store sale data pushed by the sales middle platform over Kafka.
 * <p>
 * One consumer thread is started per Kafka partition; each decodes the JSON payload,
 * stamps it with a receive time, and pushes it onto a bounded queue. A single
 * dispatcher thread drains that queue in batches (up to 500 records or 30 seconds)
 * and hands each batch to {@code biKafkaRealTimeSaleService} on a worker pool.
 *
 * @author 12061818
 */
public class BiStoreRealTimeSaleKafkaConsumer implements InitializingBean {
    private static final Logger LOGGER = LoggerFactory.getLogger(BiStoreRealTimeSaleKafkaConsumer.class);

    /** SCM switch value meaning "process incoming Kafka messages". */
    private static final String DEAL_FLAG = "1";

    /**
     * SimpleDateFormat is NOT thread-safe, and MessageRunner instances run
     * concurrently on one thread per partition, so each thread must own its
     * own formatter instance.
     */
    private static final ThreadLocal<SimpleDateFormat> SDF = new ThreadLocal<SimpleDateFormat>() {
        @Override
        protected SimpleDateFormat initialValue() {
            return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        }
    };

    private ConsumerConnector connector;
    // one consumer thread per Kafka partition
    private ExecutorService threadPool;

    // worker pool that hands drained batches to the persistence service
    private ThreadPoolExecutor executor;

    // bounded buffer between the partition-consumer threads and the dispatcher
    private BlockingQueue<Map<String, Object>> pushQueue;

    // batch dispatcher started in afterPropertiesSet(); kept so shutdown() can stop it
    private Thread dispatcherThread;

    // lets shutdown() terminate the dispatcher loop cleanly
    private volatile boolean running = true;

    @Autowired
    BiKafkaRealTimeSaleService biKafkaRealTimeSaleService;

    // Kafka topic carrying store sale events
    @Value("#{settingConfig[kafka_topic]}")
    String topic;
    // number of partitions (= number of consumer threads)
    @Value("#{settingConfig[kafka_partitionsNum]}")
    int partitionsNum;
    @Value("#{settingConfig[kafka_zk_connect]}")
    String kafkaZkConnect;

    @Value("#{settingConfig[kafka_group_id]}")
    String kafkaGroupId;

    @Value("#{settingConfig[kafka_zk_session_timeout_ms]}")
    String kafkaZkSessionTimeout;

    @Value("#{settingConfig[kafka_zk_sync_time_ms]}")
    String kafkaZkSyncTime;

    @Value("#{settingConfig[kafka_auto_commit_interval_ms]}")
    String kafkaAutoCommitInterval;

    public BiStoreRealTimeSaleKafkaConsumer() {
    }

    /**
     * Connects to Kafka and starts one {@link MessageRunner} per partition.
     * Invoked by the container after dependency injection.
     */
    @PostConstruct
    public void start() throws Exception {
        Properties props = new Properties();
        props.put("zookeeper.connect", kafkaZkConnect);
        props.put("group.id", kafkaGroupId);
        props.put("zookeeper.session.timeout.ms", kafkaZkSessionTimeout);
        props.put("zookeeper.sync.time.ms", kafkaZkSyncTime);
        props.put("auto.commit.interval.ms", kafkaAutoCommitInterval);
        ConsumerConfig config = new ConsumerConfig(props);
        connector = Consumer.createJavaConsumerConnector(config);
        Map<String, Integer> topics = new HashMap<String, Integer>();
        topics.put(topic, partitionsNum);
        Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topics);
        List<KafkaStream<byte[], byte[]>> partitions = streams.get(topic);

        threadPool = Executors.newFixedThreadPool(partitionsNum);

        for (KafkaStream<byte[], byte[]> partition : partitions) {
            threadPool.submit(new MessageRunner(partition));
        }
    }

    /** Consumes a single Kafka partition stream and feeds decoded records into pushQueue. */
    class MessageRunner implements Runnable {
        private KafkaStream<byte[], byte[]> partition;

        MessageRunner(KafkaStream<byte[], byte[]> partition) {
            this.partition = partition;
        }

        public void run() {
            ConsumerIterator<byte[], byte[]> it = partition.iterator();
            while (it.hasNext()) {
                MessageAndMetadata<byte[], byte[]> item = it.next();
                // processing switch: can be turned off via SCM config if the
                // Kafka push volume spikes and downstream cannot keep up
                String flag = SCMConfigUtil.getConfig(ConfigConstants.KAFKA_DEAL_FLAG);
                if (!DEAL_FLAG.equals(flag)) {
                    continue;
                }
                String message = null;
                try {
                    // decode explicitly as UTF-8; the platform default charset
                    // is environment-dependent and unreliable for JSON payloads
                    message = new String(item.message(), "UTF-8");
                    // parse the JSON payload
                    Map<String, Object> data = GsonUtils.fromJson(message, new TypeToken<Map<String, Object>>() {
                    });
                    if (null != data && data.containsKey("flag")) {
                        // stamp the record with its receive time to ease tracing
                        data.put("updateTime", SDF.get().format(new Date()));
                        if (!pushQueue.offer(data)) {
                            // queue is full: the record is dropped — make the loss visible
                            LOGGER.warn("pushQueue is full, dropping message: {}", message);
                        }
                    }
                } catch (Exception e) {
                    LOGGER.error("deal kafka realtime sale message error, message={}", message, e);
                }
            }
        }
    }

    /** Stops the Kafka connection, the consumer threads, the dispatcher, and the worker pool. */
    @PreDestroy
    public void shutdown() {
        // stop the batch dispatcher loop first so it no longer submits work
        running = false;
        if (null != dispatcherThread) {
            dispatcherThread.interrupt();
        }
        // close the Kafka connection (also unblocks the partition iterators)
        if (null != connector) {
            connector.shutdown();
        }
        // stop the partition-consumer threads
        if (null != threadPool) {
            threadPool.shutdown();
        }
        // stop the batch worker pool
        if (null != executor) {
            executor.shutdown();
        }
    }

    @Override
    public void afterPropertiesSet() throws Exception {
        pushQueue = Queues.newLinkedBlockingDeque(10000);
        // CallerRunsPolicy: when all 8 workers are busy, run the batch on the
        // dispatcher thread instead of throwing RejectedExecutionException,
        // which would silently discard the whole batch.
        executor = new ThreadPoolExecutor(0, 8, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
                new ThreadPoolExecutor.CallerRunsPolicy());
        dispatcherThread = new Thread("bi-store-realtime-sale-dispatcher") {
            public void run() {
                while (running) {
                    try {
                        final List<Map<String, Object>> inptPushs = Lists.newArrayList();
                        // collect up to 500 records, waiting at most 30 seconds
                        Queues.drain(pushQueue, inptPushs, 500, 30, TimeUnit.SECONDS);
                        if (!inptPushs.isEmpty()) {
                            executor.submit(new Runnable() {
                                @Override
                                public void run() {
                                    biKafkaRealTimeSaleService.dealKafkaRealTimeSale(inptPushs);
                                }
                            });
                        }
                    } catch (RuntimeException e) {
                        LOGGER.error("batch dispatcher RuntimeException", e);
                    } catch (InterruptedException e) {
                        // restore the interrupt status and exit the loop
                        Thread.currentThread().interrupt();
                        break;
                    }
                }
            }
        };
        // daemon so a forgotten shutdown() cannot keep the JVM alive
        dispatcherThread.setDaemon(true);
        dispatcherThread.start();
    }
}
