package com.tc.vms.consumer;

import com.tc.vms.MessageWrap;
import com.tc.vms.encoding.Message;
import com.tc.vms.exception.VmsClientErrorCode;
import com.tc.vms.exception.VmsClientException;
import com.tc.vms.iface.IContext;
import com.tc.vms.kafka.KafkaEventArgs;
import com.tc.vms.utils.Utils;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

/**
 * Kafka-backed VMS consumer implementation built on the Kafka 0.8 high-level
 * consumer API (ZooKeeper-coordinated {@code ConsumerConnector} with one worker
 * thread per {@code KafkaStream}).
 *
 * <p>Created by yonghua.zhang on 2015/12/14.
 */
public class VmsConsumerKafkaImpl extends AbstractConsumer {
    private ConsumerConfig kafkaConfig;
    private ConsumerConnector javaConsumerConnector;
    private ExecutorService kafkaWorkerPool;

    public VmsConsumerKafkaImpl(VmsConsumerProxy vmsConsumerProxy) {
        super(vmsConsumerProxy);
        this.consumingThreadNum = 4;
    }

    public ConsumerConfig getKafkaConfig() {
        return kafkaConfig;
    }

    @Override
    public boolean init(IContext context) throws VmsClientException {
        return true;
    }

    @Override
    public synchronized boolean start() throws VmsClientException {
        if (!isOpen) {
            if (callback == null) {
                throw new VmsClientException("Please register listerner before start consuming.", VmsClientErrorCode.VMS_SYS_ERR_CONSUMER_NO_LISTERNER);
            }
            try {
                Map<String, Integer> topicStreamMap = new HashMap<String, Integer>();
                for (String topic : queueMetaData.getChlAndRkey().keySet()) {
                    if (!topic.isEmpty()) {
                        topicStreamMap.put(topic, this.consumingThreadNum);
                    }
                }
                this.kafkaConfig = new ConsumerConfig(preparedConfig());
                javaConsumerConnector = Consumer.createJavaConsumerConnector(kafkaConfig);
                doFetching(topicStreamMap);
                LOGGER.info("VmsConsumerKafkaImpl start fetching kafka data.");
            } catch (Exception e) {
                LOGGER.error("Start consuming failed.", e);
                innerShutdown();

                throw new VmsClientException("Start consuming failed.", e);
            }
            isOpen = true;
        }
        return isOpen;
    }

    private synchronized void innerShutdown() {
        if (kafkaWorkerPool != null) {
            try {
                kafkaWorkerPool.shutdown();
                kafkaWorkerPool.awaitTermination(5, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                LOGGER.error("Kafka workPool.awaitTermination interruppted", e);
            }
        }
        if (javaConsumerConnector != null) {
            javaConsumerConnector.shutdown();
        }
    }

    @Override
    public synchronized void shutdown() {
        if (isOpen) {
            isOpen = false;
            innerShutdown();
        }
    }

    private Properties preparedConfig() {
        Properties properties = new Properties();
        properties.putAll(queueMetaData.getCfgCenterProps());
        properties.putAll(vmsConsumerProxy.getUserProps());
        if (properties.getProperty("zookeeper.connect") == null) {
            properties.put("zookeeper.connect", queueMetaData.getCfgZkCluster());
        }
        if (properties.getProperty("zookeeper.connection.timeout.ms") == null) {
            properties.put("zookeeper.connection.timeout.ms", String.valueOf(vmsConsumerProxy.getVmsClientContext()
                    .getZkBootOptions().getZKConnectionTimeout()));
        }
        /*if (properties.getProperty("zookeeper.session.timeout.ms") == null) {
            properties.put("zookeeper.session.timeout.ms", String.valueOf(vmsConsumerProxy.getVmsClientContext()
                    .getZkBootOptions().getZKSessionTimeout()));
        }*/
        if (sla.consumeFromWhere().equals(ConsumeFromWhere.FROM_LARGEST)) {
            properties.put("auto.offset.reset", "largest");
        } else {
            properties.put("auto.offset.reset", "smallest");
        }
        /*if (properties.getProperty("queued.max.message.chunks") == null) {
            properties.put("queued.max.message.chunks", "100");
        }*/
        if (subQoS.prefetchSize() > 0) {
            properties.put("fetch.message.max.bytes", String.valueOf(subQoS.prefetchSize()));
        }
        properties.put("auto.commit.enable", "true");
        properties.put("auto.commit.interval.ms", String.valueOf(subQoS.getCommitInterval()));
        /*if (!subQoS.autoCommit()) {
            properties.put("auto.commit.enable", "false");
        } else {
            properties.put("auto.commit.interval.ms", String.valueOf(subQoS.getCommitInterval()));
        }*/

        if (properties.getProperty("socket.receive.buffer.bytes") == null) {
            properties.put("socket.receive.buffer.bytes", String.valueOf(1024 * 1024));
        }

        properties.put("rebalance.max.retries", "6");
        properties.put("group.id", groupName);
        properties.put("client.id", instName);
        LOGGER.debug("initialize ConsumerConfig = {}", properties.toString());
        return properties;
    }


    private synchronized void doFetching(Map<String, Integer> topicStreamMap) {
        Map<String, List<KafkaStream<byte[], byte[]>>> partitions = javaConsumerConnector
                .createMessageStreams(topicStreamMap);
        int tasks = 0;
        for (List<KafkaStream<byte[], byte[]>> list : partitions.values()) {
            tasks += list.size();
        }
        kafkaWorkerPool = Executors.newFixedThreadPool(tasks);
        for (List<KafkaStream<byte[], byte[]>> list : partitions.values()) {
            for (KafkaStream<byte[], byte[]> partition : list) {
                this.kafkaWorkerPool.submit(new MessageRunner(partition));
            }
        }
    }

    private void doResponse(MessageAndMetadata<byte[], byte[]> response) {
        KafkaEventArgs eventArgs = null;
        try {
            Message.VMSMessage vmsMessage = Message.VMSMessage.parseFrom(response.message());
            MessageWrap message = new MessageWrap(vmsMessage.getPayload().toByteArray());
            message.setMessageId(vmsMessage.getMessageId());

            for (String key : vmsMessage.getRoutingKeysList()) {
                message.addRoutingKey(key);
            }

            if (vmsMessage.getPropertiesList() != null) {
                for (Message.KeyPair kp : vmsMessage.getPropertiesList()) {
                    message.setAttribute(kp.getKey(), kp.getValue());
                }
            }

            eventArgs = new KafkaEventArgs(message);
            eventArgs.setIsProtobuf(true);
            eventArgs.setOffset(response.offset());
            eventArgs.setPartition(response.partition());
            eventArgs.setTopicName(response.topic());
        } catch (Throwable e) {
            LOGGER.warn("Kafka converting protocolbuf error, Use original data to consume.", e);
            eventArgs = new KafkaEventArgs(new MessageWrap(response.message()));
            eventArgs.setIsProtobuf(false);
            eventArgs.setOffset(response.offset());
            eventArgs.setPartition(response.partition());
            eventArgs.setTopicName(response.topic());
        }
        callback.consumeMessage(new VmsKafkaConsumerContext(0, response.offset()), eventArgs);
        //commit();
    }

    private boolean commit() {
        if (javaConsumerConnector != null && !subQoS.autoCommit()) {
            javaConsumerConnector.commitOffsets();
            return true;
        }
        return false;
    }

    class MessageRunner implements Runnable {
        private final KafkaStream<byte[], byte[]> partition;

        MessageRunner(KafkaStream<byte[], byte[]> partition) {
            this.partition = partition;
        }

        public void run() {
            try {
                MessageAndMetadata<byte[], byte[]> item = null;
                ConsumerIterator<byte[], byte[]> pIt = partition.iterator();
                while (pIt.hasNext()) {

                    if (vmsConsumerProxy.isSuspend()) {  //for service degrade.
                        LOGGER.debug("Consuming service is suspend by vmscenter");
                        Utils.sleep5s();
                        continue;
                    }

                    try {
                        item = pIt.next();
                        doResponse(item);
                    } catch (Throwable innerEx) {
                        LOGGER.warn("Kafka comsume runtime error = {}.topic-partition-offset = {}.", innerEx,
                                item != null ? String.format("%s-%d-%d", item.topic(), item.partition(), item.offset()) : "", innerEx);
                    }
                }
            } catch (Throwable ex) {
                LOGGER.error("MessageRunner run failed.", ex);
            }

        }
    }
}
