package com.linkstec.mot.base;

import java.sql.Connection;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Properties;

import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import com.linkstec.kafka.BaseZookeeper;
import com.linkstec.util.BatchContext;
import com.linkstec.util.DbUtil;

/**
 * Batch service that monitors the consumer lag of the push topic.
 *
 * <p>Each run it:
 * <ol>
 *   <li>sums the committed offsets of all partitions from Zookeeper
 *       ({@code /consumers/<group>/offsets/<topic>}),</li>
 *   <li>sums the latest (end) offsets via a dedicated monitor consumer,</li>
 *   <li>computes the lag and — against the previous run — per-second
 *       produce/consume rates, and</li>
 *   <li>persists one row into {@code mot_push_lag_monitor}.</li>
 * </ol>
 *
 * <p>NOTE(review): endpoints, group id and topic are hard-coded; presumably this
 * is intentional for this environment — confirm before externalizing to config.
 */
public class PushLagMonitorService implements RunnerService {
	private static final Logger logger = LogManager.getLogger();

	// Kafka brokers and Zookeeper ensemble of the monitored cluster.
	private static final String BOOTSTRAP_SERVERS = "192.168.2.232:9092,192.168.2.233:9092,192.168.2.234:9092";
	private static final String ZOOKEEPER = "192.168.2.232:2181,192.168.2.233:2181,192.168.2.234:2181";
	// Consumer group / topic whose lag is being monitored.
	private static final String GROUP_ID = "kafka-dbsycn-service";
	private static final String TOPIC = "test";
	// Sampling interval in seconds; divisor for the per-second rate columns.
	private static final long INTERVAL = 30L;
	// Zookeeper path holding the monitored group's committed offsets, one child per partition.
	private static final String OFFSETS_PATH = "/consumers/" + GROUP_ID + "/offsets/" + TOPIC;
	private static final String INSERT_SQL = "insert into mot_push_lag_monitor "
			+ "(channel, logsize, consumeroffset, lag, groupid, producermessagesec, consumermessagesec)"
			+ " values "
			+ "(?, ?, ?, ?, ?, ?, ?)";

	// Dedicated monitor consumer; uses its own group id ("mot-push_monitor") so
	// reading end offsets never disturbs the monitored group's committed offsets.
	private static Consumer<Long, String> consumer = null;
	private static final BaseZookeeper zookeeper = new BaseZookeeper();
	// Parameters inserted on the previous run; indexes 1/2 hold logsize/consumerOffset
	// and are used to derive the per-second rates. null until the first run completes.
	private static List<Object> lastParams = null;

	static {
		final Properties props = new Properties();
		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
		props.put(ConsumerConfig.GROUP_ID_CONFIG, "mot-push_monitor");
		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

		consumer = new KafkaConsumer<>(props);
		try {
			zookeeper.connectZookeeper(ZOOKEEPER);
		} catch (Exception e) {
			// Use the configured logger instead of printStackTrace() so the
			// connection failure is visible in the application log.
			logger.error("Failed to connect to zookeeper: " + ZOOKEEPER, e);
		}
	}

	/**
	 * Samples the current logsize / committed offset / lag for {@link #TOPIC},
	 * stores one monitoring row and reports the result.
	 *
	 * @param dtoInput not used by this service
	 * @return a {@link ParamDto} carrying "ReturnCode" ("0" on success, "-1" on
	 *         failure) and "ReturnMessage" (summary text or stack trace)
	 */
	@Override
	public ParamDto execute(ParamDto dtoInput) {
		ParamDto outDto = new ParamDto();
		Connection toConn = null;
		long consumerOffset = 0L;
		try {
			toConn = BatchContext.getNewConnection();
			// Sum the committed offset of every partition stored under OFFSETS_PATH.
			List<String> children = zookeeper.getChildren(OFFSETS_PATH);
			for (String partition : children) {
				String offset = zookeeper.getData(OFFSETS_PATH + "/" + partition);
				consumerOffset += Long.parseLong(offset);
				logger.info("Partition " + partition + "的offset是：" + offset);
			}
			long logsize = getPartitionsForTopic();
			long lag = logsize - consumerOffset;

			// Parameter order must match INSERT_SQL's column list.
			List<Object> params = new ArrayList<>();
			params.add(TOPIC);
			params.add(logsize);
			params.add(consumerOffset);
			params.add(lag);
			params.add(GROUP_ID);
			if (null != lastParams) {
				// Per-second rates from the delta against the previous sample
				// (integer division — fractional messages/sec are truncated).
				params.add((logsize - (long) lastParams.get(1)) / INTERVAL);
				params.add((consumerOffset - (long) lastParams.get(2)) / INTERVAL);
			} else {
				// First run: no previous sample, so rates are reported as 0.
				params.add(0L);
				params.add(0L);
			}

			DbUtil.executeUpdate(toConn, INSERT_SQL, params);
			toConn.commit();
			lastParams = params;
			logger.info(INSERT_SQL);
			String returnMsg = "TOPIC:" + TOPIC + "生产最新offset：" + logsize + "当前消费到offset：" + consumerOffset + "消费延迟:" + lag;
			logger.info(returnMsg);
			outDto.put("ReturnCode", "0");
			outDto.put("ReturnMessage", returnMsg);
		} catch (Exception e) {
			String errMsg = ExceptionUtils.getStackTrace(e);
			logger.error(errMsg);
			// BUG FIX: the failure path previously returned "0" as well, which is
			// the success code — callers could not detect a failed run.
			outDto.put("ReturnCode", "-1");
			outDto.put("ReturnMessage", errMsg);
		} finally {
			BatchContext.closeConnection(toConn);
			toConn = null;
		}
		return outDto;
	}

	/**
	 * Returns the total logsize of {@link #TOPIC}: the sum of the latest (end)
	 * offsets over all of its partitions.
	 *
	 * @return sum of end offsets across all partitions of the topic
	 */
	public static long getPartitionsForTopic() {
		Collection<PartitionInfo> partitionInfos = consumer.partitionsFor(TOPIC);
		logger.info("Get the partition info as below:");

		// Collect all partitions first, then assign and seek once. The original
		// re-assigned and re-seeked the growing list on every loop iteration.
		List<TopicPartition> partitions = new ArrayList<>(partitionInfos.size());
		for (PartitionInfo info : partitionInfos) {
			logger.info("Partition Info:");
			logger.info(info + "");
			partitions.add(new TopicPartition(TOPIC, info.partition()));
		}
		consumer.assign(partitions);
		consumer.seekToEnd(partitions);

		long logsize = 0L;
		for (TopicPartition partition : partitions) {
			// position() resolves the lazy seekToEnd and yields the end offset.
			long offset = consumer.position(partition);
			logsize += offset;
			logger.info("Partition " + partition.partition() + " 's latest offset is '" + offset);
		}
		return logsize;
	}
}