package com.cloud.logger.zchain.zchainclient.util;

import com.cloud.logger.zchain.core.InetAddressUtilities;
import com.cloud.logger.zchain.core.common.entity.AngelaLoggerEntity;
import com.cloud.logger.zchain.core.common.entity.AngelaLoggerProject;
import com.cloud.logger.zchain.core.common.entity.AngelaLoggerProjectNode;
import com.cloud.logger.zchain.core.common.entity.AngelaLoggerProjectNodeDetail;
import com.cloud.logger.zchain.core.common.properties.ServerInfo;
import com.cloud.logger.zchain.core.common.threadlocal.LoggerThreadLocal;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

import java.net.UnknownHostException;
import java.util.Date;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

/**
 * User: wcy
 * Date: 2017/11/6
 * Time: 11:22
 */
/**
 * Static utility for publishing logger entities and project-health messages to Kafka.
 *
 * <p>Configuration is read once from {@code kafka.properties} on the classpath; all sends are
 * dispatched asynchronously on a shared {@link ThreadPoolTaskExecutor} so callers never block.
 *
 * <p>Thread-safety: {@link KafkaProducer} and {@link Gson} are thread-safe; the shared
 * {@code producer} instance is created once in the static initializer via {@link #checkKafka()}.
 */
public class KafkaUtil {

    /** Utility class — not instantiable. */
    private KafkaUtil(){}

    /** Class logger; replaces ad-hoc printStackTrace() calls. */
    private static final Logger LOG = Logger.getLogger(KafkaUtil.class);

    /** Topic that application log entries are published to. */
    private final static String LOGGER_TOPIC = "logger";

    /** Executor that decouples callers from Kafka network latency. */
    private static ThreadPoolTaskExecutor poolTaskExecutor;

    /** Loaded from kafka.properties on the classpath; never null after static init succeeds. */
    private static PropertiesConfiguration config;

    /** Shared producer. Kept public for existing callers; prefer the static methods below. */
    public static KafkaProducer<String, String> producer = null;

    /** Gson is thread-safe; build once and reuse. */
    private static final Gson gson = new GsonBuilder().create();

    private static final Properties props = new Properties();

    static{
        // Silence the very chatty Kafka/ZooKeeper client loggers.
        Logger.getLogger("kafka.client").setLevel(Level.OFF);
        Logger.getLogger("kafka.producer").setLevel(Level.OFF);
        Logger.getLogger("kafka.utils").setLevel(Level.OFF);
        Logger.getLogger("kafka.consumer").setLevel(Level.OFF);
        Logger.getLogger("kafka.network").setLevel(Level.OFF);
        Logger.getLogger("org.apache.zookeeper").setLevel(Level.OFF);

        poolTaskExecutor = new ThreadPoolTaskExecutor();
        // 线程池所使用的缓冲队列 (queue capacity used by the pool)
        poolTaskExecutor.setQueueCapacity(50000);
        // 线程池维护线程的最少数量 (core pool size)
        poolTaskExecutor.setCorePoolSize(5);
        // 线程池维护线程的最大数量 (max pool size)
        poolTaskExecutor.setMaxPoolSize(300);
        // 线程池维护线程所允许的空闲时间 (idle keep-alive, seconds)
        poolTaskExecutor.setKeepAliveSeconds(30000);
        poolTaskExecutor.initialize();

        try {
            config = new PropertiesConfiguration("kafka.properties");
        } catch (ConfigurationException e) {
            // Fail fast with the real cause: without this file every config.getString()
            // below would throw an opaque NullPointerException from the static initializer.
            LOG.error("Failed to load kafka.properties from classpath", e);
            throw new ExceptionInInitializerError(e);
        }

        props.put("bootstrap.servers", config.getString("kafka.bootstrap-servers"));
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", config.getString("batch.size"));
        props.put("linger.ms", 1);
        props.put("buffer.memory", config.getString("buffer.memory"));
        props.put("key.serializer", config.getString("kafka.producer.key-serializer"));
        props.put("value.serializer", config.getString("kafka.producer.value-serializer"));
        checkKafka();
    }

    /**
     * Fire-and-forget publish of a log entity to the logger topic.
     *
     * @param logger entity to serialize as JSON and send
     */
    private static void sendMessage(AngelaLoggerEntity logger){
        CompletableFuture.runAsync(
                () -> producer.send(new ProducerRecord<>(LOGGER_TOPIC, gson.toJson(logger))),
                poolTaskExecutor);
    }

    /**
     * 发送消息到kafka — stamps the client time on the entity and publishes it
     * asynchronously to the logger topic, waiting (on the pool thread) for the
     * broker acknowledgement so failures are logged.
     *
     * @param logger entity to publish; its zchainClientTime is set to now
     */
    public static void saveLogger(AngelaLoggerEntity logger){
        final AngelaLoggerEntity finalLogger = initData(logger);
        CompletableFuture.runAsync(() -> {
            Future<RecordMetadata> f =
                    producer.send(new ProducerRecord<>(LOGGER_TOPIC, gson.toJson(finalLogger)));
            try {
                // Block on the pool thread (not the caller) until the broker acks,
                // so send failures surface in the log instead of vanishing.
                LOG.debug(f.get());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status
                LOG.warn("Interrupted while waiting for Kafka ack", e);
            } catch (ExecutionException e) {
                LOG.error("Kafka send failed", e);
            }
        }, poolTaskExecutor);
    }

    /**
     * Publishes an updated log entity to topic {@code test1}.
     *
     * @param id     entity identifier — NOTE(review): currently unused; the update is
     *               keyed only by the serialized payload. Confirm whether it should be
     *               sent as the record key.
     * @param logger entity to serialize and send
     */
    public static void updateLogger(String id, AngelaLoggerEntity logger) {
        CompletableFuture.runAsync(
                () -> producer.send(new ProducerRecord<>("test1", gson.toJson(logger))),
                poolTaskExecutor);
    }

    /** No-op placeholder — project persistence is not implemented yet. */
    public static void saveLoggerProject(AngelaLoggerProject logger) {
    }

    /**
     * 发送消息到kafka — periodically reports this node's liveness (code/ip/port)
     * to the {@code checkproject} topic.
     */
    public static void checkProject(){
        AngelaLoggerProjectNodeDetail detail = new AngelaLoggerProjectNodeDetail();
        detail.setCode(ServerInfo.project);
        try {
            detail.setIp(InetAddressUtilities.getLocalHostLANAddress().getCanonicalHostName());
            // NOTE(review): port is only reported when a current project exists in the
            // thread-local — presumably to distinguish registered nodes; confirm intent.
            if(LoggerThreadLocal.getCurrentProject()!=null)
                detail.setPort(ServerInfo.port+"");
        } catch (UnknownHostException e) {
            LOG.error("Could not resolve local LAN address", e);
        }
        CompletableFuture.runAsync(
                () -> producer.send(new ProducerRecord<>("checkproject", gson.toJson(detail))),
                poolTaskExecutor);
    }

    /**
     * Fills the current thread-local project node with this server's identity and
     * stores it back. Safely does nothing when no current project is registered
     * (the previous code dereferenced the node before its null check).
     */
    public static void startProject(){
        AngelaLoggerProjectNode info = LoggerThreadLocal.getCurrentProject();
        if (info == null) {
            LOG.warn("startProject called with no current project in thread-local; skipping");
            return;
        }
        info.setCode(ServerInfo.project);
        info.setScenic(ServerInfo.scenic);
        info.setIp(ServerInfo.ip);
        info.setPort(ServerInfo.port+"");
        LoggerThreadLocal.setZchainProject(info);
    }

    /**
     * Stamps the entity with the current client-side time.
     *
     * @return the same entity, for chaining
     */
    private static AngelaLoggerEntity initData(AngelaLoggerEntity loggerEntity){
        loggerEntity.setZchainClientTime(new Date());
        return loggerEntity;
    }

    /** Manual smoke test: sends one sample entity and flushes via close(). */
    public static void main(String[] args) throws Exception {
        AngelaLoggerEntity entity = new AngelaLoggerEntity();
        entity.setDescription("aaaaaaaaaaaaa");
        entity.setMethod("/aaa/bbb/ccc");
        producer.send(new ProducerRecord<>(LOGGER_TOPIC, gson.toJson(entity)));
        System.out.println("End");
        producer.close(); // flushes any buffered records before exit
    }

    /** (Re)creates the shared producer from the loaded properties. */
    public static void checkKafka(){
        producer = new KafkaProducer<>(props);
    }
}
