package com.letu.plumelog.handler;

import cn.hutool.core.collection.CollectionUtil;
import com.alibaba.fastjson.JSONObject;
import com.letu.annotation.log.entity.OptLogDTO;
import com.letu.cache.redis.RedisOps;
import com.letu.core.plumelog.BaseLogMessage;
import com.letu.core.plumelog.CompressMessage;
import com.letu.plumelog.client.RedisClient;
import com.letu.plumelog.constant.LogMessageConstant;
import com.letu.plumelog.exception.LogQueueConnectException;
import com.letu.plumelog.util.LZ4Util;
import com.letu.plumelog.util.ThreadPoolUtil;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
import org.springframework.kafka.core.KafkaTemplate;

import java.lang.reflect.InvocationTargetException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

/**
 * className：MessageAppenderFactory
 * description：Buffers log messages in bounded in-memory queues and drains them on
 * background threads to Kafka (runtime/full logs) and Redis (business/system logs).
 * time：2023-02-13.14:18
 *
 * @author ligang
 * @version 1.0.0
 */
public class MessageAppenderFactory {

    /** Pool that hosts the long-running drain loops started from {@link #init}. */
    private static final ThreadPoolExecutor threadPoolExecutor = ThreadPoolUtil.getPool();

    /**
     * Downstream-state cache for runtime logs; entries expire 30 seconds after write.
     * NOTE(review): only {@code true} is ever written, so the suppress-on-failure
     * path this cache was presumably built for appears unimplemented — confirm intent.
     */
    private final static Cache<String, Boolean> cache = CacheBuilder.newBuilder()
            .expireAfterWrite(30, TimeUnit.SECONDS).build();

    /** Bounded buffer of runtime log messages awaiting publication to Kafka. */
    public static BlockingQueue<BaseLogMessage> rundataQueue = new LinkedBlockingQueue<>(LogMessageConstant.logQueueSize);

    /** Epoch millis of the most recent runtime-log drain pass. */
    private static AtomicLong lastRunPushTime = new AtomicLong(0);

    /**
     * Bounded buffer of business (system) log messages awaiting publication to Redis.
     */
    public static BlockingQueue<OptLogDTO> systemLogdataQueue = new LinkedBlockingQueue<>(LogMessageConstant.logQueueSize);

    /** Downstream-state cache for business logs; same semantics as {@link #cache}. */
    private final static Cache<String, Boolean> systemCache = CacheBuilder.newBuilder()
            .expireAfterWrite(30, TimeUnit.SECONDS).build();

    /** Epoch millis of the most recent business-log push. */
    private static AtomicLong lastSystemPushTime = new AtomicLong(0);

    /**
     * Redis client used to publish compressed log batches.
     */
    private static RedisOps redisOps;

    /**
     * Kafka client used to publish runtime log messages. Kept as a raw type so
     * existing callers of {@link #setRedisKafka} / {@link #init} keep compiling.
     */
    private static KafkaTemplate kafkaTemplate;

    /**
     * Whether full (runtime) log recording is enabled; 1 means enabled.
     */
    private static Integer isAllLogOpen = 1;


    public static void setRedisOps(RedisOps redisOps) {
        MessageAppenderFactory.redisOps = redisOps;
    }

    public static void setRedisKafka(KafkaTemplate kafkaTemplate) {
        MessageAppenderFactory.kafkaTemplate = kafkaTemplate;
    }


    /**
     * Wires the Kafka/Redis clients and, when full logging is enabled
     * ({@code isAllLogOpen == 1}), starts the runtime-log drain loop on a pool thread.
     *
     * @param kafkaTemplate Kafka client for runtime logs
     * @param redisOps      Redis client for business logs
     * @param isAllLogOpen  1 to enable full logging; null or any other value disables it
     */
    public static void init(KafkaTemplate kafkaTemplate, RedisOps redisOps, Integer isAllLogOpen) {
        setRedisOps(redisOps);
        setRedisKafka(kafkaTemplate);
        MessageAppenderFactory.isAllLogOpen = isAllLogOpen;

        if (MessageAppenderFactory.isAllLogOpen != null && MessageAppenderFactory.isAllLogOpen == 1) {
            // Start the runtime-log drain loop; it never returns, occupying one pool thread.
            threadPoolExecutor.execute(() -> startRunLog(LogMessageConstant.REDIS_ALL_LOG_KEY));
        }

        // The business-log loop (startSystemLog) is deliberately not started here.
    }

    /**
     * Enqueues a runtime log message. Messages are silently dropped when full
     * logging is disabled or the queue is at capacity.
     *
     * @param logMessage message to buffer; nulls are ignored
     */
    public static void pushRundataQueue(BaseLogMessage logMessage) {
        if (logMessage == null) {
            return;
        }
        if (MessageAppenderFactory.isAllLogOpen != null && MessageAppenderFactory.isAllLogOpen == 1) {
            // offer() drops the message when the bounded queue is full; the previous
            // size()-check-then-add() could race with other producers and throw
            // IllegalStateException from add().
            rundataQueue.offer(logMessage);
        }
    }

    /**
     * Enqueues a business log record; silently dropped when the queue is at capacity.
     *
     * @param optLogDTO record to buffer; nulls are ignored
     */
    public static void pushSystemLogQueue(OptLogDTO optLogDTO) {
        if (optLogDTO != null) {
            // offer() is race-free on the bounded queue, unlike size()-check + add().
            systemLogdataQueue.offer(optLogDTO);
        }
    }


    /**
     * Serializes, compresses and publishes a batch of runtime log messages to Redis,
     * unless the 30-second state cache marks this output key as suppressed.
     *
     * @param key          Redis channel/key to publish to
     * @param logs         batch to publish; empty/null batches are ignored
     * @param logOutPutKey state-cache key gating the publish
     */
    public static void pushOfRedis(String key, List<BaseLogMessage> logs, String logOutPutKey) {
        if (CollectionUtil.isEmpty(logs)) {
            return;
        }
        // Local flag instead of a shared static field: this method may run on
        // several threads and a static scratch field would be a data race.
        Boolean logOutPut = cache.getIfPresent(logOutPutKey);
        if (logOutPut == null || logOutPut) {
            // Serialize each message, then LZ4-compress the batch before publishing.
            List<String> list = new ArrayList<>(logs.size());
            for (BaseLogMessage baseLogMessage : logs) {
                list.add(JSONObject.toJSONString(baseLogMessage));
            }
            CompressMessage compressMessage = LZ4Util.compressedMessage(list);
            redisOps.send(key, compressMessage);
            cache.put(logOutPutKey, true);
        }
    }


    /**
     * Serializes, compresses and publishes a batch of business log records to Redis,
     * unless the 30-second state cache marks this output key as suppressed.
     *
     * @param key          Redis channel/key to publish to
     * @param logs         batch to publish; empty/null batches are ignored
     * @param logOutPutKey state-cache key gating the publish
     */
    public static void pushSystemLogOfRedis(String key, List<OptLogDTO> logs, String logOutPutKey) {
        if (CollectionUtil.isEmpty(logs)) {
            return;
        }
        // Local flag for thread safety; see pushOfRedis.
        Boolean systemLogOutPut = systemCache.getIfPresent(logOutPutKey);
        if (systemLogOutPut == null || systemLogOutPut) {
            List<String> list = new ArrayList<>(logs.size());
            for (OptLogDTO optLogDTO : logs) {
                list.add(JSONObject.toJSONString(optLogDTO));
            }
            CompressMessage compressMessage = LZ4Util.compressedMessage(list);
            redisOps.send(key, compressMessage);
            systemCache.put(logOutPutKey, true);
        }
    }


    /**
     * Publishes each runtime log message as an individual JSON record to the
     * Kafka topic {@code mytopic_log}, unless the 30-second state cache marks
     * this output key as suppressed.
     *
     * @param key          unused here (kept for signature parity with the Redis push)
     * @param logs         batch to publish; empty/null batches are ignored
     * @param logOutPutKey state-cache key gating the publish
     */
    public static void pushSystemLogOfKafka(String key, List<BaseLogMessage> logs, String logOutPutKey) {
        if (CollectionUtil.isEmpty(logs)) {
            return;
        }
        // Local flag for thread safety; see pushOfRedis.
        Boolean logOutPut = cache.getIfPresent(logOutPutKey);
        if (logOutPut == null || logOutPut) {
            // One Kafka record per message, uncompressed JSON.
            for (BaseLogMessage baseLogMessage : logs) {
                kafkaTemplate.send("mytopic_log", JSONObject.toJSONString(baseLogMessage));
            }
            cache.put(logOutPutKey, true);
        }
    }

    /**
     * Runtime-log drain loop: forever, drains up to {@code pushMaxCount} messages
     * from {@link #rundataQueue} and publishes them to Kafka, sleeping 1s when the
     * queue is empty and 400ms between passes. Never returns.
     *
     * @param key passed through to {@link #pushSystemLogOfKafka}
     */
    public static void startRunLog(String key) {
        while (true) {
            try {
                int size = rundataQueue.size();
                long currentTimeMillis = System.currentTimeMillis();
                if (size > 0) {
                    // Drain at most pushMaxCount messages in one pass; this covers
                    // both the size >= max and 0 < size < max cases.
                    int batch = Math.min(size, LogMessageConstant.pushMaxCount);
                    List<BaseLogMessage> logs = new ArrayList<>(batch);
                    rundataQueue.drainTo(logs, batch);
                    if (CollectionUtil.isNotEmpty(logs)) {
                        pushSystemLogOfKafka(key, logs, LogMessageConstant.LOG_PUT_KEY);
                    }
                } else {
                    // Nothing buffered; back off before polling again.
                    Thread.sleep(1000);
                }
                lastRunPushTime.set(currentTimeMillis);
            } catch (Exception e) {
                // NOTE(review): System.out used because this file has no logger wired in.
                System.out.println("plumelog error:--------doStartLog--------" + extractMessage(e) + "-------------------");
                sleepUninterruptibly(1000);
            } finally {
                // Fixed pacing between passes.
                sleepUninterruptibly(400);
            }
        }
    }


    /**
     * Business-log drain loop: forever, pushes buffered records to Redis when the
     * batch threshold is reached or 500ms have elapsed, blocking on an empty queue.
     * Never returns. Not started by {@link #init}; must be launched explicitly.
     *
     * @param key passed through to {@link #pushSystemLogOfRedis}
     */
    public static void startSystemLog(String key) {
        while (true) {
            try {
                int size = systemLogdataQueue.size();
                long currentTimeMillis = System.currentTimeMillis();
                long time = currentTimeMillis - lastSystemPushTime.get();

                if (size >= LogMessageConstant.pushMaxCount || time > 500) {
                    List<OptLogDTO> logs = new ArrayList<>();
                    systemLogdataQueue.drainTo(logs, LogMessageConstant.pushMaxCount);
                    // pushSystemLogOfRedis ignores an empty batch, so draining 0 is harmless.
                    pushSystemLogOfRedis(key, logs, LogMessageConstant.LOG_PUT_KEY_SYSTEM);
                    lastSystemPushTime.set(currentTimeMillis);
                } else if (size == 0) {
                    // Block until at least one record arrives; take() never returns null.
                    OptLogDTO log = systemLogdataQueue.take();
                    List<OptLogDTO> logs = new ArrayList<>(1);
                    logs.add(log);
                    pushSystemLogOfRedis(key, logs, LogMessageConstant.LOG_PUT_KEY_SYSTEM);
                    lastSystemPushTime.set(currentTimeMillis);
                } else {
                    // Small batch, recently pushed: wait a little for more records.
                    Thread.sleep(100);
                }
            } catch (Exception e) {
                System.out.println("plumelog error:--------doStartLog--------" + extractMessage(e) + "-------------------");
                sleepUninterruptibly(1000);
            }
        }
    }

    /**
     * Returns the most useful message for an exception, unwrapping
     * {@link InvocationTargetException} when its own message is null.
     */
    private static String extractMessage(Exception e) {
        String exMsg = e.getMessage();
        if (e instanceof InvocationTargetException && exMsg == null) {
            exMsg = ((InvocationTargetException) e).getTargetException().getMessage();
        }
        return exMsg;
    }

    /**
     * Sleeps for the given millis. The interrupt is deliberately swallowed (not
     * re-asserted): the drain loops must keep running, and restoring the flag
     * would make every subsequent sleep fail in a tight loop.
     */
    private static void sleepUninterruptibly(long millis) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException ignored) {
            // intentional: keep the background loop alive
        }
    }
}
