package com.yamed.bus.service.appender;

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.classic.spi.IThrowableProxy;
import ch.qos.logback.core.AppenderBase;
import com.yamed.bus.BusApplication;
import org.springframework.core.env.Environment;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;

/**
 * Logback appender that ships every log event to a Kafka topic asynchronously,
 * via the application's "backExecutor" thread pool.
 *
 * No logging framework may be used inside this class: a log line emitted here
 * would be fed straight back into this appender and recurse forever — unless
 * its logger is listed in the ignore set.
 *
 * @author renchunlei
 * @date 2019-06-11
 */
public class KafkaAppender extends AppenderBase<ILoggingEvent> {

    private static final String TAB = "\t";

    // DateTimeFormatter is immutable and thread-safe; the previous SimpleDateFormat
    // instances were shared across executor threads, which is a data race and can
    // silently corrupt the formatted timestamps under concurrent logging.
    private static final DateTimeFormatter DATE_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd");
    private static final DateTimeFormatter DATE_TIME_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");

    // Built once instead of on every log event (the old getIgnorePackage()
    // allocated a fresh HashMap per event).
    private static final Map<String, Boolean> IGNORE_PACKAGE = buildIgnorePackage();

    @Override
    protected void append(ILoggingEvent eventObject) {
        // Context is null while startup has not finished or during graceful
        // shutdown — in both cases there is nowhere to forward the event to.
        if (BusApplication.applicationContext == null) {
            return;
        }
        try {
            // getBean throws (it never returns null) when a bean is missing;
            // the surrounding catch handles that, so no null checks are needed.
            ThreadPoolTaskExecutor backExecutor =
                    BusApplication.applicationContext.getBean("backExecutor", ThreadPoolTaskExecutor.class);
            KafkaTemplate kafkaTemplate =
                    BusApplication.applicationContext.getBean(KafkaTemplate.class);

            // Sending synchronously here can block the logging thread — presumably
            // while the Kafka consumer-group/partition assignment is still
            // initializing — so the send is handed off to the back executor.
            backExecutor.execute(() -> forwardToKafka(kafkaTemplate, eventObject));
        } catch (Exception ex) {
            // Cannot use a logger here (would recurse into this appender).
            System.err.println("kafka log error：" + ex.getMessage());
        }
    }

    /**
     * Formats the event as a tab-separated line and publishes it to the topic
     * configured under {@code kafka.log.topic}. Runs on an executor thread, so
     * its own failures are caught here: the old code let exceptions thrown in
     * the lambda escape the executor thread unreported.
     */
    private void forwardToKafka(KafkaTemplate kafkaTemplate, ILoggingEvent eventObject) {
        try {
            String loggerName = eventObject.getLoggerName();
            if (isIgnored(loggerName)) {
                return;
            }

            Environment environment = BusApplication.applicationContext.getEnvironment();
            String topic = environment.getProperty("kafka.log.topic");
            String appName = environment.getProperty("spring.application.name");
            String hostname = environment.getProperty("spring.cloud.client.hostname");
            String ipAddress = environment.getProperty("spring.cloud.client.ip-address");

            ZonedDateTime timestamp =
                    Instant.ofEpochMilli(eventObject.getTimeStamp()).atZone(ZoneId.systemDefault());

            //region message assembly (tab-separated; embedded tabs become two spaces)
            StringBuilder loggerBuilder = new StringBuilder();
            loggerBuilder.append(appName)
                    .append(TAB).append(DATE_FORMAT.format(timestamp))
                    .append(TAB).append(loggerName)
                    .append(TAB).append(eventObject.getLevel())
                    .append(TAB).append(hostname)
                    .append(TAB).append(ipAddress)
                    .append(TAB).append(eventObject.getThreadName().replace(TAB, "  "))
                    .append(TAB).append(DATE_TIME_FORMAT.format(timestamp))
                    .append(TAB).append(eventObject.getFormattedMessage().replace(TAB, "  "));
            //endregion

            //region exception details, when the event carries one
            IThrowableProxy errorDetails = eventObject.getThrowableProxy();
            if (errorDetails != null) {
                loggerBuilder.append("\r\n").append(errorDetails.getClassName())
                        .append("\r\n").append(errorDetails.getMessage());
                Arrays.stream(errorDetails.getStackTraceElementProxyArray())
                        .forEach(t -> loggerBuilder.append("\r\n").append("  " + t.getSTEAsString()));
            }
            //endregion

            // Messages larger than the broker's size limit (~1 MB default) are not
            // handled; considered low-probability.
            kafkaTemplate.send(topic, appName, loggerBuilder.toString());
        } catch (Exception ex) {
            System.err.println("kafka log error：" + ex.getMessage());
        }
    }

    /**
     * True when the logger belongs to an ignored class or package. Prefix
     * matching is used because the ignore set contains package names (e.g.
     * {@code org.springframework.scheduling.concurrent}) that an exact
     * containsKey lookup — as the old code did — could never match.
     */
    private static boolean isIgnored(String loggerName) {
        if (loggerName == null) {
            return false;
        }
        for (String prefix : IGNORE_PACKAGE.keySet()) {
            if (loggerName.startsWith(prefix)) {
                return true;
            }
        }
        return false;
    }

    private static Map<String, Boolean> buildIgnorePackage() {
        Map<String, Boolean> ignorePackage = new HashMap<>();
        // Kafka's own client logger: forwarding it while the broker is unreachable
        // would generate more Kafka errors and loop forever.
        ignorePackage.put("org.apache.kafka.clients.NetworkClient", true);
        // NOTE(review): this entry names a class from a different project —
        // looks copied over; kept for compatibility, but verify it is still wanted.
        ignorePackage.put("com.sea.columbus.gateway.global.appender.KafkaAppender", true);
        // Guard against self-recursion for THIS appender class as well — the old
        // list only excluded the foreign class name above.
        ignorePackage.put(KafkaAppender.class.getName(), true);
        ignorePackage.put("org.springframework.scheduling.concurrent", true);
        return ignorePackage;
    }

    /**
     * Returns the ignored logger-name prefixes (e.g. NetworkClient, to prevent
     * infinite logging loops). A fresh mutable copy is returned, matching the
     * old behavior of building a new map per call.
     *
     * @return mutable copy of the ignore set, keyed by class/package prefix
     */
    public Map<String, Boolean> getIgnorePackage() {
        return new HashMap<>(IGNORE_PACKAGE);
    }
}
