# coding=utf-8

import atexit
import logging
import logging.handlers
import multiprocessing
import os
import sys
from concurrent.futures import ProcessPoolExecutor
from logging.handlers import QueueHandler, QueueListener
from queue import Queue

# Make the project root importable BEFORE the project-local import below
# (previously this ran after it and could not help resolve the module).
sys.path.append("../")
from logs.log_handlers import TimedRotatingCompressedFileHandler
# Alternative record layout, kept for reference:
# LOGGER_FORMAT = '%(asctime)s %(levelname)-5s [%(threadName)-10s] %(name)s.%(module)s.%(funcName)s : %(message)s'
# Record layout and formatter shared by every handler this module creates.
fmt = '%(asctime)s | %(levelname)-5s |%(module)s|%(threadName)s|%(funcName)s|%(lineno)d| %(message)s'
formatter = logging.Formatter(fmt)

# Configure the root logger once, at import time.
# NOTE(review): with stream=None and no filename, basicConfig attaches the
# default StreamHandler (stderr) and filemode='a' is silently ignored —
# filemode only takes effect together with a filename argument; confirm intent.
logging.basicConfig(
    stream=None,
    filemode='a',
    level=logging.INFO,
    format=fmt,
    # datefmt='%Y-%d-%d %H:%M:%S,%u%u%u'
)


def setup_custom_logger(name, log_level=logging.INFO):
    """Backward-compatible alias for :func:`setup_logging`.

    :param name: logger name, also used as the log-file base name.
    :param log_level: minimum level for the returned logger.
    :return: the configured :class:`logging.Logger`.
    """
    return setup_logging(name, log_level=log_level)


def setup_logging(name, log_level=logging.INFO):
    """Return a logger named *name* that writes asynchronously to a rotating file.

    Records are pushed onto an in-memory queue by a ``QueueHandler``; a
    ``QueueListener`` drains the queue on a background thread and forwards
    each record to a daily-rotating, gzip-compressed file handler, so the
    caller never blocks on disk I/O.

    :param name: logger name; also used as the log file's base name.
    :param log_level: minimum level accepted by the logger (default INFO).
    :return: the configured :class:`logging.Logger`.
    """
    logger = logging.getLogger(name)
    logger.setLevel(log_level)

    # getLogger() returns the same object for the same name, so a second
    # call with this name would otherwise attach a second QueueHandler and
    # duplicate every record. Reuse the already-configured logger instead.
    if any(isinstance(h, QueueHandler) for h in logger.handlers):
        return logger

    # Queue that decouples the emitting thread from file I/O.
    log_queue = Queue()
    logger.addHandler(QueueHandler(log_queue))

    # File sink: rotate at midnight, keep 7 days of history, gzip old files.
    fh = TimedRotatingCompressedFileHandler(filename=name, when='MIDNIGHT',
                                            backupCount=7, encoding='utf-8',
                                            compression="gz")
    fh.setFormatter(formatter)
    fh.setLevel(logging.DEBUG)

    # Background listener drains the queue into the file handler.
    listener = QueueListener(log_queue, fh)
    listener.start()
    # Flush and stop the listener thread at interpreter shutdown so the
    # last buffered records are not silently dropped.
    atexit.register(listener.stop)

    return logger


def setup_loguru(name, log_level=logging.INFO):
    """Configure and return the loguru logger with file and console sinks.

    Sinks installed:
      * ``<name>.<YYYY-MM-DD>`` file at DEBUG level — rotated daily at
        midnight, retained for 7 days, gzip-compressed.
      * ``sys.stderr`` at *log_level*.

    :param name: base name for the log file.
    :param log_level: stdlib logging level for the console sink.
    :return: the configured loguru ``logger``.
    """
    from loguru import logger

    # Drop loguru's default stderr sink so this function fully controls output.
    logger.remove()

    fmt_loguru = "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | " \
                 + "<level>{level: <5}</level> |" \
                 + "<cyan>{module}</cyan>|<cyan>{name}</cyan>|<cyan>{function}</cyan>|<cyan>{line}</cyan>| <level>{message}</level>"

    # File sink: rotate at midnight, keep 7 days, gzip rotated files.
    # enqueue=True routes writes through loguru's internal queue, making the
    # sink safe across threads and processes. This replaces the previous
    # ProcessPoolExecutor approach, which called logger.add() inside a child
    # process — configuring only that child's copy of the logger and leaving
    # the parent process with no file sink at all.
    logger.add(name + '.{time:YYYY-MM-DD}', format=fmt_loguru,
               level=logging.getLevelName(logging.DEBUG), rotation="00:00",
               retention='7 days', compression='gz', enqueue=True)

    # Console sink at the caller-requested level.
    logger.add(sys.stderr, format=fmt_loguru,
               level=logging.getLevelName(log_level))

    return logger