# -*- coding: utf-8 -*-
# =============================================================================
#         Desc: 格式化MySQL慢日志并按照各维度进行排序输出。
#       Author: GGA
#        Email:
#     HomePage:
#      Version: 1.0.0
#   LastChange: 2020-12-20
#      History:
# =============================================================================
import datetime
import sys
import re
import os
import hashlib
import argparse
import logging
import heapq
from utils.sqlpf_helper import SQLFingerPrint
from utils.logger_helper import LoggerHelper
from utils.top_max_heap import TopMaxHeapItem, TopMaxHeap
import fire

# Timestamp layout used when rendering start/stop times in the report files.
default_datetime_format = "%Y-%m-%d %H:%M:%S"

# Module-wide logger; re-bound by init_logger() once LoggerHelper is configured.
logger = logging.getLogger()


class SlowLogConfig(object):
    """Static parsing constants shared by the slow-log analyzer."""

    # Marker of a query line inside the general-log style format.
    QUERY_START_TAG = " Query\t"
    # Timestamps shaped "yymmdd hh:mm:ss".
    QUERY_TIME_PATTERN = r"^[0-9]{6} [0-9]{2}:[0-9]{2}:[0-9]{2}"
    # Timestamps shaped "yyyy-mm-ddThh:mm:ss" (ISO-8601 style).
    QUERY_TIME_PATTERN2 = r"^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}"
    # "use <db>;" statements are dropped while scanning.
    USE_DB_PATTERN = r"^use.*;"
    # Connector health-check / housekeeping statements that carry no insight.
    CHECK_SQL_LIST = [
        "select @@session.tx_read_only",
        "select 1",
        "set autocommit=1",
        "set autocommit=0",
        "commit",
    ]
    # Dimensions that each get their own TOP-N report file.
    SQL_TOP_TYPES = [
        "query_time_ms",
        "examined_rows",
        "affected_rows",
        "lock_time_ms",
        "sent_rows",
        "sent_bytes",
    ]
    # Dimension -> human-readable label used in the summary reports.
    SQL_STATS_TYPES = {
        "query_time_ms": "查询时间",
        "examined_rows": "预估行数",
        "affected_rows": "影响行数",
        "lock_time_ms": "阻塞时间",
        "sent_rows": "返回行数",
        "sent_bytes": "返回字节",
    }
    # Flush parsed entries to the format file once this many are cached.
    TMP_CACHE_LOG_NUMBER = 10000


class SlowLogItem(object):
    """Plain value object describing one parsed slow-log entry."""

    def __init__(self):
        # Textual fields default to the empty string.
        for text_field in ("login_user", "login_database", "raw_sql", "sql_print"):
            setattr(self, text_field, "")
        # Numeric counters default to zero.
        for numeric_field in ("query_time", "lock_time", "sent_rows",
                              "examined_rows", "affected_rows", "sent_bytes"):
            setattr(self, numeric_field, 0)
        # Execution window; None until parsed from the log.
        self.start_time = None
        self.stop_time = None


class SlowLogFilter(object):
    """Thresholds and skip lists applied to every parsed slow-log entry.

    Bug fix: skip_clients / skip_users used to be stored as the raw
    comma-separated string, which made the later ``user not in skip_users``
    check a *substring* test (e.g. user "oot" was skipped because it is a
    substring of "root,admin,dba"). They are now normalized to frozensets
    so membership tests match exact names.
    """

    # Width of the "unbounded" time window used when no bound is given.
    _OPEN_RANGE = datetime.timedelta(days=365 * 10)

    def __init__(self, start_time, stop_time, min_examined_rows, min_affect_rows,
                 min_query_time_ms, min_sent_rows, min_sent_bytes, skip_clients, skip_users):
        """
        :param start_time: earliest accepted entry time, or None for open-ended
        :param stop_time: latest accepted entry time, or None for open-ended
        :param min_examined_rows: entries examining fewer rows are dropped
        :param min_affect_rows: entries affecting fewer rows are dropped
        :param min_query_time_ms: entries faster than this (ms) are dropped
        :param min_sent_rows: entries sending fewer rows are dropped
        :param min_sent_bytes: entries sending fewer bytes are dropped
        :param skip_clients: comma-separated string or iterable of client hosts to ignore
        :param skip_users: comma-separated string or iterable of user names to ignore
        """
        now = datetime.datetime.now()
        self.start_time = start_time if start_time is not None else now - self._OPEN_RANGE
        self.stop_time = stop_time if stop_time is not None else now + self._OPEN_RANGE
        self.min_examined_rows = min_examined_rows
        self.min_affect_rows = min_affect_rows
        self.min_query_time_ms = min_query_time_ms
        self.min_sent_rows = min_sent_rows
        self.min_sent_bytes = min_sent_bytes
        self.skip_clients = self._to_name_set(skip_clients)
        self.skip_users = self._to_name_set(skip_users)

    @staticmethod
    def _to_name_set(value):
        """Normalize a comma-separated string or iterable into a frozenset of names."""
        if value is None:
            return frozenset()
        if isinstance(value, str):
            return frozenset(part.strip() for part in value.split(",") if part.strip())
        return frozenset(value)


class SlowLogHelper(object):
    def __init__(self, slow_log, top_sql_size, log_filter: SlowLogFilter, keep_database_name=1):
        self.slow_log = slow_log
        self.top_sql_size = top_sql_size
        self.log_filter = log_filter
        self.keep_database_name = keep_database_name
        self.check_time_str = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        self.sql_stats_map = {}
        self.sql_top_map = {}
        self.cache_log_items = []
        self.result_files = []
        self.result_path = self.get_result_dir_path()
        self.format_log_path = os.path.join(self.result_path, self.check_time_str + "_format.txt")
        self.big_sql_size = 1 * 1024 * 1024
        self.big_sql_list = []
        self.init_sql_top_map()

    @classmethod
    def append_file_content(cls, file_path, file_content):
        with open(file=file_path, encoding="utf-8", mode="a+") as fw:
            fw.write(file_content)

    @classmethod
    def spilt_sql_line_to_dict(cls, sql_line: str, log_item: dict):
        if not sql_line.startswith("# "):
            return
        if sql_line.startswith("# User@Host"):
            sql_line = sql_line.replace("User@Host:", "User:")
            sql_line = sql_line.replace(" @  ", "  Host: ")
        sql_line = sql_line.replace("# ", "")
        tmp_list = sql_line.split(sep="  ")
        for tmp_item in tmp_list:
            if tmp_item.find(":") >= 0:
                item_key = tmp_item.split(":")[0].strip()
                item_value = tmp_item.split(":")[1].strip()
                if item_value.find("[") >= 0:
                    item_value = item_value.split("[")[1].replace("]", "").strip()
                if item_key != "":
                    log_item[item_key] = item_value

    def format_sql_text(self, sql_text, database_name):
        tmp_sql_text = re.sub(
            r"\s+", " ",
            str(sql_text).replace("\r", " ").replace("\n", " ").replace("\t", " ")
        )
        tmp_sql_text = tmp_sql_text.strip().lower()
        is_valid_db_name = database_name.strip() != ""
        if is_valid_db_name and self.keep_database_name:
            tmp_sql_text = "use {}; ".format(database_name) + tmp_sql_text
        return tmp_sql_text

    @classmethod
    def get_log_item_print_info(cls, log_item: dict):
        return """
# start_time: {start_time}, stop_time: {stop_time}, query_time_ms: {query_time_ms}, lock_time_ms: {lock_time_ms}, is_killed: {is_killed}
# examined_rows: {examined_rows}, affected_rows: {affected_rows}, sent_rows: {sent_rows}, sent_bytes: {sent_bytes}, 
# database_name: {database_name}, login_user: {login_user}, login_host: {login_host}
# sql_pf_md5: {sql_pf_md5}
# sql_txt_md5: {sql_txt_md5}
# sql_text: {sql_text}
""".format(
            start_time=log_item["start_time"].strftime(default_datetime_format),
            stop_time=log_item["stop_time"].strftime(default_datetime_format),
            sql_txt_md5=hashlib.md5(log_item["sql_text"].encode(encoding='UTF-8')).hexdigest(),
            sql_text=log_item["sql_text"],
            sql_pf=log_item["sql_pf"],
            sql_pf_md5=log_item["sql_pf_md5"],
            query_time_ms=log_item["query_time_ms"],
            login_user=log_item["login_user"],
            login_host=log_item["login_host"],
            is_killed=log_item["is_killed"],
            database_name=log_item["database_name"],
            lock_time_ms=log_item["lock_time_ms"],
            sent_rows=log_item["sent_rows"],
            examined_rows=log_item["examined_rows"],
            affected_rows=log_item["affected_rows"],
            sent_bytes=log_item["sent_bytes"]
        )

    @classmethod
    def get_result_dir_path(cls):
        """
        获取检查结果目录路径
        :return:
        """
        result_path = os.path.join(os.path.curdir, "logs")
        if not os.path.exists(result_path):
            os.makedirs(result_path)
        return result_path

    @classmethod
    def is_check_sql(cls, sql_text: str):
        for tmp_sql in SlowLogConfig.CHECK_SQL_LIST:
            if sql_text.lower().find(tmp_sql.lower()) >= 0:
                return True
        return False

    def check_env(self):
        if not os.path.exists(self.slow_log):
            print("源文件{}不存在，请检查".format(self.slow_log))
            return False
        return True

    def init_sql_top_map(self):
        self.sql_top_map = {}
        for sql_top_type in SlowLogConfig.SQL_TOP_TYPES:
            self.sql_top_map[sql_top_type] = TopMaxHeap(top_size=self.top_sql_size)

    def update_sql_top_map(self, log_item: dict):
        """
        更新SQL统计的汇总数据
        :param log_item: 新增日志记录
        :return:
        """
        for sql_top_type in SlowLogConfig.SQL_TOP_TYPES:
            top_item = TopMaxHeapItem(item_key=log_item[sql_top_type], item_data=log_item)
            self.sql_top_map[sql_top_type].push_item(top_item)

    def update_sql_stats_map(self, log_item: dict):
        """
        更新SQL统计的汇总数据
        :param log_item: 新增日志记录
        :return:
        """
        status_types = [""]
        sql_stats_map = self.sql_stats_map
        sql_pf_md5 = log_item["sql_pf_md5"]
        if sql_pf_md5 not in sql_stats_map.keys():
            stats_item = dict()
            stats_item["sum_exec_count"] = 0
            stats_item["sql_pf"] = log_item["sql_pf"]
            stats_item["sql_pf_md5"] = log_item["sql_pf_md5"]
            stats_item["sql_sample"] = log_item["sql_text"]
            for status_type in SlowLogConfig.SQL_STATS_TYPES.keys():
                stats_item["sum_" + status_type] = 0
                stats_item["avg_" + status_type] = 0
            sql_stats_map[sql_pf_md5] = stats_item
        stats_item = sql_stats_map[sql_pf_md5]
        sum_exec_count = stats_item["sum_exec_count"] + 1
        stats_item["sum_exec_count"] = sum_exec_count
        for status_type in SlowLogConfig.SQL_STATS_TYPES.keys():
            stats_item["sum_" + status_type] += log_item[status_type]
            stats_item["avg_" + status_type] = stats_item["sum_" + status_type] / sum_exec_count

    def get_sql_stats_print_info(self, sql_status_type):
        print_info = []
        sql_map = self.sql_stats_map
        sql_map_items = list(sql_map.values())
        if len(sql_map_items) == 0:
            return print_info
        if sql_status_type not in sql_map_items[0].keys():
            return print_info
        sql_items = sorted(sql_map.values(), key=lambda item: item[sql_status_type], reverse=True)
        total_value = 0
        for sql_item in sql_items:
            total_value += sql_item[sql_status_type]
        for sql_item in sql_items:
            print_info.append("*" * 100 + "")
            if total_value != 0:
                exec_rate = round(sql_item[sql_status_type] * 100.0 / total_value, 2)
            else:
                exec_rate = ""
            print_info.append("执行占比:    {0}% (按{1})".format(exec_rate, sql_status_type))
            print_info.append("指纹SQL:    {0}".format(sql_item["sql_pf"]))
            print_info.append("指纹MD5:    {0}".format(sql_item["sql_pf_md5"]))
            print_info.append("示例SQL:    {0}".format(sql_item["sql_sample"]))
            print_info.append("执行次数:    {0}".format(sql_item["sum_exec_count"]))
            for status_type in SlowLogConfig.SQL_STATS_TYPES.keys():
                print_info.append("平均/汇总{}:    {} / {}".format(
                    SlowLogConfig.SQL_STATS_TYPES[status_type],
                    int(sql_item["avg_" + status_type]),
                    int(sql_item["sum_" + status_type])
                ))
        return print_info

    def check_log_filter(self, log_item):
        lf = self.log_filter
        if lf.start_time <= log_item["stop_time"] <= lf.stop_time \
                and log_item["query_time_ms"] >= lf.min_query_time_ms \
                and log_item["affected_rows"] >= lf.min_affect_rows \
                and log_item["examined_rows"] >= lf.min_examined_rows \
                and log_item["sent_rows"] >= lf.min_sent_rows \
                and log_item["sent_bytes"] >= lf.min_sent_bytes \
                and log_item.get("login_user", "") not in lf.skip_users \
                and log_item.get("login_host", "") not in lf.skip_clients:
            return True
        return False

    def write_cache_logs(self):
        cache_logs = self.get_cache_logs_print_info()
        self.append_file_content(
            file_path=self.format_log_path,
            file_content=cache_logs
        )

    def scan_slow_logs(self):
        loop_index = 0
        log_item = {}
        sql_text_lines = []

        with open(file=self.slow_log, mode="rb") as slh:
            for line in slh:
                try:
                    line = line.decode("utf-8")
                except Exception as ex:
                    logger.info(str(ex))
                    continue
                if not line:
                    break
                loop_index += 1
                if loop_index % 100 == 0:
                    logger.info("处理第{}行记录".format(loop_index))
                if len(self.cache_log_items) > SlowLogConfig.TMP_CACHE_LOG_NUMBER:
                    logger.info("write to format log")
                    self.write_cache_logs()
                    self.cache_log_items = []
                if line.startswith("SET timestamp="):
                    log_item["timestamp"] = line.replace("SET timestamp=", "")
                    sql_text_lines = []
                    continue
                if re.match(SlowLogConfig.USE_DB_PATTERN, line.strip(), re.IGNORECASE):
                    continue
                if line.startswith("# User@Host"):
                    # 新一条慢SQL开始,处理上一条记录
                    log_item["sql_text"] = " ".join(sql_text_lines)
                    sql_text_len = len(log_item["sql_text"])
                    if sql_text_len > self.big_sql_size:
                        self.big_sql_list.append(sql_text_len)
                        log_item = None
                    else:
                        log_item = self.format_log_item(log_item)
                    if (log_item is not None) and self.check_log_filter(log_item):
                        self.update_sql_stats_map(log_item)
                        self.update_sql_top_map(log_item)
                        self.cache_log_items.append(log_item)
                    # 开始新的慢SQL
                    log_item = {}
                    sql_text_lines = []
                    self.spilt_sql_line_to_dict(sql_line=line, log_item=log_item)
                    continue
                if line.startswith("# Time:"):
                    continue
                if line.startswith("# Schema:") or line.startswith("# Query_time:") or line.startswith("# Bytes_sent:"):
                    self.spilt_sql_line_to_dict(sql_line=line, log_item=log_item)
                    continue
                if not line.startswith("#"):
                    sql_text_lines.append(line)
        self.write_cache_logs()

    def create_status_files(self):
        tmp_stats_list = ["sum_exec_count"]
        for sql_status_type in SlowLogConfig.SQL_STATS_TYPES.keys():
            tmp_stats_list.append("sum_" + sql_status_type)
            tmp_stats_list.append("avg_" + sql_status_type)

        for sql_status_type in tmp_stats_list:
            print_info = self.get_sql_stats_print_info(
                sql_status_type=sql_status_type
            )
            static_log_path = os.path.join(
                self.result_path,
                self.check_time_str + "_stats_by_{}.txt".format(sql_status_type))
            self.result_files.append(static_log_path)
            self.append_file_content(
                file_path=static_log_path,
                file_content="\n".join(print_info)
            )

    def create_top_file(self):
        for sql_top_type in SlowLogConfig.SQL_TOP_TYPES:
            top_log_path = os.path.join(
                self.result_path,
                self.check_time_str + "_top_sql_by_{}.txt".format(sql_top_type))
            top_logs = self.get_top_log_print_info(sql_top_type)
            self.append_file_content(
                file_path=top_log_path,
                file_content=top_logs
            )
            self.result_files.append(top_log_path)

    def get_top_log_print_info(self, sql_top_type):
        cache_strings = []
        top_item_heap = self.sql_top_map[sql_top_type]
        top_items = top_item_heap.get_top_items()
        for top_item in top_items:
            log_item = top_item.item_data
            cache_strings.append(self.get_log_item_print_info(log_item))
            cache_strings.append("\r")
        return "".join(cache_strings)

    def format_log_item(self, log_item: dict):
        database_name = log_item.get("Schema", "")
        sql_text = self.format_sql_text(log_item["sql_text"], database_name)
        sql_pf = SQLFingerPrint.get_finger_print(sql_text)
        sql_pf_md5 = hashlib.md5(sql_pf.encode(encoding='UTF-8')).hexdigest()
        if "timestamp" in log_item.keys():
            stop_time = datetime.datetime.fromtimestamp(int(log_item["timestamp"].replace(";", "").replace("\n", "")))
            query_seconds = float(log_item["Query_time"])
            start_time = stop_time - datetime.timedelta(seconds=query_seconds)
            return {
                "start_time": start_time,
                "stop_time": stop_time,
                "sql_text": sql_text,
                "sql_pf": sql_pf,
                "sql_pf_md5": sql_pf_md5,
                "login_user": log_item.get("User", "").strip(),
                "login_host": log_item.get("Host", "").strip(),
                "is_killed": log_item.get("Killed", "0").strip(),
                "database_name": database_name,
                "query_time_ms": int(query_seconds * 1000),
                "lock_time_ms": int(float(log_item.get("Lock_time", "0")) * 1000),
                "sent_rows": float(log_item.get("Rows_sent", "0")),
                "examined_rows": float(log_item.get("Rows_examined", "0")),
                "affected_rows": float(log_item.get("Rows_affected", "0")),
                "sent_bytes": float(log_item.get("Bytes_sent", "0")),
            }
        return None

    def get_cache_logs_print_info(self):
        cache_strings = []
        for log_item in self.cache_log_items:
            cache_strings.append(self.get_log_item_print_info(log_item))
            cache_strings.append("\r")
        return "".join(cache_strings)

    def show_result_files(self):
        logger.info("*" * 40)
        logger.info("处理结果存入下列文件")
        for result_file in self.result_files:
            logger.info(result_file)
        logger.info("*" * 40)

    def show_big_sql_info(self):
        big_sql_list_len = len(self.big_sql_list)
        if big_sql_list_len > 0:
            logger.info(
                "共找到{}个长度大于{}的慢SQL,最长SQL长度为：".format(
                    big_sql_list_len,
                    self.big_sql_size,
                    max(self.big_sql_list)
                )
            )

    def check_logs(self):
        if not self.check_env():
            return
        self.scan_slow_logs()
        self.result_files.append(self.format_log_path)
        self.create_status_files()
        self.create_top_file()
        self.show_result_files()
        self.show_big_sql_info()


def init_logger(with_debug: bool = False):
    """(Re)configure the shared module-level logger.

    :param with_debug: log at DEBUG level when True, otherwise at INFO.
    """
    global logger
    level = logging.DEBUG if with_debug else logging.INFO
    LoggerHelper.init_logger(logger_level=level)
    logger = LoggerHelper.get_logger()


def main(slow_log: str, start_time: datetime.datetime = None, stop_time: datetime.datetime = None,
         min_examined_rows: int = 0, min_affect_rows: int = 0, min_query_time_ms: int = 0,
         min_sent_rows: int = 0, min_sent_bytes: int = 0, top_sql_size: int = 20,
         skip_users: str = "root,admin,dba", skip_clients: str = "127.0.0.1,localhost", with_debug: bool = False):
    """
    MySQL slow-log parsing and analysis tool.

    :param slow_log: path of the slow log file
    :param start_time: only consider entries finishing at or after this time
    :param stop_time: only consider entries finishing at or before this time
    :param min_examined_rows: entries examining fewer rows are ignored, default 0
    :param min_affect_rows: entries affecting fewer rows are ignored, default 0
    :param min_query_time_ms: entries faster than this (ms) are ignored, default 0
    :param min_sent_rows: entries sending fewer rows are ignored, default 0
    :param min_sent_bytes: entries sending fewer bytes are ignored, default 0
    :param top_sql_size: number of TOP SQL entries per dimension, default 20
    :param skip_users: comma-separated users to ignore, default "root,admin,dba"
    :param skip_clients: comma-separated client hosts to ignore, default "127.0.0.1,localhost"
    :param with_debug: enable DEBUG logging, default False
    :return: None
    """
    init_logger(with_debug=with_debug)
    logger.info("处理开始。。。")
    helper = SlowLogHelper(
        slow_log=slow_log,
        top_sql_size=top_sql_size,
        log_filter=SlowLogFilter(
            start_time=start_time,
            stop_time=stop_time,
            min_examined_rows=min_examined_rows,
            min_affect_rows=min_affect_rows,
            min_query_time_ms=min_query_time_ms,
            min_sent_rows=min_sent_rows,
            min_sent_bytes=min_sent_bytes,
            skip_users=skip_users,
            skip_clients=skip_clients,
        ),
    )
    helper.check_logs()
    logger.info("处理完成。。。")


if __name__ == '__main__':
    # Expose main() as a command-line interface via python-fire
    # (each keyword argument becomes a --flag).
    fire.Fire(main)
