'''
Log keyword matching (日志关键字匹配): scans log files with configurable
keyword/regex filters and collects alert-worthy results.
'''
import json
import os
import re
from enum import Enum
from collections import namedtuple
from typing import List

class GlobalCofnig(Enum):
    """Global tuning constants.

    NOTE(review): the class name carries its original typo ("Cofnig");
    it is referenced elsewhere in this module, so it is kept as-is.
    """
    # Alert threshold for repeated log lines (occurrence count).
    repeat_threshold = 20

    # Log retention duration threshold, in days.
    log_storage_duration_threshold = 3

class FileParserName(Enum):
    """Names of the available file parsers (referenced from the file config)."""
    runlog_parser = "runlog_parser" # parses run logs with a fixed line format
    general_line_parser = "general_line_parser"  # generic line-by-line parsing
    # exp_parser = "exp_parser"

class FilterName(Enum):
    """Filter type identifiers."""
    key_word_filter = "key_word_filter"                     # keyword matching
    repeat_count = "repeat_count"                           # repeated-content counting
    file_storage_duration = "file_storage_duration"         # file retention duration

# How a keyword is matched: plain substring or regular expression.
class KeyWordMatchMethod(Enum):
    normal = 'normal' # plain substring matching
    re = 're' # regular-expression matching

# For the substring method: require every keyword, or any single one.
# For the regex method: require every pattern to match, or any single one.
class MatchStyle(Enum):
    AnyOf = 'Anyof' # any single match suffices (note: config value is 'Anyof')
    All = 'All' #  every keyword/pattern must match

# Names of the fields parsed out of each log line.
class LogLineSplitName(Enum):
    happen_time = 'happen_time'  # timestamp of the log record
    level = 'level'  # log level
    module_id = 'module_id'  # emitting module identifier
    record_file = 'record_file'  # source file that produced the line
    record_line_number = 'record_line_number'  # line number in that source file
    content = 'content'  # free-text message content


# JSON keys used in the source-file configuration file.
file_config_key = "source_files_config"
log_file_path_remote_key = 'log_file_path_remote'
log_file_name_key = 'log_file_name'
log_parser_key = 'log_parser'
# Schema record for one config field: its name, whether it is mandatory, and
# an optional list of allowed values. NOTE(review): the "Fileds"/"filed"
# typos are kept because these names are used throughout this module.
FileConfigFileds = namedtuple('FileConfigFileds', ['filed_name', 'filed_if_must', 'value_list'])
# Validation schema applied to every entry under "source_files_config".
file_conf_list = [
    FileConfigFileds(log_file_path_remote_key, True, None),
    FileConfigFileds(log_file_name_key, True, None),
    FileConfigFileds(log_parser_key, True, list(FileParserName._value2member_map_.keys()))
]

class FileConfigItem:
    """One entry of the source-file configuration (one log file to scan)."""
    def __init__(self, log_file_path_remote, log_file_name, log_parser) -> None:
        # Remote directory that holds the log file.
        self.log_file_path_remote = log_file_path_remote
        # Name of the log file.
        self.log_file_name = log_file_name
        # Parser name to use for this file (a FileParserName value).
        self.log_parser = log_parser

class FileConfig:
    """Loads and validates the source-file configuration.

    After a successful ``load()``, ``file_config_items`` holds one
    ``FileConfigItem`` per entry under the ``source_files_config`` key.
    """
    def __init__(self) -> None:
        # Parsed configuration entries, populated by load()/parse().
        self.file_config_items:List[FileConfigItem] = []

    def load(self, config_path) -> None:
        """Read, validate and parse the configuration at *config_path*.

        Raises:
            Exception: if the file cannot be read or fails validation.
        """
        data = self.load_file(config_path)
        self.check_config(data)
        self.parse(data)

    def load_file(self, config_path) -> dict:
        """Read *config_path* as UTF-8 JSON and return the parsed dict.

        Raises:
            Exception: on any read or JSON-decode error, with the original
                cause chained for diagnosis (the previous bare ``except:``
                hid it and also swallowed KeyboardInterrupt/SystemExit).
        """
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except (OSError, ValueError) as e:
            # json.JSONDecodeError is a subclass of ValueError.
            raise Exception("配置文件读取异常") from e

    def parse(self, data:dict) -> None:
        """Populate ``file_config_items`` from the validated config dict.

        Args:
            data (dict): configuration dict produced by ``load_file()``.
        """
        for item in data[file_config_key]:
            self.file_config_items.append(FileConfigItem(
                item[log_file_path_remote_key],
                item[log_file_name_key],
                item[log_parser_key]))

    def get_parsers(self):
        """Return a dict keyed by each distinct parser name in the config.

        Values are always 0; callers only iterate the keys.
        """
        return dict.fromkeys((item.log_parser for item in self.file_config_items), 0)

    def check_config(self, data:dict) -> None:
        """Validate the raw config dict: the top-level key must exist, the
        entry list must be non-empty, every mandatory field must be present,
        and restricted fields must hold an allowed value.

        Args:
            data (dict): configuration dict produced by ``load_file()``.

        Raises:
            Exception: describing the first violation found.
        """
        # Use the module-level constant directly (a local shadow of
        # file_config_key previously masked it).
        if file_config_key not in data:
            raise Exception("文件配置错误:必须以source_files_config作为索引")

        fields = data[file_config_key]
        if len(fields) == 0:
            raise Exception("文件配置错误:配置为空")

        for item in fields:
            for filed in file_conf_list:
                if filed.filed_if_must:
                    if filed.filed_name not in item:
                        raise Exception(f"文件配置错误:没有找到关键字{filed.filed_name}")
                    if filed.value_list and item[filed.filed_name] not in filed.value_list:
                        raise Exception(f"不支持{item[filed.filed_name]}，支持列表为:{' '.join(filed.value_list)}")

# JSON keys used in the keyword configuration file.
key_word_config_key = "key_words_config"
key_word_name_key = 'key_item_name'
file_name_of_produce_key_words_key = 'file_name_of_produce_key_words'
module_name_of_produce_key_words_key = 'module_name_of_produce_key_words'
key_words_method_key = 'key_words_method'
key_words_list_key = 'key_words_list'
key_words_list_match_style_key = 'key_words_list_match_style'
key_words_re_key = 'key_words_re'
key_words_re_match_style_key = 'key_words_re_match_style'
associated_parser_key = 'associated_parser'
# Schema record for one keyword-config field. Besides name/mandatory/allowed
# values it can declare a dependency: when the field named
# associate_filed_name holds associate_filed_value, this field becomes
# mandatory too (e.g. key_words_list is required when key_words_method is
# "normal"). NOTE(review): "Fileds"/"filed" typos kept for compatibility.
KeyWordConfigFileds = namedtuple('KeyWordConfigFileds', ['filed_name', 'filed_if_must', 'value_list', 'associate_filed_name', 'associate_filed_value'])
# Validation schema applied to every entry under "key_words_config".
key_word_conf_list = [
    KeyWordConfigFileds(key_word_name_key, True, None, None, None),
    KeyWordConfigFileds(file_name_of_produce_key_words_key, False, None, None, None),
    KeyWordConfigFileds(module_name_of_produce_key_words_key, False, None, None, None),
    KeyWordConfigFileds(key_words_method_key, True, None, None, None),
    KeyWordConfigFileds(key_words_list_key, False, None, key_words_method_key, KeyWordMatchMethod.normal.name),
    KeyWordConfigFileds(key_words_list_match_style_key, False, list(MatchStyle._value2member_map_.keys()), key_words_method_key, KeyWordMatchMethod.normal.name),
    KeyWordConfigFileds(key_words_re_key, False, None, key_words_method_key, KeyWordMatchMethod.re.name),
    KeyWordConfigFileds(key_words_re_match_style_key, False, list(MatchStyle._value2member_map_.keys()), key_words_method_key, KeyWordMatchMethod.re.name),
    KeyWordConfigFileds(associated_parser_key, True, list(FileParserName._value2member_map_.keys()), None, None)
]

class KeyWordConfigItem:
    """One keyword-filter entry from the keyword configuration file."""
    def __init__(self, key_word_name, file_name_of_produce_key_words, module_name_of_produce_key_words,
                 key_words_method, key_words_list, key_words_list_match_style, key_words_re,
                 key_words_re_match_style, associated_parser) -> None:
        # Display name of this keyword filter.
        self.key_word_name = key_word_name
        # Only match lines produced by this source file (None = any file).
        self.file_name_of_produce_key_words = file_name_of_produce_key_words
        # Only match lines produced by this module (None = any module).
        self.module_name_of_produce_key_words = module_name_of_produce_key_words
        # Matching method: a KeyWordMatchMethod value ('normal' or 're').
        self.key_words_method = key_words_method
        # Keywords for plain substring matching.
        self.key_words_list = key_words_list
        # MatchStyle value for the keyword list (AnyOf/All).
        self.key_words_list_match_style = key_words_list_match_style
        # Regex patterns for regex matching.
        self.key_words_re = key_words_re
        # MatchStyle value for the regex patterns (AnyOf/All).
        self.key_words_re_match_style = key_words_re_match_style
        # Name of the file parser this filter is attached to.
        self.associated_parser = associated_parser

class KeyWordConfig:
    """Loads and validates the keyword configuration file.

    After a successful ``load()``, ``key_word_config_items`` holds one
    ``KeyWordConfigItem`` per entry under the ``key_words_config`` key.
    """
    def __init__(self) -> None:
        # Parsed keyword entries, populated by load()/parse().
        self.key_word_config_items:List[KeyWordConfigItem] = []

    def load(self, config_path) -> None:
        """Read, validate and parse the configuration at *config_path*.

        Raises:
            Exception: if the file cannot be read or fails validation.
        """
        data = self.load_file(config_path)
        self.check_config(data)
        self.parse(data)

    def load_file(self, config_path) -> dict:
        """Read *config_path* as UTF-8 JSON and return the parsed dict.

        Raises:
            Exception: on any read or JSON-decode error, with the original
                cause chained (the previous bare ``except:`` hid it and also
                swallowed KeyboardInterrupt/SystemExit).
        """
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except (OSError, ValueError) as e:
            # json.JSONDecodeError is a subclass of ValueError.
            raise Exception("配置文件读取异常") from e

    def parse(self, data:dict) -> None:
        """Populate ``key_word_config_items`` from the validated config dict.

        Optional fields default to None via dict.get (equivalent to the
        previous explicit membership tests).

        Args:
            data (dict): configuration dict produced by ``load_file()``.
        """
        for item in data[key_word_config_key]:
            self.key_word_config_items.append(KeyWordConfigItem(
                item.get(key_word_name_key),
                item.get(file_name_of_produce_key_words_key),
                item.get(module_name_of_produce_key_words_key),
                item.get(key_words_method_key),
                item.get(key_words_list_key),
                item.get(key_words_list_match_style_key),
                item.get(key_words_re_key),
                item.get(key_words_re_match_style_key),
                item.get(associated_parser_key)))

    def get_filters(self):
        """Return the parsed keyword entries (one per configured filter)."""
        return self.key_word_config_items

    def check_config(self, data:dict) -> None:
        """Validate the raw config dict: the top-level key must exist, the
        entry list must be non-empty, mandatory fields must be present,
        dependent fields must be present when their trigger value matches,
        and restricted fields must hold an allowed value.

        Args:
            data (dict): configuration dict produced by ``load_file()``.

        Raises:
            Exception: describing the first violation found.
        """
        if key_word_config_key not in data:
            raise Exception(f"文件配置错误:必须以{key_word_config_key}作为索引")

        fields = data[key_word_config_key]
        if len(fields) == 0:
            raise Exception("文件配置错误:配置为空")

        for item in fields:
            for filed in key_word_conf_list:
                if filed.filed_if_must:
                    if filed.filed_name not in item:
                        raise Exception(f"文件配置错误:没有找到关键字{filed.filed_name}")
                    self._check_value_allowed(filed, item)

                if filed.associate_filed_name:
                    # The dependency triggers when no specific value is
                    # required, or when the associated field holds it.
                    # (associate fields point at key_words_method, which the
                    # mandatory check above has already guaranteed to exist.)
                    if filed.associate_filed_value is None or filed.associate_filed_value == item[filed.associate_filed_name]:
                        if item.get(filed.filed_name) is None:
                            raise Exception(f"文件配置错误:关键字{filed.associate_filed_name}配置或其值为[{filed.associate_filed_value}]的情况下，也必须配置关键字{filed.filed_name}")
                        self._check_value_allowed(filed, item)

    @staticmethod
    def _check_value_allowed(filed, item) -> None:
        """Raise when *filed* restricts values and the item's value is not allowed."""
        if filed.value_list and item[filed.filed_name] not in filed.value_list:
            raise Exception(f"{filed.filed_name}不支持{item[filed.filed_name]}，支持列表为:{' '.join(filed.value_list)}")


class Filter:
    """Abstract keyword-filter interface.

    A filter inspects every log line handed to it by a file parser and
    accumulates matches. Each filter is associated with one file parser.
    Subclasses must implement all four methods; the base methods raise
    NotImplementedError (the idiomatic marker for abstract methods, instead
    of the previous generic Exception — still caught by ``except Exception``).
    """
    def get_name(self):
        """Return a human-readable name/description of the filter."""
        raise NotImplementedError("需要初始化一个实际的过滤器")

    def get_type(self):
        """Return the filter type (a FilterName value)."""
        raise NotImplementedError("需要初始化一个实际的过滤器")

    def get_result(self):
        """Return the accumulated match results."""
        raise NotImplementedError("需要初始化一个实际的过滤器")

    def match(self, file, content, **kwargs):
        """Inspect one log line *content* from *file*; kwargs carry parsed fields."""
        raise NotImplementedError("需要初始化一个实际的过滤器")

class KeyWordFilter(Filter):
    """Filters log lines by configured keywords (plain substring or regex)."""
    def __init__(self, key_word_item: KeyWordConfigItem) -> None:
        self.key_word_item = key_word_item
        # file name -> list of (content, parsed-field kwargs) tuples that matched
        self.filter_result = {}
        super().__init__()

    def get_type(self):
        """Return the filter type identifier."""
        return FilterName.key_word_filter.value

    def get_name(self):
        """Return the configured name/description of this keyword filter."""
        return self.key_word_item.key_word_name

    def match(self, file, content, **kwargs):
        """Match one log line and record it in filter_result on success.

        When the config restricts the producing source file or module, lines
        from other files/modules are skipped.
        """
        if self.key_word_item.file_name_of_produce_key_words is not None and LogLineSplitName.record_file.value in kwargs:
            if self.key_word_item.file_name_of_produce_key_words != kwargs[LogLineSplitName.record_file.value]:
                return

        if self.key_word_item.module_name_of_produce_key_words is not None and LogLineSplitName.module_id.value in kwargs:
            if self.key_word_item.module_name_of_produce_key_words != kwargs[LogLineSplitName.module_id.value]:
                return

        result = False
        if self.key_word_item.key_words_method == KeyWordMatchMethod.normal.value:
            result = self.match_by_naive_search(content)
        elif self.key_word_item.key_words_method == KeyWordMatchMethod.re.value:
            result = self.match_by_re(content)

        if result:
            self.filter_result.setdefault(file, []).append((content, kwargs))

    def match_by_naive_search(self, content) -> bool:
        """Plain substring search, case-insensitive; keywords are stripped.

        AnyOf: succeed if any keyword is found; All: every keyword must occur.
        """
        content_lower = str(content).lower()
        if self.key_word_item.key_words_list_match_style == MatchStyle.AnyOf.value:
            return any(str(kw).lower().strip() in content_lower
                       for kw in self.key_word_item.key_words_list)
        if self.key_word_item.key_words_list_match_style == MatchStyle.All.value:
            return all(str(kw).lower().strip() in content_lower
                       for kw in self.key_word_item.key_words_list)
        return False

    # Backward-compatible alias: the original method name had a typo ("serach").
    match_by_naive_serach = match_by_naive_search

    def match_by_re(self, content) -> bool:
        """Regex search, case-insensitive.

        AnyOf: succeed if any pattern matches; All: every pattern must match.
        """
        if self.key_word_item.key_words_re_match_style == MatchStyle.AnyOf.value:
            return any(re.search(kw_re, content, re.IGNORECASE)
                       for kw_re in self.key_word_item.key_words_re)
        if self.key_word_item.key_words_re_match_style == MatchStyle.All.value:
            return all(re.search(kw_re, content, re.IGNORECASE)
                       for kw_re in self.key_word_item.key_words_re)
        return False

    def get_result(self):
        """Return the accumulated matches: {file: [(content, kwargs), ...]}."""
        return self.filter_result

class RepeatCountFilter(Filter):
    """Counts repeated log lines.

    Counting compares the stripped line content only, ignoring the other
    parsed record fields; entries below the configured repeat threshold are
    dropped when the result is collected.
    """
    def __init__(self, key_word_item: KeyWordConfigItem = None) -> None:
        self.key_word_item = key_word_item
        # file name -> {stripped line content: occurrence count}
        self.filter_result = {}
        super().__init__()

    def get_name(self):
        """Return the display name of this filter."""
        return "重复内容"

    def get_type(self):
        """Return the filter type identifier."""
        return FilterName.repeat_count.value

    def get_result(self):
        """Prune entries below the repeat threshold, then return the counts."""
        below_threshold = [
            (file, content)
            for file, contents in self.filter_result.items()
            for content, count in contents.items()
            if count < GlobalCofnig.repeat_threshold.value
        ]
        for file, content in below_threshold:
            self.filter_result[file].pop(content)
        return self.filter_result

    def match(self, file, content, **kwargs):
        """Count one occurrence of the stripped *content* under *file*."""
        counts = self.filter_result.setdefault(file, {})
        stripped = content.strip()
        counts[stripped] = counts.get(stripped, 0) + 1

class FileStorageDurationFilter(Filter):
    """Tracks the time span covered by a log file.

    If a log has already reached its maximum size, the gap between its newest
    and oldest timestamps indicates how long the file retains history.

    The original ``match`` was unfinished: it stored the first two timestamps
    and ignored every later line (the comparison was a TODO). It now tracks
    the running minimum and maximum.
    """
    def __init__(self, key_word_item: KeyWordConfigItem = None) -> None:
        self.key_word_item = key_word_item
        self.filter_result = {}
        # Newest (largest) timestamp seen so far.
        self.newest_date = None
        # Oldest (smallest) timestamp seen so far.
        # NOTE(review): the attribute name says "latest", but it has always
        # received the first (oldest) timestamp; kept for compatibility.
        self.latest_date = None
        super().__init__()

    def get_name(self):
        """Return the display name of this filter."""
        return "文件存储时长"

    def get_type(self):
        """Return the filter type identifier."""
        return FilterName.file_storage_duration.value

    def get_result(self):
        """Return the accumulated results."""
        return self.filter_result

    def match(self, file, content, **kwargs):
        """Update the oldest/newest timestamps from the line's happen_time.

        Timestamps are 'YYYY-MM-DD HH:MM:SS' strings (see RunlogParser), so
        lexicographic comparison matches chronological order.
        """
        if LogLineSplitName.happen_time.value not in kwargs:
            return
        happen_time = kwargs[LogLineSplitName.happen_time.value]
        if self.latest_date is None or happen_time < self.latest_date:
            self.latest_date = happen_time
        if self.newest_date is None or happen_time > self.newest_date:
            self.newest_date = happen_time

class Parser:
    """Base class for file parsers: defines how a file is parsed.

    file_config.json assigns each file a parser type, while
    key_word_config.json defines the keyword filters, each associated with
    one parser. While parsing a file, the parser invokes its filters on
    every line to perform keyword matching.
    """
    pass

class RunlogParser(Parser):
    """Parses run logs whose (whitespace-collapsed) lines look like:
    'YYYY-MM-DD hh:mm:ss LEVEL MODULE FILE[LINE]: content'
    """
    # Compiled once instead of rebuilt for every line. Groups, in order:
    # happen_time, level, module_id, record_file, record_line_number, content.
    _LINE_PATTERN = re.compile(
        r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) (.*?) (.*?) (.*?)\[(.*?)\]\s*:(.*)')

    def __init__(self) -> None:
        super().__init__()

    def get_name(self):
        """Return this parser's registered name."""
        return FileParserName.runlog_parser.value

    def parse(self, file_path: str, filters: List[Filter]):
        """Parse *file_path* line by line and feed each line to *filters*.

        Args:
            file_path (str): path of the log file to read.
            filters (List[Filter]): keyword filters applied to every line.
        """
        base_name = os.path.basename(file_path)  # hoisted: invariant per file
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                # Collapse runs of whitespace into single spaces.
                line_sub = re.sub(r'\s+', ' ', line)
                res = self._LINE_PATTERN.match(line_sub)
                if not res:
                    # Line does not follow the expected format; skip it.
                    # NOTE(review): such lines may be worth logging.
                    continue
                happen_time, level, module_id, record_file, record_line_number, content = res.groups()
                args = {
                    LogLineSplitName.happen_time.value: happen_time,
                    LogLineSplitName.level.value: level,
                    LogLineSplitName.module_id.value: module_id,
                    LogLineSplitName.record_file.value: record_file.strip(),
                    LogLineSplitName.record_line_number.value: record_line_number.strip(),
                }
                # Apply every keyword filter to this line ("flt" avoids
                # shadowing the builtin "filter").
                for flt in filters:
                    flt.match(base_name, content, **args)
    

class KeyWordManager:
    """Wires the configs, parsers and filters together and runs the scan."""
    def __init__(self) -> None:
        self.file_config = None
        self.key_word_config = None
        # parser name -> Parser instance
        self.parsers = {}
        # parser name -> list of Filter instances attached to that parser
        self.filters = {}

    def load(self, file_config: FileConfig, key_word_config: KeyWordConfig):
        """Install both configs and build the parsers/filters from them."""
        self.file_config = file_config
        self.create_parsers_from_config()

        self.key_word_config = key_word_config
        self.create_filters_from_config()

    def run(self):
        """Pull every configured file and parse it with its filters.

        Raises:
            Exception: if load() has not been called first.
        """
        if self.file_config is None or self.key_word_config is None:
            raise Exception("运行前请使用load函数加载文件配置和关键字配置!")

        for fc in self.file_config.file_config_items:
            # Fetch the file from the remote host first.
            local_file_path = self.pull_file(fc.log_file_path_remote, fc.log_file_name)
            parser = self.parsers[fc.log_parser]
            parser.parse(local_file_path, self.filters[fc.log_parser])

    def collect_result(self):
        """Collect results into a scan-report summary.

        Report shape:
        filter name 1:
            content: raw filter results per file
            summary: ["file X yielded N results, earliest at ..., latest at ...", ...]
        filter name 2:
            ...
        """
        result = {}
        for filter_list in self.filters.values():
            # "flt" avoids shadowing the builtin "filter".
            for flt in filter_list:
                filter_name = flt.get_name()
                filter_type = flt.get_type()
                org_content = 'content'
                summary_key = 'summary'
                filter_result = flt.get_result()
                result[filter_name] = {org_content: filter_result, summary_key: []}
                if filter_type == FilterName.repeat_count.value:
                    for file, counts in filter_result.items():
                        for content, count in counts.items():
                            result[filter_name][summary_key].append(f'从文件{file}中扫描出内容[{content}]重复了{count}次,预警线是{GlobalCofnig.repeat_threshold.value}次')
                else: # generic matchers: entries are (content, kwargs) tuples
                    for file_path, matches in filter_result.items():
                        if not matches:
                            # No hits for this file; nothing to summarize
                            # (previously this indexed an empty sorted list).
                            continue
                        # The loop variable no longer shadows the outer
                        # "result" dict (the old comprehension reused the name).
                        date_time_sorted = sorted(m[1][LogLineSplitName.happen_time.value] for m in matches)
                        result[filter_name][summary_key].append(f'从文件{file_path}中扫描出{len(matches)}处结果，最早发生时间为{date_time_sorted[0]},最晚发生时间为{date_time_sorted[-1]}')
        return result

    def pull_file(self, remote_dir, file_name):
        """Fetch *file_name* from *remote_dir*; return the local path.

        NOTE(review): currently a stub — no ssh/scp/sftp transfer happens; it
        returns the bare file name, so files are expected locally.
        """
        return file_name

    def create_parsers_from_config(self):
        """Instantiate one parser per parser name used in the file config."""
        for name in self.file_config.get_parsers():
            self.parsers[name] = self.create_parser_by_name(name)

    def create_parser_by_name(self, name) -> Parser:
        """Create the parser instance registered under *name*.

        Returns None for names with no implementation yet (e.g.
        general_line_parser is declared in FileParserName but not built).
        """
        if name == FileParserName.runlog_parser.value:
            return RunlogParser()
        return None

    def create_filters_from_config(self):
        """Build one keyword filter per config item, grouped by its parser."""
        for item in self.key_word_config.get_filters():
            new_filter = self.create_filter_by_name(FilterName.key_word_filter.value, item)
            self.filters.setdefault(item.associated_parser, []).append(new_filter)

    def create_filter_by_name(self, filter_name, arg) -> Filter:
        """Create a filter of type *filter_name* configured with *arg*."""
        if filter_name == FilterName.key_word_filter.value:
            return KeyWordFilter(arg)
        return None

    def attach_filter_to_parser(self, parser_name, filter: Filter):
        """Register an extra (shared) filter for *parser_name*.

        Args:
            parser_name: file parser name.
            filter (Filter): filter instance.
        """
        self.filters.setdefault(parser_name, []).append(filter)


if __name__ == '__main__':
    # Intended flow (not implemented yet):
    # - read the list of log files
    # - pull the files from the remote host to the local machine
    # - run the log parsers over each file
    # - apply keyword and other filters while parsing
    # - collect the final results and decide whether to raise an alert
    pass