import hashlib
import json
import os
import pathlib
import pickle
import sys
import time
import warnings
from collections import Counter

import pandas as pd

# Make the parent/sibling packages importable before loading drain3.
sys.path.append(str(pathlib.Path(__file__).parent.parent))
sys.path.append(str(pathlib.Path(__file__).parent))
from drain3 import TemplateMiner

warnings.filterwarnings('ignore')
warnings.filterwarnings('ignore')

def hash_decimal_to_hex(decimal):
    """Map an integer (e.g. a drain3 cluster id) to a short 8-char hex id.

    Uses MD5 instead of the builtin hash() for two reasons:
    - hash() is randomized per interpreter run (PYTHONHASHSEED), so template
      ids would differ between runs even with a pickled, reused miner;
    - the old code took hex(...)[:8], whose first two characters are the
      literal '0x' prefix, despite the comment asking for the LAST 8 chars.

    :param decimal: integer to hash (stringified before hashing).
    :return: deterministic 8-character lowercase hex string.
    """
    digest = hashlib.md5(str(decimal).encode('utf-8')).hexdigest()
    # Keep the last 8 hex characters as the template id.
    return digest[-8:]

# Merge consecutive '<*>' placeholders in a template into one, and drop the given ignore characters
def merge_token(in_str, ignores=()):
    """Collapse runs of consecutive '<*>' placeholders into a single '<*>'
    and replace every character listed in *ignores* with a space first.

    The default for *ignores* was a mutable list literal, which is a shared
    object across calls; an empty tuple is the safe equivalent.

    :param in_str: template string possibly containing '<*>' placeholders.
    :param ignores: iterable of substrings to blank out before merging.
    :return: normalized template with single spaces around each '<*>'.
    """
    for ignore in ignores:
        in_str = in_str.replace(ignore, ' ')
    parts = [part.strip() for part in in_str.split('<*>')]
    # No placeholder at all (or the whole string is one): nothing to merge.
    if in_str.strip() == '<*>' or len(parts) <= 1:
        return in_str.strip()
    merged = []
    for part in parts:
        # An empty part between two placeholders means they were adjacent;
        # skipping it collapses the run (the leading part is always kept).
        if merged and part == '':
            continue
        merged.append(part)
    # split() leaves a final empty part when the string ends with '<*>';
    # re-append it so the trailing placeholder survives the join.
    if parts[-1] == '':
        merged.append('')
    return ' <*> '.join(merged).strip()

# Mask content enclosed in brackets
def bracked_mask(in_str: str):
    """Replace bracketed segments ('[...]' and '{...}') in *in_str* with '<*>'.

    Only outermost, properly matched pairs are masked; nested pairs are
    swallowed by their enclosing pair. Returns the sentinel string '-1'
    when at least one opening bracket is left unmatched.
    """
    openers, closers = ['[', '{'], [']', '}']
    valid_pairs = list(zip(openers, closers))
    stack = []
    matched = []
    # Single scan: pair each closer with the most recent compatible opener.
    for pos, ch in enumerate(in_str):
        if ch in openers:
            stack.append(pos)
        elif ch in closers and stack:
            if (in_str[stack[-1]], ch) in valid_pairs:
                matched.append((stack[-1], pos))
                stack.pop()
    if stack:
        # At least one opener never found its closer.
        return '-1'
    # Keep only outermost pairs: walk left-to-right, skipping any pair that
    # ends before the furthest right edge already covered.
    outermost = []
    furthest = -1
    for pair in sorted(matched):
        if pair[1] >= furthest:
            outermost.append(pair)
            furthest = pair[1]
    result = in_str
    for left, right in sorted(outermost, reverse=True):
        # NOTE: str.replace masks every occurrence of this exact substring.
        result = result.replace(in_str[left:right + 1], '<*>')
    return result

# Strip leading/trailing junk from the log line, unify case, remove double quotes
def preprocess_str(in_str):
    """Normalize a raw log line: trim whitespace, lowercase, drop '"'."""
    cleaned = in_str.strip().lower()
    cleaned = cleaned.replace('"', '')
    return cleaned.strip()

def extract_templates(log_df, res_dir_drain, processed_log_dir):
    """Fit (or load) a drain3 TemplateMiner on the log messages and write a
    CSV with a 'template_id' column added.

    :param log_df: DataFrame with a 'message' column (one log line per row).
    :param res_dir_drain: directory holding the pickled template miner.
    :param processed_log_dir: directory for the output CSV.
    """
    miner_path = f'{res_dir_drain}/template_miner.pkl'
    processed_log_path = f'{processed_log_dir}/template_logstash-service.csv'

    processed_df = log_df
    total = len(processed_df)

    # --- Load an existing miner, or fit a fresh one on all messages ------
    if os.path.exists(miner_path):
        # NOTE(review): unpickling is unsafe on untrusted files; acceptable
        # here because this pickle is produced by this same script.
        with open(miner_path, 'rb') as f:
            template_miner = pickle.load(f)
        print('load existed template miner')
    else:
        template_miner = TemplateMiner()
        print('start fit')
        start = time.time()
        for idx, row in processed_df.iterrows():
            template_miner.add_log_message(row['message'])
            if idx % 10000 == 0:
                print(f'fit: {idx}/{total}...')
        end = time.time()
        print(f'end fit, using: {end-start}')
        os.makedirs(res_dir_drain, exist_ok=True)
        with open(miner_path, 'wb') as f:
            pickle.dump(template_miner, f)

    # --- Match every message to a template id ----------------------------
    print('start match')
    if_add_new_log = False
    start = time.time()
    templates = []
    for idx, row in processed_df.iterrows():
        # drain3 match() returns None for an unseen message; the original
        # code called match() twice per row and caught the AttributeError
        # from None.cluster_id with a bare except. Test explicitly instead.
        cluster = template_miner.match(row['message'])
        if cluster is None:
            print('add new log to template miner')
            if_add_new_log = True
            template_miner.add_log_message(row['message'])
            cluster = template_miner.match(row['message'])
        templates.append(hash_decimal_to_hex(cluster.cluster_id))
        if idx % 10000 == 0:
            print(f'match: {idx}/{total}...')
    end = time.time()
    print(f'end match, using: {end-start}')

    os.makedirs(processed_log_dir, exist_ok=True)
    processed_df['template_id'] = templates
    processed_df.to_csv(processed_log_path, index=False)
    # Re-persist the miner only when it learned new templates this run.
    if os.path.exists(miner_path) and if_add_new_log:
        with open(miner_path, 'wb') as f:
            pickle.dump(template_miner, f)



def log_extract_main(origin_dataset_path: pathlib.Path):
    """Collect every 'log_logstash-service' CSV under <dataset>/log and run
    template extraction on the combined frame.

    :param origin_dataset_path: root directory of the raw dataset.
    """
    res_dir_drain = origin_dataset_path / 'log/template_miner/'
    base_path = origin_dataset_path / 'log'
    # DataFrame.append was removed in pandas 2.0; collect the frames and
    # concatenate once (also avoids the quadratic cost of repeated append).
    frames = [
        pd.read_csv(base_path / name)
        for name in os.listdir(base_path)
        if 'log_logstash-service' in name
    ]
    log_df = pd.concat(frames) if frames else pd.DataFrame()

    processed_log_dir = origin_dataset_path / 'log'
    extract_templates(log_df, res_dir_drain, processed_log_dir)