import hashlib
import json
import logging
import os
import subprocess
import sys
import time
from os.path import dirname

import pandas as pd
from drain3 import TemplateMiner
from drain3.template_miner_config import TemplateMinerConfig


def hash_decimal_to_hex(decimal):
    """Hash *decimal* into a short, stable 8-character hex string.

    The previous implementation had two defects: builtin ``hash()`` on a
    string is salted per interpreter run (PYTHONHASHSEED), so the persisted
    template ids were not reproducible across runs; and it sliced the FIRST
    8 characters of ``hex(...)`` — which include the ``"0x"`` prefix —
    despite the intent (per the original comment) of taking the last 8.
    MD5 is deterministic, and the last 8 hex digits serve as the short id.

    Args:
        decimal: Any value convertible with ``str()`` (typically a Drain3
            cluster id).

    Returns:
        An 8-character lowercase hexadecimal string.
    """
    digest = hashlib.md5(str(decimal).encode("utf-8")).hexdigest()
    return digest[-8:]
    
def get_drain_template(log_df):
    """Mine log templates from ``log_df['message']`` with Drain3.

    Each message is stripped of its first five comma-separated fields
    (timestamp/level style prefixes) and fed to a Drain3 ``TemplateMiner``;
    the resulting cluster id is hashed into a short hex string and stored
    in a new ``template_id`` column.

    Args:
        log_df: DataFrame with a ``message`` column of raw log lines.

    Returns:
        The same DataFrame, mutated in place with an added ``template_id``
        column, for convenience.
    """
    logger = logging.getLogger(__name__)
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')

    config = TemplateMinerConfig()
    # NOTE(review): path is resolved against the current working directory —
    # running the script from another directory will miss the config file.
    config.load("drain3.ini")
    config.profiling_enabled = True
    template_miner = TemplateMiner(config=config)

    start_time = time.time()
    results = []
    for line in log_df['message'].tolist():
        line = line.rstrip()
        # Drop the first 5 comma-separated fields and mine only the
        # free-text tail of the record.
        line = line.split(',', 5)[-1]
        results.append(template_miner.add_log_message(line))

    # Report mining throughput (previously computed but silently discarded).
    time_took = time.time() - start_time
    if time_took > 0:
        logger.info("Mined %d lines in %.2fs (%.0f lines/s)",
                    len(results), time_took, len(results) / time_took)

    # Log the discovered clusters, largest first, for inspection.
    sorted_clusters = sorted(template_miner.drain.clusters, key=lambda it: it.size, reverse=True)
    for cluster in sorted_clusters:
        logger.info(cluster)

    log_df['template_id'] = [hash_decimal_to_hex(r['cluster_id']) for r in results]
    return log_df

if __name__ == "__main__":
    # Paths are relative to the working directory the script is run from.
    base_path = '../origin_data/demo/log/'
    log_df = pd.read_csv(base_path + 'log_logstash-service.csv')
    template_df = get_drain_template(log_df)
    template_df.to_csv(base_path + 'template_logstash-service.csv', index=None)