import sys
sys.path.append('../')

import ast
import json
import os
import re
from collections import defaultdict

import numpy as np
import pandas as pd
import torch
from tqdm import tqdm

from logparser import Spell, Drain
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances

# get [log key, delta time] as input for deeplog
# input_dir  = os.path.expanduser('~/.dataset/hdfs/')
input_dir = "../../logbert/dataset/hdfs/"  # directory with the raw HDFS log and anomaly labels
output_dir = '../output/hdfs/'  # The output directory of parsing results
log_file   = "HDFS.log"  # The input log file name
process_evolution = True  # if True, process the "evolved" template variant of the dataset

# Derived paths: parser outputs and the per-block sequence file.
log_structured_file = output_dir + log_file + "_structured.csv"
log_templates_file = output_dir + log_file + "_templates.csv"
log_sequence_file = output_dir + "hdfs_sequence.csv"

def find_most_similar_eventId(df_templates, df_templates_evo):
    """Map each evolved EventId that is absent from ``df_templates`` onto the
    most similar known EventId, comparing template embedding vectors.

    Similarity combines Euclidean distance (lower = closer) and cosine
    similarity (higher = closer): the known template minimizing
    ``distance - cosine_similarity`` wins.

    Args:
        df_templates: DataFrame with 'EventId' and 'Vector' columns
            (original templates). Mutated in place: 'Vector' entries are
            converted to numpy arrays, matching previous behavior.
        df_templates_evo: DataFrame with 'EventId' and 'Vector' columns
            (evolved templates). Also mutated the same way.

    Returns:
        dict mapping evolved EventId -> closest known EventId, containing
        only evolved ids not already present in ``df_templates``.
    """
    eventId_mapping = {}

    # Normalize both Vector columns to numpy arrays (kept as in-place updates
    # for parity with the original behavior).
    df_templates['Vector'] = df_templates['Vector'].apply(np.array)
    df_templates_evo['Vector'] = df_templates_evo['Vector'].apply(np.array)

    # Hoist loop invariants: a set for O(1) membership tests, and the stacked
    # matrix of known template vectors with their norms precomputed.
    known_ids = set(df_templates['EventId'].values)
    known_vectors = np.stack(df_templates['Vector'].tolist()).astype(float)  # (n, dim)
    known_norms = np.linalg.norm(known_vectors, axis=1)

    for _, row in df_templates_evo.iterrows():
        evo_eventId = row['EventId']
        if evo_eventId in known_ids:
            continue
        evo_vector = np.asarray(row['Vector'], dtype=float)

        # Cosine similarity and Euclidean distance against all known templates.
        cosine_similarities = (known_vectors @ evo_vector) / (
            np.linalg.norm(evo_vector) * known_norms
        )
        distances = np.linalg.norm(known_vectors - evo_vector, axis=1)
        combined_similarity = distances - cosine_similarities

        # Best match = minimal combined score.
        best_index = int(np.argmin(combined_similarity))
        eventId_mapping[evo_eventId] = df_templates.iloc[best_index]['EventId']

    for key in eventId_mapping:
        if key != eventId_mapping[key]:
            print("eventId_mapping: ", key, eventId_mapping[key])

    return eventId_mapping

def mapping():
    """Assign every template EventId a 1-based integer index, ordered by
    descending occurrence count, and persist the mapping as JSON."""
    templates = pd.read_csv(log_templates_file)
    ordered_ids = templates.sort_values("Occurrences", ascending=False)["EventId"]
    log_temp_dict = {event_id: rank for rank, event_id in enumerate(ordered_ids, start=1)}
    print(log_temp_dict)
    with open(output_dir + "hdfs_log_templates.json", "w") as f:
        json.dump(log_temp_dict, f)


def parser(input_dir, output_dir, log_file, log_format, type='drain'):
    """Parse the raw log into structured CSVs using Spell or Drain.

    Args:
        input_dir: directory containing ``log_file``.
        output_dir: directory for the parser's structured/template CSVs.
        log_file: name of the raw log file.
        log_format: whitespace-delimited field template, e.g.
            '<Date> <Time> <Pid> <Level> <Component>: <Content>'.
        type: parsing algorithm, 'spell' or 'drain' (default). NOTE: the
            parameter shadows the builtin ``type``; kept for caller
            compatibility.
    """
    if type == 'spell':
        tau = 0.5  # Message type threshold (default: 0.5)
        # Regular expression list for optional preprocessing (default: []).
        # Raw strings: "\w"/"\d" in plain strings are invalid escapes.
        regex = [
            r"(/[-\w]+)+",       # replace file path with *
            r"(?<=blk_)[-\d]+",  # replace block_id with *
        ]
        parser = Spell.LogParser(indir=input_dir, outdir=output_dir, log_format=log_format, tau=tau, rex=regex, keep_para=False)
        parser.parse(log_file)

    elif type == 'drain':
        regex = [
            r"(?<=blk_)[-\d]+",     # block_id
            r'\d+\.\d+\.\d+\.\d+',  # IP
            r"(/[-\w]+)+",          # file path
            #r'(?<=[^A-Za-z0-9])(\-?\+?\d+)(?=[^A-Za-z0-9])|[0-9]+$',  # Numbers
        ]
        # Hyper parameters set according to http://jmzhu.logpai.com/pub/pjhe_icws2017.pdf
        st = 0.5   # Similarity threshold
        depth = 5  # Depth of all leaf nodes

        parser = Drain.LogParser(log_format, indir=input_dir, outdir=output_dir, depth=depth, st=st, rex=regex, keep_para=False)
        parser.parse(log_file)


def hdfs_sampling(log_file, window='session'):
    """Group structured log lines into per-block event-id sequences.

    Reads the structured CSV (or its evolved variant when
    ``process_evolution`` is set), remaps evolved template ids onto the most
    similar original ids, converts EventId strings to integer indices via
    hdfs_log_templates.json, and writes one (BlockId, EventSequence) row per
    HDFS block to ``log_sequence_file``.

    Args:
        log_file: path to the structured CSV (used only when
            ``process_evolution`` is False).
        window: must be 'session' (the only supported windowing for HDFS).
    """
    assert window == 'session', "Only window=session is supported for HDFS dataset."
    print("Loading", log_file)
    if process_evolution == False:
        df = pd.read_csv(log_file, engine='c',
            na_filter=False, memory_map=True, dtype={'Date':object, "Time": object})
    else:
        df = pd.read_csv(output_dir + "HDFS.log_structured_evolution.csv", engine='c',
            na_filter=False, memory_map=True, dtype={'Date':object, "Time": object})

        # Embed both evolved and original template texts so that templates
        # can be compared semantically.
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = SentenceTransformer('sentence-transformers/all-mpnet-base-v2', device=device)

        df_templates_evo = pd.read_csv(output_dir + 'HDFS.log_templates_evolution.csv')
        embeddings = model.encode(df_templates_evo['EventTemplate'].tolist())
        df_templates_evo['Vector'] = [embedding.tolist() for embedding in embeddings]

        df_templates = pd.read_csv(output_dir + 'HDFS.log_templates.csv')
        embeddings = model.encode(df_templates['EventTemplate'].tolist())
        df_templates['Vector'] = [embedding.tolist() for embedding in embeddings]

        # Replace each evolved template id with its most similar old one.
        # (Vectorized: equivalent to the per-row df.at[...] loop, but far
        # faster; ids missing from the mapping are kept unchanged.)
        instead_dict = find_most_similar_eventId(df_templates, df_templates_evo)
        df['EventId'] = df['EventId'].map(lambda x: instead_dict.get(x, x))

    with open(output_dir + "hdfs_log_templates.json", "r") as f:
        event_num = json.load(f)
    # Convert EventId strings to integer indices; unknown ids become -1.
    df["EventId"] = df["EventId"].apply(lambda x: event_num.get(x, -1))

    # Session window: every blk_ id mentioned in a line's Content gets that
    # line's event id appended to its sequence.
    data_dict = defaultdict(list)  # preserve insertion order of items
    for idx, row in tqdm(df.iterrows()):
        blkId_list = re.findall(r'(blk_-?\d+)', row['Content'])
        blkId_set = set(blkId_list)
        for blk_Id in blkId_set:
            data_dict[blk_Id].append(row["EventId"])

    data_df = pd.DataFrame(list(data_dict.items()), columns=['BlockId', 'EventSequence'])
    data_df.to_csv(log_sequence_file, index=None)
    print("hdfs sampling done")


def generate_train_test(hdfs_sequence_file, n=None, ratio=0.4):
    """Split per-block event sequences into train / test_normal / test_abnormal files.

    Labels come from preprocessed/anomaly_label.csv. Normal sequences are
    shuffled (fixed seed 20); the first ``n`` (or ``ratio`` fraction) become
    the training set, the rest test_normal; all abnormal sequences become
    test_abnormal. Output file names get an "_evolution" suffix when
    ``process_evolution`` is set.

    Args:
        hdfs_sequence_file: CSV with BlockId and EventSequence columns.
        n: absolute training-set size; overrides ``ratio`` when given.
        ratio: fraction of normal sequences used for training (default 0.4).
    """
    blk_label_file = os.path.join(input_dir, "preprocessed/anomaly_label.csv")
    blk_df = pd.read_csv(blk_label_file)
    # Vectorized label dict (equivalent to the old per-row iterrows loop).
    blk_label_dict = {
        blk_id: (1 if label == "Anomaly" else 0)
        for blk_id, label in zip(blk_df["BlockId"], blk_df["Label"])
    }

    seq = pd.read_csv(hdfs_sequence_file)
    # Add label to the sequence of each blockid; unlabeled blocks get None
    # and are dropped by the ==0 / ==1 filters below.
    seq["Label"] = seq["BlockId"].apply(lambda x: blk_label_dict.get(x))

    normal_seq = seq[seq["Label"] == 0]["EventSequence"]
    normal_seq = normal_seq.sample(frac=1, random_state=20)  # shuffle normal data

    abnormal_seq = seq[seq["Label"] == 1]["EventSequence"]
    normal_len, abnormal_len = len(normal_seq), len(abnormal_seq)
    train_len = n if n else int(normal_len * ratio)
    print("normal size {0}, abnormal size {1}, training size {2}".format(normal_len, abnormal_len, train_len))

    train = normal_seq.iloc[:train_len]
    test_normal = normal_seq.iloc[train_len:]
    test_abnormal = abnormal_seq
    if process_evolution == False:
        df_to_file(train, output_dir + "train")
        df_to_file(test_normal, output_dir + "test_normal")
        df_to_file(test_abnormal, output_dir + "test_abnormal")
    else:
        df_to_file(train, output_dir + "train_evolution")
        df_to_file(test_normal, output_dir + "test_normal_evolution")
        df_to_file(test_abnormal, output_dir + "test_abnormal_evolution")
    print("generate train test data done")


def df_to_file(df, file_name):
    """Write each event sequence as a space-separated line in ``file_name``.

    Args:
        df: pandas Series whose values are the string repr of a list of
            event ids (e.g. "[1, 2, 3]"), as produced by the sequence CSV.
        file_name: output file path (overwritten).
    """
    with open(file_name, 'w') as f:
        for _, row in df.items():
            # ast.literal_eval instead of eval: identical result on
            # list-literal strings, but cannot execute arbitrary code.
            seq = ast.literal_eval(row)
            f.write(' '.join(str(ele) for ele in seq))
            f.write('\n')


if __name__ == "__main__":
    # 1. parse HDFS log
    log_format = '<Date> <Time> <Pid> <Level> <Component>: <Content>'  # HDFS log format
    # parser(input_dir, output_dir, log_file, log_format, 'drain')
    mapping()
    hdfs_sampling(log_structured_file)
    generate_train_test(log_sequence_file, n=4855)
