import sys
sys.path.append('../')

import os
import re
import json
import pandas as pd
from collections import defaultdict
from tqdm import tqdm
import numpy as np
from logparser import Spell, Drain
from sentence_transformers import SentenceTransformer
import torch

# get [log key, delta time] as input for deeplog
# input_dir  = os.path.expanduser('~/.dataset/hdfs/')
input_dir = "../../logbert/dataset/hdfs/"  # raw HDFS dataset root (holds preprocessed/anomaly_label.csv)
parse_log_dir = '../../logbertV2/output/hdfs/'  # where the parser's structured/template CSVs already live
output_dir = '../output/hdfs/'  # The output directory of parsing results
log_file   = "HDFS.log"  # The input log file name

# Derived paths: Drain/Spell name their outputs "<log>_structured.csv" / "<log>_templates.csv".
log_structured_file = parse_log_dir + log_file + "_structured.csv"
log_templates_file = parse_log_dir + log_file + "_templates.csv"
log_sequence_file = output_dir + "hdfs_sequence_data.npz"  # per-block sequences written by hdfs_sampling()

def mapping():
    """Rank event templates by frequency and persist the mapping as JSON.

    Reads the parser's template CSV, orders templates from most to least
    frequent, and writes {EventId: 1-based rank} to hdfs_log_templates.json
    so later stages can translate template ids into compact integer keys.
    """
    templates = pd.read_csv(log_templates_file)
    templates.sort_values(by=["Occurrences"], ascending=False, inplace=True)
    event_to_rank = {}
    for rank, event_id in enumerate(templates["EventId"], start=1):
        event_to_rank[event_id] = rank
    print(event_to_rank)
    with open(output_dir + "hdfs_log_templates.json", "w") as fp:
        json.dump(event_to_rank, fp)


def parser(input_dir, output_dir, log_file, log_format, type='drain'):
    """Parse the raw log file into structured/template CSVs.

    Args:
        input_dir: directory containing ``log_file``.
        output_dir: directory the parser writes its CSV results to.
        log_file: name of the raw log file (e.g. "HDFS.log").
        log_format: whitespace-delimited field layout of one log line.
        type: parsing algorithm, 'spell' or 'drain' (default). Name kept
            for backward compatibility even though it shadows the builtin.

    Raises:
        ValueError: if ``type`` is neither 'spell' nor 'drain' (this was
            previously a silent no-op, which hid typos from the caller).
    """
    if type == 'spell':
        tau = 0.5  # Message type threshold (default: 0.5)
        # BUG FIX: these patterns must be raw strings — '\w' and '\d' in a
        # plain string are invalid escape sequences (DeprecationWarning now,
        # a SyntaxError in future Python versions).
        regex = [
            r"(/[-\w]+)+",       # replace file path with *
            r"(?<=blk_)[-\d]+",  # replace block_id with *
        ]  # Regular expression list for optional preprocessing (default: [])

        parser = Spell.LogParser(indir=input_dir, outdir=output_dir, log_format=log_format, tau=tau, rex=regex, keep_para=False)
        parser.parse(log_file)

    elif type == 'drain':
        regex = [
            r"(?<=blk_)[-\d]+",     # block_id
            r'\d+\.\d+\.\d+\.\d+',  # IP
            r"(/[-\w]+)+",          # file path
            #r'(?<=[^A-Za-z0-9])(\-?\+?\d+)(?=[^A-Za-z0-9])|[0-9]+$',  # Numbers
        ]
        # the hyper parameter is set according to http://jmzhu.logpai.com/pub/pjhe_icws2017.pdf
        st = 0.5   # Similarity threshold
        depth = 5  # Depth of all leaf nodes

        parser = Drain.LogParser(log_format, indir=input_dir, outdir=output_dir, depth=depth, st=st, rex=regex, keep_para=False)
        parser.parse(log_file)

    else:
        raise ValueError("Unknown parser type: {!r} (expected 'spell' or 'drain')".format(type))


def hdfs_sampling(log_file, window='session'):
    """Group parsed HDFS log lines into per-block (blk_...) event sequences.

    Reads the structured CSV produced by the parser, replaces each EventId
    with its integer rank (from hdfs_log_templates.json, written by
    mapping()), embeds each EventTemplate with a SentenceTransformer, then
    groups rows by the block id found in the Content field. The result is
    saved to ``log_sequence_file`` as an .npz with BlockId / EventSequence /
    Vector arrays (object arrays — loading requires allow_pickle=True).

    Args:
        log_file: path to the "<log>_structured.csv" file to sample from.
        window: only 'session' (group-by-block-id) is supported for HDFS.
    """
    assert window == 'session', "Only window=session is supported for HDFS dataset."
    print("Loading", log_file)
    df = pd.read_csv(log_file, engine='c',
            na_filter=False, memory_map=True, dtype={'Date':object, "Time": object})
    df_template = pd.read_csv(log_templates_file)

    with open(output_dir + "hdfs_log_templates.json", "r") as f:
        event_num = json.load(f)
    # EventIds absent from the mapping (templates unseen by mapping()) become -1.
    df["EventId"] = df["EventId"].apply(lambda x: event_num.get(x, -1))

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = SentenceTransformer(
        'distilbert-base-nli-mean-tokens', device=device)
    # calculate vectors for all known templates
    print('vector embedding...')
    embeddings = model.encode(
        df_template['EventTemplate'].tolist())  # num_workers=num_workers)
    df_template['Vector'] = list(embeddings)
    # template text -> embedding, so per-row lookup below avoids re-encoding.
    template_dict = df_template.set_index('EventTemplate')['Vector'].to_dict()

    # convert templates to vectors for all logs
    vectors = []
    for idx, template in tqdm(enumerate(df['EventTemplate'])):
        try:
            vectors.append(template_dict[template])
        except KeyError:
            # new template: not in the template CSV, encode it on the fly
            vectors.append(model.encode(template))
    df['Vector'] = vectors
    print('vector embedding done.')

    data_dict = defaultdict(list) #preserve insertion order of items
    data_dict2 = defaultdict(list)
    # A log line mentioning several distinct block ids contributes its event
    # (and vector) to each of those blocks; duplicates within one line are
    # collapsed by the set.
    for idx, row in tqdm(df.iterrows()):
        blkId_list = re.findall(r'(blk_-?\d+)', row['Content'])
        blkId_set = set(blkId_list)
        for blk_Id in blkId_set:
            data_dict[blk_Id].append(row["EventId"])
            data_dict2[blk_Id].append(row['Vector'])
    
    data_list = []
    for blk_Id in data_dict:
        data_list.append((blk_Id, data_dict[blk_Id], data_dict2[blk_Id]))

    data_df = pd.DataFrame(data_list, columns=['BlockId', 'EventSequence', 'Vector'])
    # Ragged per-block lists are stored as object arrays inside the .npz.
    np.savez(log_sequence_file, BlockId=data_df['BlockId'], EventSequence=data_df['EventSequence'], Vector=data_df['Vector'])
    print("hdfs sampling done")


def generate_train_test(hdfs_sequence_file, n=None, ratio=0.3):
    """Split per-block sequences into train / test_normal / test_abnormal files.

    Training data is drawn only from (shuffled) normal sequences; all
    abnormal sequences plus the remaining normal ones are held out for test.

    Args:
        hdfs_sequence_file: .npz written by hdfs_sampling() (BlockId,
            EventSequence, Vector object arrays).
        n: absolute number of normal sequences for the training split;
            when falsy, ``ratio`` of the normal data is used instead.
        ratio: fraction of normal sequences used for training when n is None.
    """
    # BlockId -> 1 (Anomaly) / 0 (Normal) ground-truth labels.
    blk_label_dict = {}
    blk_label_file = os.path.join(input_dir, "preprocessed/anomaly_label.csv")
    blk_df = pd.read_csv(blk_label_file)
    for _, row in tqdm(blk_df.iterrows()):
        blk_label_dict[row["BlockId"]] = 1 if row["Label"] == "Anomaly" else 0

    # Object arrays (ragged per-block lists) need allow_pickle=True to load.
    npzfile = np.load(hdfs_sequence_file, allow_pickle=True)
    seq = pd.DataFrame({
        'BlockId': npzfile['BlockId'],
        'EventSequence': npzfile['EventSequence'],
        'Vector': npzfile['Vector'],
    })

    # Attach labels. NOTE(review): blocks missing from the label file get
    # None and silently end up in neither split — confirm that is intended.
    seq["Label"] = seq["BlockId"].apply(lambda x: blk_label_dict.get(x))

    normal_seq = seq[seq["Label"] == 0]["EventSequence"]
    normal_seq_vector = seq[seq["Label"] == 0]["Vector"]
    # Shuffle sequences and vectors with the SAME permutation so they stay
    # aligned row-for-row.
    shuffle_indices = normal_seq.sample(frac=1, random_state=20).index
    normal_seq = normal_seq.loc[shuffle_indices]
    # BUG FIX: previously `normal_seq_vector[shuffle_indices]` — bare Series
    # __getitem__ with a list of labels is deprecated/ambiguous in modern
    # pandas; use .loc to match the line above and guarantee label-based
    # alignment.
    normal_seq_vector = normal_seq_vector.loc[shuffle_indices]

    abnormal_seq = seq[seq["Label"] == 1]["EventSequence"]
    abnormal_seq_vector = seq[seq["Label"] == 1]["Vector"]
    normal_len, abnormal_len = len(normal_seq), len(abnormal_seq)
    train_len = n if n else int(normal_len * ratio)
    print("normal size {0}, abnormal size {1}, training size {2}".format(normal_len, abnormal_len, train_len))

    train = normal_seq.iloc[:train_len]
    train_vector = normal_seq_vector.iloc[:train_len]
    test_normal = normal_seq.iloc[train_len:]
    test_normal_vector = normal_seq_vector.iloc[train_len:]
    test_abnormal = abnormal_seq
    test_abnormal_vector = abnormal_seq_vector

    df_to_file(train, train_vector, output_dir + "train")
    df_to_file(test_normal, test_normal_vector, output_dir + "test_normal")
    df_to_file(test_abnormal, test_abnormal_vector, output_dir + "test_abnormal")
    print("generate train test data done")


def df_to_file(seq, vector, file_name):
    """Persist a sequence/vector pair as ``<file_name>_data.npz`` (keys: x, y)."""
    out_path = "{}_data.npz".format(file_name)
    np.savez(out_path, x=seq, y=vector)


if __name__ == "__main__":
    # Pipeline stages — earlier steps are commented out; uncomment to rerun
    # the full parse -> mapping -> sampling -> split pipeline from scratch.
    # 1. parse HDFS log
    log_format = '<Date> <Time> <Pid> <Level> <Component>: <Content>'  # HDFS log format
    # parser(input_dir, output_dir, log_file, log_format, 'drain')
    # mapping()
    # hdfs_sampling(log_structured_file)
    generate_train_test(log_sequence_file, n=4855)  # fixed-size normal training split
