#!/usr/bin/env python
"""
This script demonstrates the usage of logparser to parse your own log data.
To get started, please first install the logparser via `pip install logpai`.
To get better parsing results, you are suggested to tune the hyper-parameters
`st` and `depth`.
"""
import pandas as pd
from tqdm import tqdm
import itertools
from drain3.template_miner import TemplateMiner
from drain3.template_miner_config import TemplateMinerConfig
from drain3.file_persistence import FilePersistence
from transformers import BertTokenizer, BertModel
import torch
from torch import nn
import json
from datetime import datetime


class DrainProcesser:
    def __init__(self, config) -> None:
        r"""
        Wrap a drain3 ``TemplateMiner`` for log-template mining.

        config: {
            "drain_save_path": "path/to persistence file",
            "drain_config_path": "path/to drain3 .ini config"
        }
        """
        self._drain_config_path = config["drain_config_path"]
        # Persist mined clusters so repeated runs reuse the same cluster ids.
        persistence = FilePersistence(config["drain_save_path"])
        miner_config = TemplateMinerConfig()
        miner_config.load(config["drain_config_path"])
        self._template_miner = TemplateMiner(persistence, config=miner_config)

    def __call__(self, sentence) -> dict:
        """Feed one log line to the miner.

        Returns the drain3 result dict (callers read ``result["cluster_id"]``);
        the previous ``-> str`` annotation was wrong.
        """
        line = str(sentence).strip()
        return self._template_miner.add_log_message(line)


class BertEncoder:
    def __init__(self, bert_path) -> None:
        """Sentence encoder producing mean-pooled BERT embeddings (len 768)."""
        self._bert_tokenizer = BertTokenizer.from_pretrained(bert_path)
        self._bert_model = BertModel.from_pretrained(bert_path)
        self._bert_model.eval()  # inference only: disable dropout
        # Maps input sentence -> embedding (list of 768 floats).
        self.cache = {}

    def __call__(self, sentence, no_wordpiece=False):
        r"""
        Return the mean-pooled last-hidden-state embedding of `sentence`
        as a list (len=768).

        no_wordpiece: if True, drop whitespace-separated words that are not
        in the tokenizer vocabulary before encoding.
        """
        # Bug fix: cache under the ORIGINAL sentence. The old code filtered
        # `sentence` first and stored the result under the filtered text, so
        # a repeated call with no_wordpiece=True never hit the cache.
        cache_key = sentence
        if cache_key in self.cache:
            return self.cache[cache_key]

        if no_wordpiece:
            words = sentence.split(" ")
            words = [w for w in words if w in self._bert_tokenizer.vocab]
            sentence = " ".join(words)
        inputs = self._bert_tokenizer(
            sentence, truncation=True, return_tensors="pt", max_length=512
        )
        # No gradients needed for inference; avoids building the autograd graph.
        with torch.no_grad():
            outputs = self._bert_model(**inputs)

        # Mean-pool over the sequence dimension: (batch, seq, 768) -> (batch, 768).
        # (The original squeeze(dim=1) was a no-op since dim 1 has size 768.)
        embedding = torch.mean(outputs.last_hidden_state, dim=1)
        vec = embedding[0].tolist()
        self.cache[cache_key] = vec
        return vec


def processor(log_df, anomaly_df):
    """Label log lines as normal/anomalous, mine templates, and persist
    per-template BERT embeddings.

    log_df:     DataFrame with at least "timestamp" and "message" columns.
    anomaly_df: DataFrame of anomaly windows with "st_time" / "ed_time".

    Returns `log_df` sorted by timestamp with added "label" (1 inside any
    anomaly window, else 0) and "EventId" (drain3 cluster id) columns.
    Side effect: writes ./data/origin/event2semantic_vec.json mapping
    cluster id -> 768-dim embedding.
    """
    log_df = log_df.sort_values(by="timestamp")
    log_df["label"] = 0
    # Mark rows whose timestamp falls inside any anomaly window.
    # (len(df), not len(df.values.tolist()) — the latter materializes
    # every row just to count them.)
    for _, case in tqdm(anomaly_df.iterrows(), total=len(anomaly_df)):
        log_df.loc[
            (log_df["timestamp"] >= case["st_time"])
            & (log_df["timestamp"] <= case["ed_time"]),
            "label",
        ] = 1

    # Mine a template (cluster) id for every message.
    config = {"drain_config_path": "./drain3/drain.ini", "drain_save_path": "./drain3/drain.bin"}
    parser = DrainProcesser(config)
    event_ids = []
    for _, line in tqdm(log_df.iterrows(), total=len(log_df)):
        result = parser(line["message"])
        event_ids.append(result["cluster_id"])
    log_df["EventId"] = event_ids

    # Encode each mined template with BERT.
    bert = BertEncoder("bert-base-uncased")
    event2semantic = {}
    # NOTE: renamed loop var from `id` (shadowed the builtin).
    for cluster_id, cluster in parser._template_miner.drain.id_to_cluster.items():
        template = cluster.get_template()
        event2semantic[str(cluster_id)] = bert(template)

    with open("./data/origin/event2semantic_vec.json", "w") as f:
        json.dump(event2semantic, f)

    return log_df

