#!/usr/bin/env python
# -*- coding: utf-8 -*-

import gc
import os
import sys
import time
from collections import Counter

sys.path.append("../../")

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm


def loader(log_df, window_size):
    """Split a time-ordered log DataFrame into fixed-duration windows.

    Args:
        log_df: DataFrame with at least "timestamp" (unix seconds),
            "EventId" (integer template id) and "labels" (0 normal /
            1 anomaly) columns, ordered by time.
        window_size: window length in seconds.

    Returns:
        Tuple ``(predict, test_loader, num_normal, num_abnormal)``:
        ``predict`` holds per-window start/end times and ground-truth
        labels (aligned, one entry per window), ``test_loader`` is the
        list of per-window EventId sequences, and the two counters give
        the number of normal / abnormal windows.
    """
    # Aggregated per-window results; "all_preds" is filled later by the caller.
    predict = {"start_time": [], "end_time": [], "all_preds": [], "all_labels": []}
    test_loader = []
    data = []
    num_normal = 0
    num_abnormal = 0
    # Start of the window currently being accumulated.
    start_time = pd.to_datetime(log_df.iloc[0]["timestamp"], unit="s")
    # Ground-truth label of the current window: 1 if any line in it is anomalous.
    label = 0
    for _, line in tqdm(log_df.iterrows(), total=len(log_df)):
        end_time = pd.to_datetime(line["timestamp"], unit="s")
        data.append(line["EventId"])
        # One anomalous line marks the whole window abnormal.
        label = 1 if line["labels"] == 1 else label
        if (end_time - start_time).total_seconds() >= window_size:
            # Close the current window. Appending start_time only here keeps
            # start_time/end_time/all_labels aligned one-to-one per window.
            predict["start_time"].append(start_time)
            predict["end_time"].append(end_time)
            predict["all_labels"].append(label)
            if label == 0:
                num_normal += 1
            else:
                num_abnormal += 1
            test_loader.append(data)
            # Begin a fresh window; reset the label so an early anomaly does
            # not taint every later window.
            start_time = end_time
            data = []
            label = 0
    return predict, test_loader, num_normal, num_abnormal


class Predicter:
    """DeepLog-style next-event predictor for time-windowed log sequences.

    Loads a trained model checkpoint and flags a window as anomalous when
    the true next event is not among the model's top-k candidates.
    """

    def __init__(self, model, options):
        """Store the (unloaded) model and the evaluation options.

        Args:
            model: the model architecture; weights are loaded from
                ``options["model_path"]`` at prediction time.
            options: dict of run settings (device, window/model sizes, etc.).
        """
        self.data_dir = options["data_dir"]
        self.device = options["device"]
        self.model = model
        self.model_path = options["model_path"]
        self.window_size = options["window_size"]
        self.num_candidates = options["num_candidates"]
        self.num_classes = options["num_classes"]
        self.input_size = options["input_size"]
        self.sequentials = options["sequentials"]
        self.quantitatives = options["quantitatives"]
        self.semantics = options["semantics"]
        self.batch_size = options["batch_size"]

    def predict_unsupervised(self, log_df):
        """Evaluate anomaly detection over time windows of ``log_df``.

        A window is predicted anomalous (1) as soon as one of its
        sliding subsequences yields a true next event outside the model's
        top ``num_candidates`` predictions; otherwise it is normal (0).

        Returns:
            ``{"predict": predict, "metric": [P, R, F1]}`` where
            ``predict`` also carries the per-window predictions.
        """
        model = self.model.to(self.device)
        model.load_state_dict(torch.load(self.model_path)["state_dict"])
        model.eval()
        predict, test_loader, num_normal, num_abnormal = loader(log_df, self.window_size)
        TP = 0
        FP = 0
        with torch.no_grad():
            for idx, events in enumerate(test_loader):
                flagged = False
                # Slide a window over the event sequence; stop at first miss.
                for i in range(len(events) - self.window_size):
                    window = events[i : i + self.window_size]
                    next_event = events[i + self.window_size]
                    # Quantitative count vector over event ids in the window.
                    # Length must match the .view(-1, num_classes, input_size)
                    # below (was hard-coded 133 == num_classes * input_size).
                    counts = [0] * (self.num_classes * self.input_size)
                    for event_id, cnt in Counter(window).items():
                        counts[event_id] = cnt

                    seq0 = (
                        torch.tensor(window, dtype=torch.float)
                        .view(-1, self.window_size, self.input_size)
                        .to(self.device)
                    )
                    seq1 = (
                        torch.tensor(counts, dtype=torch.float)
                        .view(-1, self.num_classes, self.input_size)
                        .to(self.device)
                    )
                    label = torch.tensor(next_event).view(-1).to(self.device)
                    output = model(features=[seq0, seq1], device=self.device)
                    predicted = torch.argsort(output, 1)[0][-self.num_candidates :]

                    if label not in predicted:
                        # Model missed the true next event: predict anomalous.
                        if predict["all_labels"][idx] == 0:
                            FP += 1
                        else:
                            TP += 1
                        predict["all_preds"].append(1)
                        flagged = True
                        break
                if not flagged:
                    predict["all_preds"].append(0)

        # Precision, recall and F1; guard against empty denominators.
        FN = num_abnormal - TP
        P = 100 * TP / (TP + FP) if TP + FP else 0.0
        R = 100 * TP / (TP + FN) if TP + FN else 0.0
        F1 = 2 * P * R / (P + R) if P + R else 0.0
        metric = [P, R, F1]

        return {"predict": predict, "metric": metric}