# -*- coding: utf-8 -*-
# Rectifier side: single-switch and double-switch faults; inverter side: single-switch and double-switch faults
import os
import sys
from typing import Collection

import re
import numpy as np
import pandas as pd
import math
import pymongo
sys.path.append("/gpfs/scratch/chgwang/XI/Scripts/Refactoring_1/getData")
sys.path.append("/gpfs/scratch/chgwang/XI/Scripts/Refactoring_1/MLModel")

import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.metrics import accuracy_score
from tqdm import tqdm

matplotlib.use("Agg")


def convert_to_one_hot(sour_labels, num_channels: int = 6) -> list:
    """Convert column-wise fault labels to a one-hot channel matrix.

    Parameters
    ----------
    sour_labels : 2-D array-like of shape (k, n)
        Each column holds up to k fault labels; values are channel numbers
        starting at 1, with 0 meaning "no fault" for that slot.
    num_channels : int
        Number of output channels (rows). Defaults to 6 to match the
        six-switch converter used elsewhere in this script.

    Returns
    -------
    list
        A (num_channels, n) nested list of 0.0/1.0 floats where entry
        [c][i] is 1.0 iff label c+1 appears in column i.
    """
    sour_labels = np.array(sour_labels, dtype=np.int32)
    labels_one_hot = np.zeros((num_channels, sour_labels.shape[1]))
    for i in range(sour_labels.shape[1]):
        for label in sour_labels[:, i]:
            # 0 is the "no fault" marker; any other label selects a channel
            # (labels are 1-based, rows are 0-based).
            if label != 0:
                labels_one_hot[label - 1, i] = 1
    return labels_one_hot.tolist()
# Classify fault records as single-switch or double-switch faults.
def split_switch_fault(ids, col):
    """Partition record ids by number of distinct faulty switches.

    Looks up each record's ``label`` array in collection *col*, ignores the
    0 ("no fault") label, and buckets the id by how many distinct fault
    labels remain: exactly one -> single-switch fault, exactly two ->
    double-switch fault. Ids with zero or more than two distinct faults are
    dropped.

    Returns a (single_ids, double_ids) tuple of lists.
    """
    single_ids, double_ids = [], []
    for doc_id in ids:
        record = col.find_one({"_id": doc_id})
        faults = np.unique(record["label"])
        faults = faults[faults != 0]  # discard the "no fault" marker
        if faults.size == 1:
            single_ids.append(doc_id)
        elif faults.size == 2:
            double_ids.append(doc_id)
    return (single_ids, double_ids)
# With theta = 0.5 the measured accuracy rate is 95.3%.
def calc_eval_index_0(id, col, theta=0.5):
    """Compute per-channel confusion counts for one record.

    Parameters
    ----------
    id : the record's Mongo ``_id``.
    col : pymongo collection whose documents hold ``label`` (ground truth)
        and ``modeled_label`` (model scores).
    theta : threshold turning model scores into binary predictions.

    Returns
    -------
    np.ndarray of shape (6, 4) where each row is [TP, FP, FN, TN] for one
    channel.
    """
    channels_amount = 6
    record = col.find_one({"_id": id})
    truth = np.array(convert_to_one_hot(record["label"]))
    scores = np.array(record["modeled_label"])
    assert truth.shape[0] == channels_amount
    # NOTE(review): the model output appears to be 199 samples shorter than
    # the labels; drop the leading label samples to align — confirm offset.
    truth = truth[:, 199:].astype(int)
    pred = np.where(scores < theta, 0, 1).astype(int)
    truth_pos = truth == 1
    truth_neg = truth == 0
    pred_pos = pred == 1
    pred_neg = pred == 0
    eval_index = np.stack(
        [
            np.count_nonzero(pred_pos & truth_pos, axis=1),  # TP
            np.count_nonzero(pred_pos & truth_neg, axis=1),  # FP
            np.count_nonzero(pred_neg & truth_pos, axis=1),  # FN
            np.count_nonzero(pred_neg & truth_neg, axis=1),  # TN
        ],
        axis=1,
    )
    # Every aligned point falls into exactly one confusion cell.
    assert pred.shape[0] * pred.shape[1] == np.sum(eval_index)
    return eval_index

def calc_acc(id, col, theta=0.5):
    """Point-wise accuracy of the thresholded model output for one record.

    Fetches the record by ``_id``, one-hot encodes its ground-truth labels,
    binarizes the model scores at *theta*, and returns the fraction of
    (channel, sample) points where prediction equals truth.
    """
    record = col.find_one({"_id": id})
    truth = np.array(convert_to_one_hot(record["label"]))
    scores = np.array(record["modeled_label"])
    assert truth.shape[0] == 6
    # NOTE(review): labels are trimmed by 199 leading samples to match the
    # model output length — presumably a model warm-up window; confirm.
    truth = truth[:, 199:].astype(int)
    pred = np.where(scores < theta, 0, 1).astype(int)
    matches = np.count_nonzero(pred == truth)
    return matches / pred.size

def calc_avg_acc(ids, col):
    """Mean per-record accuracy (via calc_acc) over all given ids."""
    return np.mean([calc_acc(doc_id, col) for doc_id in ids])

if __name__ == "__main__":
    # Entry point: compute the average model accuracy for four fault
    # categories (rectifier/inverter x single/double switch) and save a CSV.
    client = pymongo.MongoClient("mongodb://127.0.0.1:27017/")
    db = client["Power_Fault"]
    col_sour = db["data_sour"]
    # all ID of the data, split data by ID
    # NOTE(review): `ids` is never used below (the loop variable shadows it).
    ids = col_sour.distinct("_id")
    # Records whose `location` field mentions the rectifier ("整流").
    rect_filter = {'location': {
        '$regex': re.compile(r"(?si:.*整流.*)")}}
    rect_ids = col_sour.distinct("_id",filter=rect_filter)
    # Records whose `location` field mentions the inverter ("逆变").
    inver_ids = col_sour.distinct("_id",filter={'location': {
        '$regex': re.compile(r"(?si:.*逆变.*)")}})

    # Split each location group into single- and double-switch fault ids.
    rect_sids, rect_dids = split_switch_fault(rect_ids, col_sour)
    inver_sids, inver_dids = split_switch_fault(inver_ids, col_sour)
    to_solve_ids = [rect_sids, rect_dids, inver_sids, inver_dids]
    names = ["single switch fault of rectifier", "double switch fault of rectifier",
            "single switch fault of inverter", "double switch fault of inverter"]
    # One-row DataFrame: average accuracy per fault category.
    df = pd.DataFrame(index=["average acc"],columns=names)
    for ids, name in zip(to_solve_ids, names):
        avg_acc = calc_avg_acc(ids, col_sour)
        df.loc["average acc", name] = avg_acc

    saved_dir = "/gpfs/scratch/chgwang/XI/DataBase/Model_figure"
    csv_file = os.path.join(saved_dir, "acc_index.csv")
    df.to_csv(csv_file)