from os import path as osp
from random import randint

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import xlwings as xw
from sklearn.metrics import f1_score
from sklearn.model_selection import StratifiedKFold, train_test_split
from torch import nn
from tqdm import tqdm

POSITIVE = 1
NEGATIVE = 0


# Plot training curves and report loss extrema.
class ShowLoss:
    """Plot loss and F1 curves over epochs and save the figure as a PNG.

    Parameters
    ----------
    loss_data, macro_F1_data, neg_F1_data, pos_F1_data :
        per-epoch sequences, all of the same length.
    title : str
        Figure title; also used as the output file name stem.
    out_path : str
        Directory the PNG is written into.
    decimal : int
        Number of digits used when rounding reported values.
    """

    def __init__(self, loss_data, macro_F1_data, neg_F1_data, pos_F1_data, title, out_path, decimal=3):
        self.loss = np.array(loss_data)
        self.digit = decimal
        self.title = title
        # Epoch index of the best value on each curve (min loss, max F1s).
        loss_min = np.argmin(loss_data)
        macro_f1_max = np.argmax(macro_F1_data)
        neg_f1_max = np.argmax(neg_F1_data)
        pos_f1_max = np.argmax(pos_F1_data)
        x = range(0, len(loss_data))
        plt.title(title)
        plt.xlabel('Epoch')
        plt.ylabel('Mean Loss')
        # red = loss, green = macro F1, blue dotted = positive F1, cyan dotted = negative F1
        plt.plot(x, loss_data, 'r', x, macro_F1_data, 'g', x, pos_F1_data, 'b:', x, neg_F1_data, 'c:')
        plt.legend([f"loss_min: {round(loss_data[loss_min], self.digit)} at {loss_min}.",
                    f"macro_F1_max: {round(macro_F1_data[macro_f1_max], self.digit)} at {macro_f1_max}.",
                    f'pos_F1_max: {round(pos_F1_data[pos_f1_max], self.digit)} at {pos_f1_max}.',
                    f'neg_F1_max: {round(neg_F1_data[neg_f1_max], self.digit)} at {neg_f1_max}.'])
        fig = plt.gcf()  # grab the current figure so it can be saved and released
        fig.savefig(osp.join(out_path, f'{title}.png'))
        fig.clear()  # release figure memory

    def get_loss_info(self):
        """Print the epoch and value of the minimum and maximum stored loss."""
        print(f'{self.title}：')
        loss = self.loss
        idx_max = np.argmax(loss)  # renamed: the originals shadowed builtins max/min
        idx_min = np.argmin(loss)
        print(f'Min at {idx_min} is {round(loss[idx_min], self.digit)};'
              f' Max at {idx_max} is {round(loss[idx_max], self.digit)}.')


class F1_Loss(nn.Module):
    '''Differentiable (soft) macro-F1 loss. Can work with gpu tensors.

    The original implementation is written by Michal Haltuf on Kaggle.
    Class counts are computed from softmax probabilities instead of hard
    predictions so the loss stays differentiable.

    Parameters
    ----------
    epsilon : float
        Numerical-stability term added to every denominator; also the
        clamp bound on the per-class F1.
    num_classes : int
        Number of classes used for the one-hot encoding of `y_true`
        (default 2, matching the original hard-coded binary case).

    Returns
    -------
    torch.Tensor
        Scalar `1 - mean(F1)`; each per-class F1 satisfies
        epsilon <= val <= 1 - epsilon.

    Reference
    ---------
    - https://www.kaggle.com/rejpalcz/best-loss-function-for-f1-score-metric
    - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score
    - https://discuss.pytorch.org/t/calculating-precision-recall-and-f1-score-in-case-of-multi-label-classification/28265/6
    - http://www.ryanzhang.info/python/writing-your-own-loss-function-module-for-pytorch/
    '''

    def __init__(self, epsilon=1e-7, num_classes=2):
        super().__init__()
        self.epsilon = epsilon
        self.num_classes = num_classes

    def forward(self, y_pred, y_true):
        # y_pred: (batch, num_classes) logits; y_true: (batch,) class indices.
        assert y_pred.ndim == 2
        assert y_true.ndim == 1
        y_true = F.one_hot(y_true, self.num_classes).to(torch.float32)
        y_pred = F.softmax(y_pred, dim=1)

        # Soft confusion-matrix counts, summed over the batch per class.
        tp = (y_true * y_pred).sum(dim=0).to(torch.float32)
        tn = ((1 - y_true) * (1 - y_pred)).sum(dim=0).to(torch.float32)
        fp = ((1 - y_true) * y_pred).sum(dim=0).to(torch.float32)
        fn = (y_true * (1 - y_pred)).sum(dim=0).to(torch.float32)

        precision = tp / (tp + fp + self.epsilon)
        recall = tp / (tp + fn + self.epsilon)

        f1 = 2 * (precision * recall) / (precision + recall + self.epsilon)
        # Clamp away from 0/1 so the gradient never vanishes entirely.
        f1 = f1.clamp(min=self.epsilon, max=1 - self.epsilon)
        return 1 - f1.mean()


def _get_bad_idx(y_true: list, y_pred: list):
    assert len(y_true) == len(y_pred)
    result = []
    for i in range(len(y_pred)):
        if y_true[i] != y_pred[i]:
            result.append(i)
    return result


class BertDataLoader:
    """Minimal batching container/iterator over sentence pairs and labels.

    Supports `len()` (number of batches), indexing (`loader[i]` -> batch i),
    and iteration, yielding `(batch_pairs, batch_labels)` slices.

    Parameters
    ----------
    sentences_pairs : sequence of [sentence_a, sentence_b] pairs.
    sentences_labels : sequence of labels, same length as the pairs.
    batch_size : int, maximum items per batch.
    shuffle : NOTE(review) — accepted but never used by the original code;
        kept only for interface compatibility. Confirm whether shuffling
        was intended before relying on it.
    """

    def __init__(self, sentences_pairs, sentences_labels, batch_size=64, shuffle=True):
        self.batch_size = batch_size
        self.length = len(sentences_labels)
        # Ceiling division: a trailing partial batch counts as one more batch.
        self.batch_count = (self.length + batch_size - 1) // batch_size
        self.sentences_pairs = sentences_pairs
        self.sentences_labels = sentences_labels
        self.index = 0  # iteration cursor, reset by __iter__

    def __getitem__(self, i):
        """Return batch `i` as a (pairs, labels) pair of list slices."""
        start = i * self.batch_size
        end = start + self.batch_size
        return self.sentences_pairs[start:end], self.sentences_labels[start:end]

    def __len__(self):
        return self.batch_count

    def __iter__(self):
        self.index = 0
        return self

    def __next__(self):
        if self.index >= self.batch_count:
            raise StopIteration
        batch = self[self.index]  # reuse __getitem__ instead of duplicating the slicing
        self.index += 1
        return batch


# Evaluate model predictions with macro F1 and collect misclassified pairs.
def evaluate(model, sentences_pairs, sentences_labels):
    """Run `model` over all sentence pairs in batches and score it.

    Returns a 4-tuple:
      (macro F1,
       binary F1 of the negative class,
       binary F1 of the positive class,
       DataFrame of mispredicted rows with columns Labels/Sentence_A/Sentence_B,
       where Labels is mapped back to its textual score).
    """
    bert_loader = BertDataLoader(sentences_pairs=sentences_pairs,
                                 sentences_labels=sentences_labels,
                                 batch_size=32)
    bad_preds_dict = {'Labels': [], 'Sentence_A': [], 'Sentence_B': []}
    y_pred, y_true = [], []
    for batch_pairs, batch_labels in tqdm(bert_loader):
        outputs = model(batch_pairs)  # forward pass over the whole batch
        _, predict = torch.max(outputs, 1)  # argmax over class scores
        batch_preds = predict.tolist()  # hoisted: was computed twice per batch
        y_pred.extend(batch_preds)
        y_true.extend(batch_labels)
        # Record every pair the model got wrong (truth first, matching the
        # helper's (y_true, y_pred) signature).
        for idx in _get_bad_idx(batch_labels, batch_preds):
            bad_preds_dict['Labels'].append(batch_labels[idx])
            bad_preds_dict['Sentence_A'].append(batch_pairs[idx][0])
            bad_preds_dict['Sentence_B'].append(batch_pairs[idx][1])
    df = pd.DataFrame(bad_preds_dict)
    df['Labels'] = df['Labels'].map({POSITIVE: "4分", NEGATIVE: "1分"})
    return f1_score(y_true, y_pred, average='macro'), \
           f1_score(y_true, y_pred, average='binary', pos_label=NEGATIVE), \
           f1_score(y_true, y_pred, average='binary', pos_label=POSITIVE), \
           df


def pairs_shuffle(temp_lst, temp_lst2):
    """Shuffle two equal-length lists in place, in lockstep.

    Uses a Fisher-Yates pass from the last position down, applying the
    same swap to both lists so element pairing is preserved. Returns the
    (mutated) lists for convenience.
    """
    assert len(temp_lst) == len(temp_lst2)
    for j in range(len(temp_lst) - 1, -1, -1):
        k = randint(0, j)
        temp_lst[j], temp_lst[k] = temp_lst[k], temp_lst[j]
        temp_lst2[j], temp_lst2[k] = temp_lst2[k], temp_lst2[j]
    return temp_lst, temp_lst2


# Load the labelled sentence-pair dataset from Excel and build splits.
class BuildKDatasets:
    """Read labelled sentence pairs from an Excel sheet and expose them as
    plain lists, a stratified k-fold split, or a single train/test split.

    Parameters
    ----------
    data_path : path to the Excel file; must contain the first/final text
        columns and the textual similarity-score column ("4分"/"1分").
    k_num : int, number of folds for `get_kFold_dataset`.
    shuffle : bool, shuffle the pairs once after loading.
    """

    def __init__(self, data_path, k_num=10, shuffle=True):
        self.k_num = k_num
        data_df = pd.read_excel(io=data_path)
        # Map the textual scores to the binary labels used everywhere else.
        data_df['相似度分数\n（初版和终板）'] = data_df['相似度分数\n（初版和终板）'].map({"4分": POSITIVE, "1分": NEGATIVE})
        sentences_A, sentences_B = data_df['初版文本'].to_list(), data_df['终版文本'].to_list()
        labels = data_df['相似度分数\n（初版和终板）'].tolist()  # hoisted: was rebuilt every iteration
        sentences_pairs, sentences_labels = [], []
        for i in range(len(sentences_A)):
            # BUG FIX: the original tested sentences_A twice, so a non-string
            # B cell (e.g. NaN) slipped through and crashed on .replace below.
            if isinstance(sentences_A[i], str) and isinstance(sentences_B[i], str):
                a = sentences_A[i].replace('\n', '').replace('\t', '').replace('\r', '')
                b = sentences_B[i].replace('\n', '').replace('\t', '').replace('\r', '')
                sentences_pairs.append([a, b])
                sentences_labels.append(labels[i])
            else:
                print(f"去除第{i}处数据")
        if shuffle:
            pairs_shuffle(sentences_pairs, sentences_labels)
        self.sentences_pairs = sentences_pairs
        self.sentences_labels = sentences_labels

    def get_primary_dataset(self):
        """Return the loaded (pairs, labels) lists unchanged."""
        return self.sentences_pairs, self.sentences_labels

    def get_kFold_dataset(self):
        """Return `k_num` stratified folds, each a dict with list-valued
        keys X_train / X_test / y_train / y_test."""
        np_pairs = np.array(self.sentences_pairs)
        np_labels = np.array(self.sentences_labels)
        skf = StratifiedKFold(n_splits=self.k_num, shuffle=True)
        k_folds = []
        for train_index, test_index in skf.split(np_pairs, np_labels):
            k_folds.append({'X_train': np_pairs[train_index].tolist(),
                            'X_test': np_pairs[test_index].tolist(),
                            'y_train': np_labels[train_index].tolist(),
                            'y_test': np_labels[test_index].tolist()})
        return k_folds

    def train_test_split(self):
        """Single shuffled split using sklearn's default test ratio."""
        X_train, X_test, y_train, y_test = \
            train_test_split(self.sentences_pairs, self.sentences_labels, shuffle=True)
        return X_train, X_test, y_train, y_test


def get_kFold_model_eval(kfold_F1_scores):
    """Collapse per-fold F1 scores into a single mean score."""
    return np.asarray(kfold_F1_scores).mean()


def write_to_excel(dfs_dict: dict, out_path: str):
    """Concatenate the DataFrames in `dfs_dict` and write them, formatted,
    to a new Excel workbook at `out_path` via xlwings.

    Expects each DataFrame to carry the three columns produced by
    `evaluate` (Labels / Sentence_A / Sentence_B); they are stacked
    row-wise with column-label alignment.
    """
    df = pd.concat(list(dfs_dict.values()), axis=0, join='outer')  # align on column labels
    app = xw.App(visible=False, add_book=False)
    app.display_alerts = False
    app.screen_updating = False
    try:
        wb = app.books.add()  # fresh workbook
        sht = wb.sheets.add("bad_predicts")
        sht[0:1, 0:3].value = df.columns
        sht[1:, 0:3].value = df.values
        sht[:, 0:3].api.Borders.LineStyle = 1  # thin cell borders
        sht[:, 0:3].api.HorizontalAlignment = -4108  # xlCenter
        sht[:, 0:3].api.VerticalAlignment = -4108  # xlCenter
        sht[:, 0:3].api.Font.Size = 10
        sht[:, 0:3].row_height = 18
        sht[:, 0:1].column_width = 11
        sht[:, 1:3].column_width = 44
        wb.save(out_path)
        wb.close()
    finally:
        # BUG FIX: always release the Excel application, even when building
        # or saving the workbook raises — the original leaked the process.
        app.quit()
