import pandas as pd
import numpy as np
import torch
from torch import nn
from torch.nn.modules import Sequential
from sklearn.model_selection import StratifiedKFold
import os
from torchmetrics.classification import BinaryF1Score
from torchmetrics.functional import f1_score
from sklearn.datasets import make_classification
from time import time
import sklearn

from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import roc_auc_score
from sklearn.metrics import classification_report
from imblearn.over_sampling import SMOTE
from collections import Counter

# import logging
# level = logging.INFO
# logging.basicConfig(level=logging.INFO)

# --- Hyperparameters / configuration ---
VECTOR_SIZE = 4096  # dimensionality of the input sentence vectors (head layer input size)
EPOCHES = 128  # number of training epochs
HID_BLOCK = 6  # number of hidden ("middle") blocks in the MLP
HID_SIZE = 1024  # width of each hidden layer
BETA = 1  # beta of the F-beta score used as the training loss (1 -> F1)
DROP_OUT_RATE = 0.05  # dropout probability inside each middle block
LEAKY_RATE = 0.125  # NOTE(review): unused in this file — possibly for a LeakyReLU variant; confirm
X_PATH = "sentence_codes_4096_dm0.npy"  # precomputed sentence embeddings (numpy array on disk)
ORIG_PATH = "train.csv"  # original labelled dataset; labels taken from its third column


def build_model(vector_size=VECTOR_SIZE,
                num_block=HID_BLOCK,
                hid_size=HID_SIZE):
    """Build the MLP classifier used throughout this script.

    Architecture: head (vector_size -> hid_size), then `num_block` middle
    blocks (Dropout + hid_size -> hid_size), then a tail (hid_size -> 1).
    Every block ends in Sigmoid; head/middle blocks use BatchNorm1d.

    Args:
        vector_size: size of each input feature vector.
        num_block: number of middle blocks.
        hid_size: width of the hidden layers.

    Returns:
        A torch ``Sequential`` model mapping (N, vector_size) -> (N, 1).
    """

    def block(mode='middle'):
        # Factories instead of ready-made modules: the original version
        # built all three sub-blocks (allocating their parameters) on
        # every call only to return one of them.
        factories = {
            'head': lambda: Sequential(
                nn.Linear(vector_size, hid_size),
                nn.BatchNorm1d(num_features=hid_size),
                nn.Sigmoid()),
            'middle': lambda: Sequential(
                nn.Dropout(p=DROP_OUT_RATE),
                nn.Linear(hid_size, hid_size),
                nn.BatchNorm1d(num_features=hid_size),
                nn.Sigmoid()),
            'tail': lambda: Sequential(
                nn.Linear(hid_size, 1),
                nn.Sigmoid()),
        }
        try:
            return factories[mode]()
        except KeyError:
            # Typo fixed: "ilegal" -> "illegal".
            raise ValueError(
                "illegal mode:{error_mode}".format(error_mode=mode)) from None

    model = Sequential()
    model.append(block('head'))
    for _ in range(num_block):
        model.append(block())
    model.append(block('tail'))
    return model


class FScoreLoss(torch.nn.Module):
    """Differentiable (soft) F-beta score loss.

    Treats the model's sigmoid outputs as soft predictions, accumulates
    soft TP/FN/FP counts, and returns the *negative* F-beta score so that
    minimising the loss maximises the score.
    """

    def __init__(self, beta=BETA, eps=1e-7):
        """
        Args:
            beta: weight of recall relative to precision (1 -> plain F1).
            eps: small constant guarding against division by zero.
        """
        super().__init__()
        self.beta = beta
        self.eps = eps

    def forward(self, y_pred, y_true, grad=True):
        # NOTE(review): parameter order fixed to match the actual call
        # convention, criterion(predictions, targets) — see clf_model.fit.
        # The original declared (y_true, y_pred) but ALSO swapped the
        # fn/fp formulas, so the two mistakes cancelled; this version is
        # numerically identical at the call site while the names now mean
        # what they say. `grad` is unused, kept for compatibility.
        tp = (y_true * y_pred).sum().to(torch.float32)
        # positives predicted negative (false negatives)
        fn = (y_true * (1 - y_pred)).sum().to(torch.float32)
        # negatives predicted positive (false positives)
        fp = ((1 - y_true) * y_pred).sum().to(torch.float32)
        precision = tp / (tp + fp + self.eps)
        recall = tp / (tp + fn + self.eps)

        f_score = (1 + self.beta**2) * (precision * recall) / (
            (self.beta**2) * precision + recall + self.eps)

        # Negate: higher F-beta => lower loss.
        return -1 * f_score


class clf_model(sklearn.base.BaseEstimator):
    """Minimal sklearn-style wrapper around the MLP from ``build_model()``.

    Trains with ``FScoreLoss`` (negative soft F-beta) and Adam. Mini-batches
    are the *test* folds of a StratifiedKFold split, so each batch keeps the
    class ratio of the full training set.
    """

    def __init__(self, epoches=10, verbose=0, validation=None) -> None:
        """
        Args:
            epoches: number of passes over the training data.
            verbose: 0 = silent; anything else prints per-epoch progress.
            validation: optional (X, y) numpy pair evaluated after each epoch.
        """
        self.model = build_model()
        self.criterion = FScoreLoss()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)
        self.epoches = epoches
        self.verbose = verbose
        self.acc_device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.conf_mtx_record = []  # per-epoch confusion matrices on the validation set
        self.auc_record = []  # per-epoch ROC-AUC on the validation set
        self.validation = validation
        if self.validation is not None:
            self.val_X, self.val_y = self.validation
            # Move validation features to the device once, up front.
            self.val_X = torch.tensor(self.val_X.astype(np.float32)).to(
                self.acc_device)

    def fit(self, X, y):
        """Train the model for ``self.epoches`` epochs on numpy arrays X, y."""
        # Each split's held-out fold serves as one stratified mini-batch.
        skf = StratifiedKFold(n_splits=16)
        self.model.to(self.acc_device)
        self.model.train()
        if self.verbose != 0:
            print('Start Training')
        for epoch in range(self.epoches):
            start_time = time()
            for _, batch_index in skf.split(X, y):
                # Inputs and targets do not need gradients (only the model
                # parameters do); the original set requires_grad=True on
                # both, wrongly pulling the *labels* into the autograd graph.
                batch_X = torch.tensor(
                    X[batch_index].astype(np.float32)).to(self.acc_device)
                batch_y = torch.tensor(
                    y[batch_index].astype(np.float32)).to(self.acc_device)
                y_pred = self.model(batch_X).reshape(-1)
                loss = self.criterion(y_pred, batch_y)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            if self.acc_device == 'cuda':
                # Once per epoch is plenty; the original per-batch
                # empty_cache() call only slowed training down.
                torch.cuda.empty_cache()
            end_time = time()

            if self.verbose != 0:
                # `loss` is the last batch's loss of this epoch.
                print(
                    'epoch_{epoch}\t\ttime:{time:.2f}s\t\tloss:{loss}'.format(
                        epoch=epoch,
                        time=end_time - start_time,
                        loss=loss.item()))
                if self.validation is not None:
                    # Evaluate in inference mode: no autograd graph is built,
                    # dropout is off and batchnorm uses running statistics.
                    self.model.eval()
                    with torch.no_grad():
                        y_pred = self.model(self.val_X).to('cpu').numpy()
                    self.model.train()
                    conf_mtx = confusion_matrix(
                        self.val_y,
                        np.around(y_pred).astype("int64"))
                    self.conf_mtx_record.append(conf_mtx)
                    self.auc_record.append(roc_auc_score(self.val_y, y_pred))

        # Leave the fitted model on CPU in eval mode for predict()/predict_proba().
        self.model.to('cpu')
        self.model.eval()
        return self

    def predict(self, X):
        """Return hard 0/1 predictions, shape (N, 1). Assumes fit() was called."""
        X = torch.from_numpy(X.astype(np.float32))
        return np.round(self.model(X).detach().numpy())

    def predict_proba(self, X):
        """Return raw sigmoid outputs, shape (N, 1). Assumes fit() was called."""
        X = torch.from_numpy(X.astype(np.float32))
        return self.model(X).detach().numpy()


if __name__ == '__main__':
    # 'cls' only exists on Windows; fall back to 'clear' elsewhere.
    os.system('cls' if os.name == 'nt' else 'clear')
    start_time = time()
    data = np.load(X_PATH)
    # Read the CSV once (the original read it twice, plus a third unused
    # read before fit); the label is its third column.
    orig_df = pd.read_csv(ORIG_PATH)
    label_names = [orig_df.columns[2]]
    end_time = time()
    print('Data prepared:\t\t\t\t{run_time:.2f}s'.format(run_time=end_time -
                                                         start_time))

    start_time = time()
    augmentator = SMOTE(sampling_strategy=0.3)
    label = orig_df.loc[:, label_names[0]].to_numpy().reshape(-1, 1)
    orig_len = len(label)
    # imblearn appends synthetic samples after the original rows, so
    # everything past orig_len is a synthetic positive (relied on below).
    X_res, y_res = augmentator.fit_resample(data, label)
    y_res = y_res.reshape(-1, 1)
    pos_index = np.where(label == 1)[0]
    del data, label
    end_time = time()
    print('Done the data augmentation:\t\t{run_time:.2f}s'.format(
        run_time=end_time - start_time))

    start_time = time()
    # Synthetic positives train-only; real positives all go to the test
    # set; real negatives are split 75/25 between train and test.
    train_X_pos, train_y_pos = X_res[orig_len:], y_res[orig_len:]
    X_res, y_res = X_res[:orig_len], y_res[:orig_len]
    test_X_pos, test_y_pos = X_res[pos_index], y_res[pos_index]
    X_neg, y_neg = np.delete(X_res, pos_index, axis=0), np.delete(y_res,
                                                                  pos_index,
                                                                  axis=0)
    del X_res, y_res

    train_X_neg, test_X_neg, train_y_neg, test_y_neg = train_test_split(
        X_neg, y_neg, test_size=0.25)
    del X_neg, y_neg
    train_X = np.concatenate([train_X_pos, train_X_neg], axis=0)
    train_y = np.concatenate([train_y_pos, train_y_neg], axis=0)
    del train_X_pos, train_X_neg, train_y_pos, train_y_neg
    end_time = time()
    print('Done the train/test split:\t\t{run_time:.2f}s'.format(
        run_time=end_time - start_time))

    start_time = time()
    # Shuffle X and y in lockstep by gluing y on as a last column.
    temp = np.concatenate([train_X, train_y], axis=1)
    np.random.shuffle(temp)
    train_X, train_y = temp[:, :-1], temp[:, -1]
    del temp
    test_X = np.concatenate([test_X_pos, test_X_neg], axis=0)
    test_y = np.concatenate([test_y_pos, test_y_neg], axis=0)
    del test_X_pos, test_X_neg, test_y_pos, test_y_neg
    temp = np.concatenate([test_X, test_y], axis=1)
    np.random.shuffle(temp)
    test_X, test_y = temp[:, :-1], temp[:, -1]
    del temp
    end_time = time()
    # Typo fixed: "concannate" -> "concatenate".
    print('Done the concatenate and shuffle:\t{run_time:.2f}s'.format(
        run_time=end_time - start_time))

    print('train set length:\t', len(train_y), '\t', Counter(train_y))
    print('test set length:\t', len(test_y), '\t\t', Counter(test_y))

    clf = clf_model(epoches=EPOCHES, verbose=1, validation=(test_X, test_y))

    clf.fit(train_X, train_y)
    del train_X, train_y
    # Flatten the (N, 1) network output so sklearn metrics get 1-D arrays
    # (avoids column-vector shape warnings).
    y_pred = clf.predict_proba(test_X).reshape(-1)
    y_pred_cls = np.around(y_pred).astype("int64")
    conf_mtx = confusion_matrix(test_y, y_pred_cls)
    print('AUC:\t', roc_auc_score(test_y, y_pred))
    print(classification_report(test_y, y_pred_cls))
    print(conf_mtx)
    plt.figure()
    sns.heatmap(conf_mtx, annot=True, cmap="crest", fmt='.20g')
    plt.show()
