import torch
from torch.autograd import Variable
from torch import nn
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from sklearn.metrics import classification_report
import timeit
import os
from scipy.stats import pearsonr


# Mapping from sentiment label (Chinese) to class index.
sentiment_cls = {'感动': 0, '同情': 1, '无聊': 2, '愤怒': 3, '搞笑': 4, '难过': 5, '新奇': 6, '温馨': 7}
# Inverse lookup: class index -> sentiment label.
r_sentiment_cls = {index: label for label, index in sentiment_cls.items()}
# Label names ordered by class index (used e.g. as report headers).
cls_name = [r_sentiment_cls[i] for i in range(len(r_sentiment_cls))]

import numpy as np
import torch

class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""

    def __init__(self, patience=7, verbose=False, delta=0):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0          # epochs since the last improvement
        self.best_score = None    # best (negated) validation loss seen so far
        self.early_stop = False   # set True once patience is exhausted
        # BUGFIX: `np.Inf` was removed in NumPy 2.0; `np.inf` is the supported spelling.
        self.val_loss_min = np.inf
        self.delta = delta

    def __call__(self, val_loss, model):
        """Update early-stopping state with this epoch's validation loss.

        Args:
            val_loss (float): current validation loss (lower is better).
            model: module whose weights are checkpointed on improvement.
        """
        # Negate so that a decreasing loss becomes an increasing score.
        score = -val_loss

        if self.best_score is None:
            # First call: take it as the baseline and checkpoint.
            self.best_score = score
            self.save_checkpoint(val_loss, model)
        elif score < self.best_score + self.delta:
            # No sufficient improvement: count toward patience.
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # Improvement: record it, checkpoint, and reset the counter.
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        '''Saves model when validation loss decrease.'''
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')
        torch.save(model.state_dict(), 'checkpoint.pt')
        self.val_loss_min = val_loss

def train(model, train_it, optimizer, epoches=20, val_it=None, scheduler=None, early_stop=True, weight=None, device=None):
    '''
        Training loop.
            -- model : model to train
            -- train_it : training-set iterator (batches expose .content and .sentiment)
            -- optimizer : optimizer
            -- epoches : number of epochs [Default=20]
            -- val_it : validation-set iterator [Default=None]
            -- scheduler : optimizer scheduler, stepped once per epoch [Default=None]
            -- early_stop : enable early stopping on validation loss [Default=True]
            -- weight : per-class weight tensor for cross entropy [Default=None]
            -- device : torch device [Default=None]
        Returns: per-epoch validation accuracy and loss (only when val_it is given).
    '''
    validation_acc = []
    validation_loss = []
    early_stop_controller = EarlyStopping(patience=15, verbose=True)
    for epoch in range(epoches):
        t0 = timeit.default_timer()
        print('<epoch {:>3d}>'.format(epoch))
        model.train()
        step = 0
        # Per-class sample counter, printed each epoch to sanity-check the
        # label distribution fed by train_it.
        stat = torch.tensor([0] * len(sentiment_cls))
        train_loss = 0.0
        for batch in train_it:
            x, y = batch.content.to(device), batch.sentiment.to(device)
            # Verify example distribution
            for idx in y:
                stat[idx] += 1
            optimizer.zero_grad()
            res = model(x)
            loss = nn.functional.cross_entropy(res, y, weight=weight)
            train_loss += loss.item()
            loss.backward()
            optimizer.step()
            step += 1
            if step % 20 == 19:
                batch_correct_num = (torch.max(res, 1).indices == y).sum()
                batch_acc = batch_correct_num * 1.0 / y.size()[0]
                print('<step {:0>2d}>\t acc : {:.2%}\r'.format(step, batch_acc), end='')
        # BUGFIX: use identity comparison (`is not None`) instead of `!= None`.
        if scheduler is not None:
            scheduler.step()
        if val_it is None:
            print('train loss = {:.4f}'.format(train_loss / step), end='\t')
            print('time = {:.4f} s'.format(timeit.default_timer() - t0))
            continue

        with torch.no_grad():
            model.eval()
            print('<stat> : {}'.format(stat), end='')
            # validation
            correct_num = 0
            val_loss = 0.0
            val_num = 0
            for val_batch in val_it:
                x, y = val_batch.content.to(device), val_batch.sentiment.to(device)
                res = model(x)
                # BUGFIX: convert to Python numbers with .item() so the lists
                # returned to the caller hold plain floats, not 0-dim tensors.
                correct_num += (torch.argmax(res, dim=1) == y).sum().item()
                val_num += y.size(0)
                val_loss += nn.functional.cross_entropy(res, y, reduction='sum').item()
            print('\t acc = {:.2%}\t train_loss = {:.4f}\t val_loss = {:.4f}'.format(correct_num * 1.0 / val_num,
                                                                            train_loss / step, val_loss / val_num), end='\t')
            print('time = {:.4f} s'.format(timeit.default_timer() - t0))
            validation_acc.append(correct_num * 1.0 / val_num)
            validation_loss.append(val_loss / val_num)

            # Early Stop
            if early_stop:
                early_stop_controller(val_loss / val_num, model)
                if early_stop_controller.early_stop:
                    print('Early stop!')
                    break

    torch.save(model.state_dict(), 'final_epoch.pt')
    # BUGFIX: only reload the best checkpoint when validation actually ran;
    # with val_it=None the controller never saved 'checkpoint.pt' and the
    # original code crashed here.
    if early_stop and val_it is not None:
        model.load_state_dict(torch.load('checkpoint.pt'))
    if val_it is not None:
        return validation_acc, validation_loss


def evaluate(test_it, model, data_name='test', device=None):
    '''Evaluation helper.
            -- test_it : test-set iterator (batches expose .content and .sentiment)
            -- model : model to evaluate
            -- data_name : dataset name [Default='test']
            -- device : torch device [Default=None]

        Note: when micro-averaging over all classes,
        micro_avg_precision = micro_avg_recall = micro_avg_F1 = accuracy.
    '''
    print('*' * 60, '*' * 28 + 'eval' + '*' * 28, '*' * 60, sep='\n')

    model.eval()
    with torch.no_grad():
        total_num = 0
        # Running sum of per-example Pearson correlations between the one-hot
        # target vector and the raw model outputs.
        coef = 0.0
        # NOTE: the original allocated cls_tp/cls_fp/cls_tn/cls_fn counters here
        # but never used them; classification_report covers per-class metrics,
        # so the dead code was removed.
        predict = []
        target = []
        for batch in test_it:
            x, y = batch.content.to(device), batch.sentiment
            res = model(x).cpu()
            predict.append(torch.argmax(res, dim=1).numpy())
            target.append(y.numpy())
            for pred, category in zip(res, y):
                one_hot = [0] * len(sentiment_cls)
                one_hot[category] = 1
                coef += pearsonr(one_hot, pred)[0]
                total_num += 1

        predict = np.concatenate(predict, axis=0)
        target = np.concatenate(target, axis=0)
        print('Coef = {:.6f}, total = {}'.format(coef / total_num, total_num))
        print(classification_report(target, predict, target_names=cls_name, digits=4, zero_division=0))

def plot_val_acc_loss(val_acc, val_loss, model_name):
    '''Plot and save the validation accuracy and loss curves.
            -- val_acc : per-epoch validation accuracy values
            -- val_loss : per-epoch validation loss values
            -- model_name : string used in the output file names

        Writes acc_curve_<model_name>.png and loss_curve_<model_name>.png
        to the current directory.
    '''
    fig = plt.figure(figsize=(8, 5))
    plt.plot(val_acc, 'r-')
    # Render the y axis as percentages (accuracy is in [0, 1]).
    plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=2))
    plt.grid()
    plt.xlabel('epochs', fontsize=12)
    plt.ylabel('Accuracy', fontsize=10)
    plt.title('Accuracy of Validation\n batch_size=16', fontsize=12)
    plt.savefig(os.path.join('.', 'acc_curve_{}.png'.format(model_name)), dpi=400, format='png')
    # BUGFIX: close the figure after saving so repeated calls don't leak
    # open figures (matplotlib keeps them alive and warns past 20).
    plt.close(fig)

    fig = plt.figure(figsize=(8, 5))
    # BUGFIX: the original bound the result to an unused `ax` variable.
    plt.plot(val_loss, 'c-')
    plt.xlabel('epochs', fontsize=12)
    plt.ylabel('Loss', fontsize=10)
    plt.title('Loss of Validation\n batch_size=16', fontsize=12)
    plt.savefig(os.path.join('.', 'loss_curve_{}.png'.format(model_name)), dpi=400, format='png')
    plt.close(fig)