"""
@Time    : 2018/11/7 17:37
@Author  : CcH
"""
import itertools
import torch
from sklearn.metrics import classification_report
from torch.autograd import Variable
from torch import nn
import matplotlib.pyplot as plt
import torch.utils.data as Data
from DataUtils.dataloader import DataLoader
from tqdm import tqdm
from sklearn.metrics import confusion_matrix
import numpy as np
EPOCH = 1   # train the training data n times, to save time, we just train 1 epoch
BATCH_SIZE = 32
LR = 0.001     # learning rate
SEQ_LEN=20
DROPOUT=0.5
import time


# --- Module-level setup: load data, build DataLoaders, construct model/optimizer ---
print('---获取数据---')
# Project-local loader; exposes train_x/test_x (token-id sequences) and
# train_y/test_y (integer class labels) plus a char_2_idx vocabulary dict.
data_loader=DataLoader(max_sentence_length=SEQ_LEN,min_words=10)
train_x=torch.LongTensor(data_loader.train_x)
test_x=torch.LongTensor(data_loader.test_x)
train_y=torch.LongTensor(data_loader.train_y)
test_y=torch.LongTensor(data_loader.test_y)
# Wrap tensors in a Dataset that torch's DataLoader can consume.
torch_dataset_train=Data.TensorDataset(train_x, train_y)
torch_dataset_test=Data.TensorDataset(test_x, test_y)
# Put the Datasets into batched DataLoaders.
train_loader = Data.DataLoader(
    dataset=torch_dataset_train,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=2,
    # drop_last=True,
)
test_loader = Data.DataLoader(
    dataset=torch_dataset_test,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=2,
    # drop_last=True,
)
# Vocabulary size (number of distinct characters) — used as the embedding size arg.
word_length= len(data_loader.char_2_idx)


# Model / optimizer / loss definitions.

from model.model_BiLSTM_Two import BiLSTM
from args import parse_arguments
# args=parse_arguments()
rnn=BiLSTM(word_length)
# rnn = torch.load('emotional_analysis.pkl', map_location=lambda storage, loc: storage)

# rnn=R_LSTM(word_length)   #lstm
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
# Per-class weights for the 3-class loss (classes 0 and 2 weighted higher than 1).
weigh_c=torch.Tensor([0.4,0.2,0.4]).cuda()
loss_func = nn.CrossEntropyLoss(weight=weigh_c)
all_loss=[]  # per-step training losses, appended by train(), plotted by plt_loss_show()
acc=[]       # NOTE(review): appears unused below — kept for compatibility
rnn.cuda()   # whole script assumes a CUDA device is available
def train(epoch, train_loader):
    """Run one training epoch of the global model ``rnn`` over ``train_loader``.

    Appends each batch's scalar loss to the module-level ``all_loss`` list
    and updates parameters with the module-level ``optimizer``/``loss_func``.

    :param epoch: epoch index, used only for the progress banner
    :param train_loader: iterable of (data, target) LongTensor batches
    """
    rnn.train()
    print('---' + str(epoch) + ' epoch---')
    for data, target in tqdm(train_loader, desc='  - (Training)  '):
        # Variable() is a deprecated no-op since PyTorch 0.4 — tensors autograd directly.
        data, target = data.cuda(), target.cuda()
        output = rnn(data)
        loss = loss_func(output, target)
        # .item() replaces the .data.cpu().numpy().flatten()[0] round-trip.
        all_loss.append(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


def test(loader):
    """Evaluate the global model ``rnn`` on ``loader``; return accuracy in percent.

    Prints the (approximate per-batch) average loss and the accuracy, and
    returns the accuracy as a float — callers compare it against a running best.
    """
    rnn.eval()
    test_loss = 0.0
    correct = 0
    for data, target in tqdm(loader, leave=False):
        with torch.no_grad():
            data, target = data.cuda(), target.cuda()
            output = rnn(data)
            # .item() accumulates plain floats instead of holding GPU tensors.
            test_loss += loss_func(output, target).item()
            pred = output.max(dim=1)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    total = len(loader.dataset)
    # Bug fix: the original computed accuracy inside the loop, which both
    # recomputed it per batch and raised NameError on an empty loader.
    accuracy = 100. * correct / total
    # Same normalization as the original (ignores a partial last batch).
    test_loss /= total / BATCH_SIZE
    print('\nset: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, total, accuracy))
    return accuracy

def plt_confusion_matrix(loader):
    """Load the saved checkpoint and collect CPU predictions over ``loader``.

    :param loader: iterable of (data, target) batches
    :return: (pred_label, y) — flat Python lists of predicted and true labels,
             suitable for sklearn's classification_report / confusion_matrix
    """
    pred_label = []
    y = []
    # SECURITY NOTE: torch.load unpickles arbitrary objects — only load
    # checkpoints you produced yourself (this file saves it in main()).
    model = torch.load('emotional_analysis.pkl', map_location=lambda storage, loc: storage)
    # Bug fix: the original never switched the loaded model to eval mode,
    # so dropout stayed active during inference and skewed predictions.
    model.eval()
    for data, target in loader:
        with torch.no_grad():
            output = model(data)
            pred = output.max(dim=1)[1]
            pred_label.extend(pred.cpu().numpy().flatten().tolist())
            y.extend(target.cpu().numpy().tolist())
    return pred_label, y


def plt_loss_show(loss):
    """Plot the per-step training loss curve, save it to 'test2.jpg', and show it.

    :param loss: sequence of scalar loss values (one per training step)
    """
    # A label is required for legend() to have an entry to draw.
    plt.plot(loss, label='loss')
    plt.legend(loc='best')
    plt.xlabel('Steps')
    plt.ylabel('Loss')
    # Bug fix: save BEFORE show — plt.show() releases the current figure,
    # so the original savefig-after-show wrote an empty image.
    plt.savefig('test2.jpg')
    plt.show()

def main():
    """Train for EPOCH epochs, checkpoint the best model, then plot the loss
    curve and print a classification report on the test set."""
    best_acc = 0
    for epoch in range(EPOCH):
        train(epoch, train_loader)
        print('---Train---')
        test(train_loader)
        print('---Test---')
        accuracy = test(test_loader)
        # Keep only the checkpoint with the best (>=) test accuracy so far.
        if accuracy >= best_acc:
            best_acc = accuracy
            torch.save(rnn, 'emotional_analysis.pkl')
            print('--model saved--')

    plt_loss_show(all_loss)

    # Confusion-matrix data collection, timed for the report.
    start = time.time()
    pred_label, y = plt_confusion_matrix(test_loader)
    elapsed = time.time() - start
    print('Time:', str(elapsed))
    print(classification_report(y_true=y, y_pred=pred_label))



def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render the confusion matrix ``cm`` as an annotated matplotlib image.

    When ``normalize`` is true, each row is scaled to sum to 1 before display.
    The matrix is also printed to stdout.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    cell_fmt = '.2f' if normalize else 'd'
    # Cells brighter than half the max get black text, darker cells get white.
    cutoff = cm.max() / 2.
    n_rows, n_cols = cm.shape
    for row in range(n_rows):
        for col in range(n_cols):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > cutoff else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
# Script entry point: run the full train / evaluate / report pipeline.
if __name__ == '__main__':
    main()

