# Training loop definition: trains several CNN architectures and records metrics
import os
from pyexpat import model
from re import T
from matplotlib import image
# from matplotlib import image
import torch
import torch.utils.data as data
import torch.nn as nn
import numpy as np
from torch import optim
import pandas as pd
from models import CNN_face, vggnet, resnet, inceptionv3
from dataloader import rewrite_dataset
from tqdm import tqdm
import random
import matplotlib.pyplot as pl
# Configure matplotlib to render CJK glyphs (SimHei) and show the minus sign
# correctly when a CJK font is active.
pl.rcParams['font.sans-serif'] = ['SimHei']
pl.rcParams['axes.unicode_minus'] = False
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import time
from sklearn.metrics import  precision_score, recall_score, f1_score

# Visualization helpers
def plot_value(train_value, test_value, epochs, model_name, value=None, save_dir="Raf"):
    """Plot a train/test metric curve over epochs and save it as a JPEG.

    Args:
        train_value / test_value: per-epoch metric sequences of length `epochs`.
        epochs: number of epochs (x-axis length).
        model_name: per-model output sub-directory name.
        value: metric name used in the legend, title and output file name.
        save_dir: root output directory.

    Side effect: writes "<save_dir>/<model_name>/<value>.jpg".
    """
    pl.figure(figsize=(8, 6))
    pl.plot(np.arange(epochs), train_value, '.-', label=f'Train {value}', markevery=20)
    pl.plot(np.arange(epochs), test_value, '*-', label=f'Test {value}', markevery=20)
    pl.xlabel('epoch')
    pl.ylabel('value')
    pl.legend()
    pl.title(f"{value} for {model_name}")
    os.makedirs(f"{save_dir}/{model_name}", exist_ok=True)
    pl.savefig(f"{save_dir}/{model_name}/{value}.jpg")
    # Close the figure: this is called many times per run and matplotlib
    # otherwise accumulates open figures (memory growth + warnings).
    pl.close()

def plot_matrix_and_bar(y_true, y_pred, labels_name=None, matrix_title=None, bar_title=None, model_name=None, save_dir="Raf_result"):
    """Save a confusion matrix and a per-class correct/incorrect bar chart.

    Args:
        y_true / y_pred: equal-length sequences of class indices.
        labels_name: display names for the classes (x-axis / matrix labels).
        matrix_title / bar_title: titles, also used as output file names.
        model_name: per-model output sub-directory name.
        save_dir: root output directory.

    Side effects: writes "<save_dir>/<model_name>/<matrix_title>.jpg" and
    "<save_dir>/<model_name>/<bar_title>.jpg".
    """
    # Create the output directory up-front instead of retrying savefig on
    # FileNotFoundError (the original try/except pattern).
    out_dir = f"{save_dir}/{model_name}"
    os.makedirs(out_dir, exist_ok=True)

    # Confusion matrix computed by sklearn, drawn with its display helper.
    pl.figure(figsize=(8, 8))
    confusion_mat = confusion_matrix(y_true, y_pred)
    disp = ConfusionMatrixDisplay(confusion_matrix=confusion_mat, display_labels=labels_name)
    disp.plot(
        include_values=True,            # show the raw count in every cell
        cmap="viridis",
        ax=None,
        xticks_rotation="horizontal",
        values_format="d"               # integer cell format
    )
    pl.title(matrix_title)
    pl.savefig(f"{out_dir}/{matrix_title}.jpg")

    # Bar chart: correctly vs. incorrectly classified samples per class.
    classify_true, classify_false = cat_classify(y_true, y_pred)
    pl.figure(figsize=(8, 8))
    x = np.arange(len(labels_name))  # the label locations
    width = 0.35                     # the width of the bars
    pl.bar(x - width / 2, classify_true, width, label='true')
    pl.bar(x + width / 2, classify_false, width, label='false')
    pl.ylabel('num')
    pl.title('True and false classify before reduce')
    pl.xticks(x, labels_name)
    pl.legend()
    # Annotate each bar with its count.
    for a, b in zip(x - width / 2, classify_true):
        pl.text(a, b, b, ha='center', va='bottom', fontsize=7)
    for a, b in zip(x + width / 2, classify_false):
        pl.text(a, b, b, ha='center', va='bottom', fontsize=7)
    pl.savefig(f"{out_dir}/{bar_title}.jpg")
    # Close both figures so repeated calls do not leak matplotlib state.
    pl.close('all')

    
    
def cat_classify(y_true, y_pred):
    """Count correct and incorrect predictions per class (7 classes).

    Args:
        y_true / y_pred: equal-length sequences of class indices in [0, 7).

    Returns:
        (true_list, false_list): two 7-element lists holding, for each class
        of `y_true`, the number of correctly and incorrectly classified
        samples.

    Note: the original implementation derived false counts from
    `pd.value_counts(y_true).sort_index()`, which crashed with a shape
    mismatch whenever fewer than 7 classes appeared in `y_true` (and
    `pd.value_counts` is deprecated).  Counting misses directly gives the
    same result when all classes are present and is robust otherwise.
    """
    assert len(y_true) == len(y_pred), "真实label数量和预测的label数量不相等"
    true_list = [0] * 7
    false_list = [0] * 7
    for t, p in zip(y_true, y_pred):
        if t == p:
            true_list[int(t)] += 1
        else:
            false_list[int(t)] += 1
    return true_list, false_list


def train(train_dataset, val_dataset, batch_size, epochs, learning_rate, wt_decay, print_cost=True, isPlot=True):
    """Train several CNN architectures on the same data and record metrics.

    Args:
        train_dataset / val_dataset: map-style datasets yielding (image, label).
        batch_size: mini-batch size for both loaders.
        epochs: number of passes over the training set per model.
        learning_rate / wt_decay: SGD learning rate and weight decay.
        print_cost: print per-epoch loss/accuracy when True.
        isPlot: save per-metric training curves when True.

    Side effects: writes plot images under "fer2013_result/<model_name>/" and
    the trained models under "logs/".  Reads the module-level global `device`.
    """
    # Data loaders (shuffle only the training set).
    train_loader = data.DataLoader(train_dataset, batch_size, shuffle=True)
    val_loader = data.DataLoader(val_dataset, batch_size)

    # Build the models to compare.  The name list below must stay aligned
    # with the order of `models` (the original code had the inceptionv3 and
    # vggnet16 labels swapped, so results were saved under the wrong name).
    model_resnet50 = resnet.resnet50(num_classes=7).to(device)
    model_inceptionv3 = inceptionv3.InceptionV3(num_classes=7).to(device)
    model_vggnet16 = vggnet.Vgg16_net(num_classes=7).to(device)
    models = [model_inceptionv3, model_vggnet16, model_resnet50]
    model_names = ["inceptionv3", "vggnet16", "resnet50"]

    compute_loss = nn.CrossEntropyLoss().to(device)

    for i, model in enumerate(models):
        model_name = model_names[i]
        # resnet50 was deliberately skipped in the original run; remove this
        # guard to train it as well.
        if model_name == "resnet50":
            continue

        start_time = time.time()
        loss_train, loss_test = [], []
        train_acc, test_acc = [], []
        train_p, test_p = [], []
        train_r, test_r = [], []
        train_f, test_f = [], []
        optimizer = optim.SGD(model.parameters(), lr=learning_rate, weight_decay=wt_decay)

        for epoch in tqdm(range(epochs)):
            # ---- training pass ----
            model.train()
            for images, labels in train_loader:
                images = images.to(device)
                labels = torch.as_tensor(labels).to(device)
                outputs = model(images)
                loss1 = compute_loss(outputs, labels)
                optimizer.zero_grad()
                loss1.backward()
                optimizer.step()

            # ---- validation loss ----
            # Bug fix: the original loop moved the *training* batch tensors
            # (`images`, `labels`) to the GPU here, so the reported test loss
            # was actually computed on a training batch.
            model.eval()
            with torch.no_grad():
                for test_images, test_labels in val_loader:
                    test_images = test_images.to(device)
                    test_labels = torch.as_tensor(test_labels).to(device)
                    test_outputs = model(test_images)
                    test_loss = compute_loss(test_outputs, test_labels)

            # Record the loss of the last batch of each loader (as before).
            loss_train.append(loss1.item())
            loss_test.append(test_loss.item())

            # ---- per-epoch metrics ----
            acc_train, train_y_true, train_y_pred = validate(model, train_dataset, batch_size)
            acc_val, test_y_true, test_y_pred = validate(model, val_dataset, batch_size)
            t_p1 = precision_score(train_y_true, train_y_pred, average='macro')
            t_p2 = precision_score(test_y_true, test_y_pred, average='macro')
            t_r1 = recall_score(train_y_true, train_y_pred, average='micro')
            t_r2 = recall_score(test_y_true, test_y_pred, average='micro')
            t_f1 = f1_score(train_y_true, train_y_pred, average='micro')
            t_f2 = f1_score(test_y_true, test_y_pred, average='micro')

            acc_train = acc_train.cpu().numpy()
            acc_val = acc_val.cpu().numpy()
            train_acc.append(np.around(acc_train, 4))
            test_acc.append(np.around(acc_val, 4))
            train_p.append(t_p1)
            test_p.append(t_p2)
            train_r.append(t_r1)
            test_r.append(t_r2)
            train_f.append(t_f1)
            test_f.append(t_f2)

            if print_cost:
                print('epoch{}: train_loss:'.format(epoch + 1), loss1.item(), end="---")
                print('epoch{}: test_loss:'.format(epoch + 1), test_loss.item())
                print('acc_train: %.2f %%' % (acc_train * 100), end="---")
                print('acc_val: %.2f %%' % (acc_val * 100))

            # Confusion matrix / bar chart every 10 epochs.
            if (epoch + 1) % 10 == 0:
                labels_name = ["Surprised", "Fear", "Disgust", "Happy", "Sad", "Angry", "Neutral"]
                plot_matrix_and_bar(test_y_true, test_y_pred, labels_name=labels_name,
                                    matrix_title=f"matrix-visual-{model_name}",
                                    bar_title=f"bar-visual-{model_name}",
                                    model_name=model_name, save_dir="fer2013_result")

        # Save training curves for every tracked metric.
        if isPlot:
            plot_value(train_acc, test_acc, epochs, model_name, "acc", "fer2013_result")
            plot_value(loss_train, loss_test, epochs, model_name, "loss", "fer2013_result")
            plot_value(train_p, test_p, epochs, model_name, "precision", "fer2013_result")
            plot_value(train_r, test_r, epochs, model_name, "recall", "fer2013_result")
            plot_value(train_f, test_f, epochs, model_name, "f1_score", "fer2013_result")

        run_time = time.time() - start_time
        print("**********************************")
        print("train_p:", t_p1)
        print("test_p:", t_p2)
        print("train_r:", t_r1)
        print("test_r:", t_r2)
        print("train_f:", t_f1)
        print("test_f:", t_f2)
        print("************************************")
        print("*****----------time----------******")
        print("run-time", run_time)
        os.makedirs('logs', exist_ok=True)  # ensure save dir exists
        torch.save(model, f'logs/{model_name}_model_net.pkl')


# Evaluate the model's accuracy on a dataset
def validate(model, dataset, batch_size):
    """Evaluate `model` on `dataset` and collect labels/predictions.

    Args:
        model: classifier returning per-class scores from its forward pass.
        dataset: map-style dataset yielding (image, label).
        batch_size: evaluation batch size.

    Returns:
        (acc, y_true, y_pred): `acc` is a 0-dim tensor (callers chain
        `.cpu().numpy()` on it), y_true/y_pred are flat Python lists.
    """
    model.eval()
    val_loader = data.DataLoader(dataset, batch_size)
    # Use the model's own device instead of hard-coding .cuda(), so
    # evaluation also works on CPU-only machines (assumes the model has at
    # least one parameter).
    eval_device = next(model.parameters()).device
    result, total = 0.0, 0
    y_ture = np.array([])
    y_pred = np.array([])
    # Inference only: disable autograd to avoid building a graph (the
    # original version leaked autograd state here).
    with torch.no_grad():
        for images, labels in val_loader:
            images = images.to(eval_device)
            labels = labels.to(eval_device)
            pred = torch.argmax(model(images).data, dim=1)
            result += torch.sum(pred == labels)
            total += len(images)
            y_ture = np.append(y_ture, labels.cpu().numpy())
            y_pred = np.append(y_pred, pred.cpu().numpy())
    acc = result / total
    return acc, y_ture.tolist(), y_pred.tolist()

## Extract the label from a single dataset item
def get_alllabels_from_dataset(item):
    """Return the label — the second element — of a (sample, label) item."""
    sample_label = item[1]
    return sample_label

## Fix all random seeds so every run is reproducible
def setup_seed(seed):
    """Seed every RNG in use (torch CPU/GPU, numpy, stdlib random).

    Also forces cuDNN into deterministic mode so repeated runs with the same
    seed produce identical results.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True

def main():
    """Entry point: seed the RNGs, load both dataset splits, and train."""
    print("---------->Start Learning<---------------")
    setup_seed(2022)
    # Dataset roots are fixed relative paths; FaceDataset is project-local.
    training_data = rewrite_dataset.FaceDataset(root=r'dataset/cnn_train')
    validation_data = rewrite_dataset.FaceDataset(root=r'dataset/cnn_val')
    train(training_data, validation_data,
          batch_size=300, epochs=60, learning_rate=0.001, wt_decay=0,
          print_cost=True, isPlot=True)


if __name__ == '__main__':
    # `device` is intentionally a module-level global: train() reads it.
    on_gpu = torch.cuda.is_available()
    device = torch.device("cuda:0" if on_gpu else "cpu")
    print("Running on the GPU" if on_gpu else "Running on the CPU")
    main()