# -*- coding:utf-8 -*-

import os
import time
from tqdm import tqdm
from config import parsers
from utils import read_file, data_processing
from torch.utils.data import DataLoader
from model import BertTextModel_last_layer, BertClassifier, TextCNN
import torch
from torch.optim import AdamW
from torch.nn import CrossEntropyLoss #多类别损失函数
from sklearn.metrics import f1_score, accuracy_score, classification_report
from sklearn.model_selection import train_test_split
import numpy as np
# Fix: was `import matplotlib as plt` — only `rcParams` exists on the top-level
# package; `plt.figure` / `plt.plot` used in drawing() live in matplotlib.pyplot.
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK glyphs with SimHei (avoids □ boxes)
plt.rcParams['axes.unicode_minus'] = False    # keep the minus sign renderable with a CJK font


def train(model, device, trainLoader, opt, epoch, epochs):
    """Run one training epoch; print and return mean loss and exact-match accuracy.

    Args:
        model: multi-label classifier called as ``model(batch)`` where ``batch``
            is the full tuple from the loader (labels are the last element).
        device: torch device string, e.g. "cuda:0" or "cpu".
        trainLoader: DataLoader yielding tuples whose last item is the label tensor.
        opt: optimizer over ``model.parameters()``.
        epoch: current epoch index (0-based; printed 1-based).
        epochs: total number of epochs, for the progress message.

    Returns:
        (train_acc, train_loss): exact-match accuracy and mean per-batch loss.

    Note:
        Relies on the module-level ``loss_fn`` (BCEWithLogitsLoss).
    """
    model.train()
    train_loss_sum, count = 0.0, 0
    train_acc_sum, n = 0.0, 0
    # Snapshot the first parameter tensor BEFORE the epoch: the original captured
    # it inside the loop, so it only reflected the last batch and raised a
    # NameError on an empty loader when referenced after the loop.
    before = list(model.parameters())[0].clone()
    for batch_index, batch_con in enumerate(trainLoader):
        # (removed leftover debug `print(batch_con); exit()` that aborted
        # the whole run on the first batch)
        batch_con = tuple(p.to(device) for p in batch_con)
        pred = model(batch_con)
        opt.zero_grad()
        loss = loss_fn(pred, batch_con[-1].float())
        loss.backward()
        opt.step()
        #         scheduler.step()  # update learning rate (disabled)
        # .item() detaches from the autograd graph; accumulating the tensor
        # itself would keep every batch's graph alive and leak memory.
        train_loss_sum += loss.item()
        count += 1
        # Exact-match accuracy: a sample counts only if every label bit matches.
        # NOTE(review): pred is thresholded at 0.5 although loss_fn expects
        # logits — presumably the model already applies a sigmoid; confirm.
        train_acc_sum += sum(row.all().int().item()
                             for row in (pred.ge(0.5) == batch_con[-1].float()))
        n += batch_con[-1].shape[0]

    msg = "[{0}/{1}]\tTrain_Loss:{2:.4f}\tTrain_acc:{3:.4f}"
    print(msg.format(epoch + 1, epochs, train_loss_sum / count, train_acc_sum / n))
    after = list(model.parameters())[0].clone()  # same layer, after the updates
    print('模型的第0层更新幅度：', torch.sum(after - before))
    return train_acc_sum / n, train_loss_sum / count


def val(model, device, devLoader, save_best):
    """Evaluate on the dev set and checkpoint whenever accuracy improves.

    Computes exact-match (all-labels-correct) accuracy over ``devLoader`` and,
    when it beats the module-level ``acc_max``, saves the model's state dict
    to ``save_best`` and updates ``acc_max``.
    """
    global acc_max
    model.eval()
    correct, total = 0.0, 0
    with torch.no_grad():
        for batch in tqdm(devLoader):
            batch = tuple(t.to(device) for t in batch)
            out = model(batch)
            # A sample is correct only when every predicted label bit matches.
            matches = (out.ge(0.5) == batch[-1].float())
            correct += sum(r.all().int().item() for r in matches)
            total += batch[-1].shape[0]

        acc = correct / total
        print(f"val_acc:{acc:.4f}")

        if acc > acc_max:
            acc_max = acc
            torch.save(model.state_dict(), save_best)
            print(f"已保存最佳模型")


def test(model, model_path, device, testLoader):
    """Load the saved checkpoint and report metrics on the test set.

    Prints a per-class classification report, subset accuracy, and micro-F1.
    ``model_path`` is the state-dict file written during validation.
    """
    predictions, gold = [], []
    model.load_state_dict(torch.load(model_path))
    model.eval()
    with torch.no_grad():
        for batch in tqdm(testLoader):
            batch = tuple(t.to(device) for t in batch)
            out = model(batch)
            # Binarize at 0.5 to get multi-hot label predictions.
            hard = (out > 0.5).float()
            predictions.append(hard.cpu().numpy())
            gold.append(batch[-1].cpu().numpy())

        merged_tensor = np.vstack(predictions)
        label_tensor = np.concatenate(gold)

        report = classification_report(label_tensor, merged_tensor, target_names=['伤感', '励志', '开心', '思念', '甜蜜'])
        print('classification report:\n', report)
        test_acc = accuracy_score(label_tensor, merged_tensor)
        f1_micro = f1_score(label_tensor, merged_tensor, average='micro')
        print(f'test_acc: {test_acc:.4f}\tF1_micro: {f1_micro:.4f}')

def drawing(epochs, acc_list, loss_list, save_name):
    """Plot per-epoch training accuracy and loss and save the figure as PNG.

    Args:
        epochs: number of epochs (x-axis runs 1..epochs).
        acc_list: per-epoch training accuracy values.
        loss_list: per-epoch training loss values.
        save_name: base file name (without extension) for the saved figure.
    """
    out_dir = './acc_loss_tu'
    # savefig raises FileNotFoundError if the target folder does not exist.
    os.makedirs(out_dir, exist_ok=True)
    plt.figure(figsize=(10, 5))
    # Accuracy curve
    plt.plot(range(1, epochs + 1), acc_list, marker='o', label='准确率', color="#038355")
    # Loss curve
    plt.plot(range(1, epochs + 1), loss_list, marker='x', label='损失', color="#ffc34e")
    plt.title('模型评估指标')
    plt.xlabel('迭代次数')
    plt.grid(True)
    plt.legend(loc='upper left')
    plt.savefig(os.path.join(out_dir, save_name + '.png'))
    # Release the figure so repeated calls don't accumulate open figures.
    plt.close()


if __name__ == "__main__":
    # End-to-end pipeline: load data, build the chosen model, split the data,
    # train with per-epoch validation, evaluate on the test split, plot curves.
    start = time.time()
    args = parsers()

    data_path = r'./data/中文歌曲数据.csv'
    texts, labels = read_file(data_path)

    # Model selection: 1: Bert-TextCNN  2: Bert-Fc  3: Word2vec-TextCNN  4: random_embedding-TextCNN
    model_choose = 1

    # NOTE(review): these are independent `if`s rather than if/elif — harmless
    # while model_choose is a constant, but an out-of-range value would leave
    # `model`, `data`, `save_name`, `lr` undefined.
    if model_choose == 1:
        # Model 1: BERT encoder + TextCNN head
        data = data_processing(texts, labels, bert=True)
        tokenizer = data.tokenizer
        save_name = 'bert-textcnn'
        model = BertTextModel_last_layer()  # bert+textcnn
        lr = 5e-5

    if model_choose == 2:
        # Model 2: plain BERT classifier
        data = data_processing(texts, labels, bert=True)
        tokenizer = data.tokenizer
        save_name = 'bert'
        model = BertClassifier()  # bert only
        lr = 5e-5
    if model_choose == 3:
        # Model 3: TextCNN over pretrained word2vec embeddings
        data = data_processing(texts, labels, bert=False)
        vocab_dict = data.vocab_dict
        embedding_matrix = data.embedding_matrix
        save_name = 'word2vec_embedding_textcnn'
        model = TextCNN(vocab_dict, embedding_random=False, embedding_matrix=embedding_matrix)
        lr = 1e-3
    if model_choose == 4:
        # Model 4: TextCNN with randomly initialized embeddings
        data = data_processing(texts, labels, bert=False)
        vocab_dict = data.vocab_dict
        save_name = 'random_embedding_textcnn'
        model = TextCNN(vocab_dict)
        lr = 1e-3

    texts, labels = data.texts_processing(), data.labels_processing()  # processed inputs and labels
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    model.to(device)

    save_best = args.save_model+'/'+str(save_name)+'.pth'  # best-checkpoint save path

    opt = AdamW(model.parameters(), lr=lr)  # optimizer
    #     scheduler = get_cosine_schedule_with_warmup(opt, num_warmup_steps=len(train_loader),
    #                                             num_training_steps=epochs*len(train_loader))
    loss_fn = torch.nn.BCEWithLogitsLoss()  # multi-label loss; read as a global by train()

    acc_max = float("-inf")  # best dev accuracy so far; updated as a global by val()
    train_acc_list, train_loss_list = [], []  # per-epoch training accuracy and loss
    # Split the data: 80/20 train/test, then 10% of train held out for validation.
    texts_train, texts_test, labels_train, labels_test = train_test_split(texts, labels,
                                                                          test_size=0.2,
                                                                          random_state=123)
    texts_train, texts_val, labels_train, labels_val = train_test_split(texts_train,labels_train,
                                                                        test_size=0.1,
                                                                        random_state=246)

    if model_choose < 3:
        # BERT tokenization: pad/truncate to args.max_len, return torch tensors.
        tokens_train = tokenizer(texts_train, padding=True, truncation=True, max_length=args.max_len,
                                 return_tensors='pt')
        tokens_val = tokenizer(texts_val, padding=True, truncation=True, max_length=args.max_len, return_tensors='pt')

        train_data = torch.utils.data.TensorDataset(tokens_train['input_ids'], tokens_train['attention_mask'],
                                                    tokens_train['token_type_ids'], torch.tensor(labels_train))

        val_data = torch.utils.data.TensorDataset(tokens_val['input_ids'], tokens_val['attention_mask'],
                                                  tokens_val['token_type_ids'], torch.tensor(labels_val))
    else:
        # word2vec / random-embedding path: texts are already id tensors.
        train_data = torch.utils.data.TensorDataset(texts_train, torch.tensor(labels_train))
        val_data = torch.utils.data.TensorDataset(texts_val, torch.tensor(labels_val))

    # NOTE(review): shuffle=False on the *training* loader is unusual — batches
    # repeat in the same order every epoch; confirm this is intentional.
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=False)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size, shuffle=False)

    for epoch in range(args.epochs):
        train_acc, train_loss = train(model, device, train_loader, opt, epoch, args.epochs)
        val(model, device, val_loader, save_best)
        # Record per-epoch accuracy and loss for the final plot.
        train_acc_list.append(train_acc)
        train_loss_list.append(train_loss)

    # Final evaluation on the held-out test split, using the best checkpoint.
    if model_choose < 3:
        tokens_test = tokenizer(texts_test, padding=True, truncation=True, max_length=args.max_len, return_tensors='pt')
        test_data = torch.utils.data.TensorDataset(tokens_test['input_ids'], tokens_test['attention_mask'],
                                                   tokens_test['token_type_ids'], torch.tensor(labels_test))
    else:
        test_data = torch.utils.data.TensorDataset(texts_test, torch.tensor(labels_test))
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False)
    test(model, save_best, device, test_loader)
    drawing(args.epochs, train_acc_list, train_loss_list, save_name)

    end = time.time()
    print(f"运行时间：{(end - start) // 3600} h  {(end - start) % 3600 // 60} m")