import os
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from data import Dataset_Pro
import scipy.io as sio
from models.GPT4CP import Model
import numpy as np
import shutil
from torch.utils.tensorboard import SummaryWriter
from metrics import NMSELoss, SE_Loss

# ============= HYPER PARAMS(Pre-Defined) ==========#
# Environment variables and hyper-parameters for training.
lr = 0.0001  # Adam learning rate
epochs = 500  # number of training epochs
batch_size = 1024  # samples per mini-batch
device = torch.device('cuda')  # run on the default CUDA device

best_loss = 100  # best validation loss so far; start high so the first epoch always checkpoints
save_path = "Weights/U2U_LLM4CP.pth"  # checkpoint path (stores the whole model object, not a state_dict)
train_TDD_r_path = "./H_U_his_train.mat"  # training input path (historical uplink CSI, per filename)
train_TDD_t_path = "./H_U_pre_train.mat"  # training target path (future uplink CSI, per filename)
# NOTE(review): `key` appears unused in this file — presumably the .mat variable
# names consumed by Dataset_Pro; confirm against data.py before removing.
key = ['H_U_his_train', 'H_U_pre_train', 'H_D_pre_train']
train_set = Dataset_Pro(train_TDD_r_path, train_TDD_t_path, is_train=1, is_U2D=0, is_few=0)  # build the training dataset
validate_set = Dataset_Pro(train_TDD_r_path, train_TDD_t_path, is_train=0, is_U2D=0)  # build the validation dataset (same files, is_train=0)
# Build the model: pred_len/prev_len suggest 4 predicted steps from 16 observed steps.
model = Model(gpu_id=0,
              pred_len=4, prev_len=16,
              UQh=1, UQv=1, BQh=1, BQv=1).to(device)
# Resume from an existing checkpoint if one is present.
# NOTE(review): torch.load of a full model object unpickles arbitrary code —
# only safe with trusted checkpoint files; consider state_dict + weights_only.
if os.path.exists(save_path):
    model = torch.load(save_path, map_location=device)

def save_best_checkpoint(model):
    """Persist the full model object to the configured checkpoint path.

    Saves the entire model (not just a state_dict) to the module-level
    `save_path`, matching the full-object `torch.load` used at startup.
    """
    torch.save(model, save_path)


###################################################################
# ------------------- Main Train (Run second) ----------------------------------
###################################################################
def train(training_data_loader, validate_data_loader):
    """Train `model` for `epochs` epochs, validating after each one.

    Uses the module-level `model`, `optimizer`, `criterion`, `device`,
    `epochs`, and `best_loss`. Whenever the mean validation loss improves
    on `best_loss`, the full model is checkpointed via
    `save_best_checkpoint`.

    Args:
        training_data_loader: DataLoader yielding (target, history) batches.
        validate_data_loader: DataLoader for the validation split.
    """
    global best_loss  # `epochs` is only read, so it needs no global declaration
    print('Start training...')
    for epoch in range(epochs):
        epoch_train_loss, epoch_val_loss = [], []
        # ============Epoch Train=============== #
        model.train()

        for iteration, batch in enumerate(training_data_loader, 1):
            # NOTE: batch[0] is the prediction target, batch[1] the observed history.
            # (torch.autograd.Variable is deprecated — tensors work directly.)
            pred_t = batch[0].to(device)
            prev = batch[1].to(device)
            optimizer.zero_grad()  # clear gradients from the previous step
            pred_m = model(prev, None, None, None)
            loss = criterion(pred_m, pred_t)  # compute loss
            epoch_train_loss.append(loss.item())  # collect per-batch losses for this epoch

            loss.backward()
            optimizer.step()

        #       lr_scheduler.step()  # update lr

        # Mean of all batch losses = the epoch's training loss.
        t_loss = np.nanmean(np.array(epoch_train_loss))
        print('Epoch: {}/{} training loss: {:.7f}'.format(epoch + 1, epochs, t_loss))  # print loss for each epoch

        # ============Epoch Validate=============== #
        model.eval()
        with torch.no_grad():
            for iteration, batch in enumerate(validate_data_loader, 1):
                pred_t = batch[0].to(device)
                prev = batch[1].to(device)
                # No optimizer.zero_grad() here: no gradients exist under no_grad().
                pred_m = model(prev, None, None, None)
                loss = criterion(pred_m, pred_t)  # compute loss
                epoch_val_loss.append(loss.item())
            v_loss = np.nanmean(np.array(epoch_val_loss))
            print('validate loss: {:.7f}'.format(v_loss))
            # Checkpoint whenever validation improves.
            if v_loss < best_loss:
                best_loss = v_loss
                save_best_checkpoint(model)


###################################################################
# ------------------- Main Function (Run first) -------------------
###################################################################
def _report_param_counts():
    """Print the model's total and trainable parameter counts in millions."""
    total = sum(param.nelement() for param in model.parameters())
    print("Number of parameter: %.5fM" % (total / 1e6))
    total_learn = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Number of learnable parameter: %.5fM" % (total_learn / 1e6))


if __name__ == "__main__":
    # Report model size before training.
    _report_param_counts()
    # Build the data loaders:
    #   shuffle=True    reshuffle at the start of every epoch
    #   pin_memory=True speeds up host-to-GPU transfer
    #   drop_last=True  drop the final incomplete batch
    training_data_loader = DataLoader(dataset=train_set, num_workers=0, batch_size=batch_size, shuffle=True,
                                      pin_memory=True,
                                      drop_last=True)
    # NOTE(review): shuffling the validation loader is harmless but unnecessary.
    validate_data_loader = DataLoader(dataset=validate_set, num_workers=0, batch_size=batch_size,
                                      shuffle=True,
                                      pin_memory=True,
                                      drop_last=True)
    # Optimizer and loss; both are read as globals by train().
    optimizer = optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=0.0001)
    criterion = NMSELoss().to(device)
    # Run the full training/validation loop.
    train(training_data_loader, validate_data_loader)
    # Report model size again after training (unchanged unless the model was swapped).
    _report_param_counts()
