import argparse
import time
import torch
import torch.nn as nn
import data_preprocess
import model
from torch import optim
from calculate import cal_mean_covariance

# Define and parse the prediction-model hyperparameters.


def _str2bool(value):
    """Parse a command-line boolean.

    argparse's `type=bool` is a trap: `bool('False')` is True because any
    non-empty string is truthy, so `--augment False` would silently enable
    augmentation. This converter parses the usual spellings explicitly.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got {!r}'.format(value))


parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='ecg', help='数据集种类')
parser.add_argument('--filename', type=str, default='xmitdb_x108_0.pkl', help='数据文件名')
parser.add_argument('--model', type=str, default='LSTM', help='神经网络类型')
# was `type=bool`, which parsed every non-empty string (incl. "False") as True
parser.add_argument('--augment', type=_str2bool, default=True, help='数据增强')
parser.add_argument('--input_size', type=int, default=32, help='RNN输入特征的规模')
parser.add_argument('--units_num', type=int, default=32, help='每个隐藏层中隐藏单元的个数')  # hidden-state dimensionality
parser.add_argument('--layers_num', type=int, default=2, help='隐藏层数')  # number of stacked LSTM layers
parser.add_argument('--learning_rate', type=float, default=0.0002, help='学习率')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='权值衰减')  # L2 regularization, combats overfitting
parser.add_argument('--clip', type=float, default=10, help='所有参数梯度范数的上界')  # gradient-norm cap, combats exploding gradients
parser.add_argument('--epochs', type=int, default=400, help='训练时期数')
parser.add_argument('--batch_size', type=int, default=64, help='一批数据的样本量')
parser.add_argument('--seq_length', type=int, default=50, help='序列长度')
parser.add_argument('--dropout', type=float, default=0.2, help='dropout的比例')  # combats overfitting
parser.add_argument('--seed', type=int, default=1111, help='随机数种子')
parser.add_argument('--device', type=str, default='cuda', help='GPU')
parser.add_argument('--log_interval', type=int, default=10, help='记录的时间间隔')
parser.add_argument('--save_interval', type=int, default=10, help='保存的时间间隔')
parser.add_argument('--prediction_window_size', type=int, default=10, help='预测窗口的大小')
args = parser.parse_args()

# Seed the RNGs so that every run produces the same random numbers.
torch.manual_seed(args.seed)  # CPU RNG
torch.cuda.manual_seed(args.seed)  # GPU RNG; presumably a no-op without CUDA — verify against the torch version in use

# Load the dataset and split it into batched train/test tensors.
all_dataset = data_preprocess.PreprocessData(args.data, args.filename, augment=args.augment)
train_dataset = all_dataset.batch(args, all_dataset.trainData, args.batch_size)  # training set
test_dataset = all_dataset.batch(args, all_dataset.testData, args.batch_size)  # test set

# Build the model (input and output feature dims are both `feature_dim`,
# i.e. the network predicts the next time step of the same signal).
feature_dim = all_dataset.trainData.size(1)  # number of input features (assumes trainData is 2-D: time x features — TODO confirm)
# NOTE(review): this rebinding shadows the imported `model` module; all later code
# depends on the instance, so renaming is left for a wider refactor.
model = model.Predictor(args.model, feature_dim, args.input_size, args.units_num, feature_dim, args.layers_num,
                        args.dropout).to(args.device)  # encoder-LSTM-decoder
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)  # Adam optimizer
criterion = nn.MSELoss()  # mean-squared-error loss, averaged over all elements


# Slice out the current sequence and the sequence shifted one step ahead.
def get_sequence(dataset, i, seq_length=None):
    """Return the chunk of `dataset` starting at `i` and its one-step-ahead target.

    Args:
        dataset: indexable sequence of time steps (tensor or list-like).
        i: start index of the chunk.
        seq_length: maximum chunk length; defaults to the module-level
            `args.seq_length`, preserving the original behavior. The chunk is
            shortened near the end so the target (shifted by one) stays in range.

    Returns:
        (data, target): `dataset[i:i+L]` and `dataset[i+1:i+1+L]` where
        `L = min(seq_length, len(dataset) - 1 - i)`.
    """
    if seq_length is None:
        seq_length = args.seq_length
    curr_length = min(seq_length, len(dataset) - 1 - i)  # clamp so target never runs past the end
    data = dataset[i:i + curr_length]  # current sequence
    target = dataset[i + 1:i + 1 + curr_length]  # same sequence shifted one step ahead
    return data, target


# Train the model for one epoch
def train():
    """Run one training epoch over `train_dataset` with teacher forcing.

    Uses the module-level `model`, `optimizer`, `criterion`, `train_dataset`
    and `args`; reads the global `epoch` for logging. Hidden state is carried
    across chunks but detached each step (truncated BPTT).
    """
    with torch.enable_grad():
        model.train()  # train mode: enables dropout (and batch norm, if the model has any)
        total_loss = 0
        start_time = time.time()
        hidden = model.init_hidden(args.batch_size)

        # Walk the training data in non-overlapping chunks of seq_length steps.
        for batch, i in enumerate(range(0, train_dataset.size(0) - 1, args.seq_length)):
            inputSeq, targetSeq = get_sequence(train_dataset, i)  # current chunk and its one-step-ahead target

            # Truncate the graph: detach the hidden state from previous chunks
            hidden = model.repackage_hidden(hidden)
            hidden_ = model.repackage_hidden(hidden)  # only consumed by the commented-out free-running branch below

            # Teacher forcing loss: feed the ground-truth sequence, predict the next step
            outSeq2, hidden, hid2 = model.forward(inputSeq, hidden, if_return_hidden=True)
            loss = criterion(outSeq2.contiguous().view(args.batch_size, -1),
                             targetSeq.contiguous().view(args.batch_size, -1))  # mean squared error

            # Free running loss (alternative training mode, kept for reference):
            # feed the model's own previous output back in instead of ground truth.
            # decodedSeq = inputSeq[0].unsqueeze(0)  # previous step's output becomes the next input
            # outSeq1 = []  # decoder outputs
            # hid1 = []  # hidden-layer outputs (after dropout)
            # for _ in range(inputSeq.size(0)):
            #     decodedSeq, hidden_, hid = model.forward(decodedSeq, hidden_, if_return_hidden=True)
            #     outSeq1.append(decodedSeq)
            #     hid1.append(hid)
            # outSeq1 = torch.cat(outSeq1, dim=0)
            # hid1 = torch.cat(hid1, dim=0)
            # loss = criterion(outSeq1.contiguous().view(args.batch_size, -1),
            #                  targetSeq.contiguous().view(args.batch_size, -1))  # mean squared error
 
            total_loss += loss.item()

            # Optimization step
            loss.backward()  # backpropagate; gradients are fresh because zero_grad() ran after the previous step
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)  # clip gradient norm to prevent explosion
            optimizer.step()  # apply one parameter update
            optimizer.zero_grad()  # clear gradients for the next iteration

            # Periodic progress logging
            if batch % args.log_interval == 0 and batch > 0:
                cur_loss = total_loss / args.log_interval  # average loss since last log
                elapsed = time.time() - start_time
                print('| epoch {:3d} | {:5d}/{:5d} batches | {:5.4f} ms/batch | ''loss {:5.2f} '.format(
                    epoch, batch, len(train_dataset) // args.seq_length, elapsed * 1000 / args.log_interval, cur_loss))

                # Reset the running totals for the next log window
                total_loss = 0
                start_time = time.time()


# Predict on the test set and return its average loss
def evaluate():
    """Return the mean teacher-forcing MSE over `test_dataset` chunks.

    Uses the module-level `model`, `criterion`, `test_dataset` and `args`.
    """
    model.eval()  # eval mode: use learned statistics, disable dropout
    with torch.no_grad():  # no gradient tracking: saves compute and memory
        total_loss = 0
        hidden = model.init_hidden(args.batch_size)

        # Walk the test data in non-overlapping chunks of seq_length steps.
        for batch, i in enumerate(range(0, test_dataset.size(0) - 1, args.seq_length)):
            inputSeq, targetSeq = get_sequence(test_dataset, i)  # current chunk and its one-step-ahead target

            # Detach the hidden state from previous chunks
            hidden = model.repackage_hidden(hidden)
            hidden_ = model.repackage_hidden(hidden)  # only consumed by the commented-out free-running branch below

            # Teacher forcing loss
            outSeq2, hidden, hid2 = model.forward(inputSeq, hidden, if_return_hidden=True)
            loss = criterion(outSeq2.contiguous().view(args.batch_size, -1),
                             targetSeq.contiguous().view(args.batch_size, -1))  # mean squared error

            # Free running loss (alternative evaluation mode, kept for reference)
            # decodedSeq = inputSeq[0].unsqueeze(0)  # previous step's output becomes the next input
            # outSeq1 = []  # decoder outputs
            # hid1 = []  # hidden-layer outputs (after dropout)
            # for _ in range(inputSeq.size(0)):
            #     decodedSeq, hidden_, hid = model.forward(decodedSeq, hidden_, if_return_hidden=True)
            #     outSeq1.append(decodedSeq)
            #     hid1.append(hid)
            # outSeq1 = torch.cat(outSeq1, dim=0)
            # hid1 = torch.cat(hid1, dim=0)
            # loss = criterion(outSeq1.contiguous().view(args.batch_size, -1),
            #                  targetSeq.contiguous().view(args.batch_size, -1))  # mean squared error

            total_loss += loss.item()

    # NOTE(review): `batch` leaks from the loop; if test_dataset has < 2 rows the
    # loop never runs and this raises NameError — confirm the data loader guarantees it.
    return total_loss / (batch + 1)


# Start training from scratch
epoch = 1  # current epoch (kept so the final checkpoint is valid even if training is interrupted early)
# BUG FIX: this was initialized to 0. Since MSE is non-negative, `val_loss < 0`
# was never true, so `is_best_loss` was always False and the best model was
# never marked/saved. Initialize to +inf so the first validation always wins.
best_val_loss = float('inf')  # best validation loss seen so far
print("=> 开始训练")
print('-' * 88)

try:
    for epoch in range(1, args.epochs + 1):
        epoch_start_time = time.time()  # start time of this epoch
        train()  # one training epoch
        val_loss = evaluate()  # average loss on the test set
        print('-' * 88)
        print('| end of epoch {:3d} | time: {:5.2f}s | validation loss {:5.4f} | '.format(epoch, (
                time.time() - epoch_start_time), val_loss))
        print('-' * 88)

        # Save a checkpoint every `save_interval` epochs.
        # NOTE(review): best_val_loss is only compared/updated on these epochs,
        # so an intermediate best can be missed — presumably intentional; confirm.
        if epoch % args.save_interval == 0:
            is_best_loss = val_loss < best_val_loss  # did this epoch set a new best?
            best_val_loss = min(val_loss, best_val_loss)  # track the best loss
            # checkpoint payload
            model_dictionary = {'epoch': epoch,
                                'best_loss': best_val_loss,
                                'state_dict': model.state_dict(),
                                'optimizer': optimizer.state_dict(),
                                'args': args
                                }
            model.save_checkpoint(model_dictionary, is_best_loss)  # persist; flags the best model

# Allow early exit from training via Ctrl+C
except KeyboardInterrupt:
    print('-' * 88)
    print('提前退出训练')

# Estimate the mean and covariance of each channel's prediction error on the
# training data, then store them inside the final trained checkpoint.
print('=> 计算均值和协方差')
cal_train_dataset = all_dataset.batch(args, all_dataset.trainData, batch_size=1)
channel_stats = [
    cal_mean_covariance(args, model, cal_train_dataset[:all_dataset.length], channel_idx)
    for channel_idx in range(model.encoder_input_size)
]
means = [mean for mean, _ in channel_stats]
covariances = [covariance for _, covariance in channel_stats]

# Final checkpoint: training state plus the per-channel error statistics.
final_checkpoint = {
    'epoch': epoch,
    'best_loss': best_val_loss,
    'state_dict': model.state_dict(),
    'optimizer': optimizer.state_dict(),
    'args': args,
    'means': means,
    'covariances': covariances,
}
model.save_checkpoint(final_checkpoint, is_best_loss=True)
print('-' * 88)
