import os
import time
import torch
import argparse
import numpy as np
import pandas as pd
from torch import nn
from tqdm import tqdm
from torch.utils.data import DataLoader
from TimeLLMModels import TimeLLMForTEC
from utils import TECDataset, EarlyStopping
from torch.utils.tensorboard import SummaryWriter
import warnings

warnings.filterwarnings('ignore')

# TensorBoard writer for loss curves; module-global, shared by train()/validate(), closed in run()
writer = SummaryWriter()

# Disable HuggingFace tokenizer parallelism (avoids fork-related warnings/deadlocks with DataLoader workers)
os.environ["TOKENIZERS_PARALLELISM"] = "false"


'''
CLI configuration
'''
def set_configs(args=None):
    """Build the argument parser and return the parsed configuration.

    Args:
        args: optional list of argument strings to parse. Defaults to None,
            in which case ``sys.argv`` is used (original behavior). Passing
            an explicit list makes the function usable from tests/notebooks.

    Returns:
        argparse.Namespace with model, training and data options.
    """
    parser = argparse.ArgumentParser(description='TimeLLM For TEC')

    # --- model hyper-parameters ---
    parser.add_argument('--llm_model', type=str, default='Qwen/Qwen2-0.5B-Instruct', help='The name of LLM model')
    parser.add_argument('--llm_model_root', type=str, default='LLMs', help='预训练 LLM 的存放路径')
    # help text fixed: it was a copy-paste of --llm_model's description
    parser.add_argument('--domain_description', type=str, default='TEC 是描述电离层状态的重要信息', help='Domain background text describing TEC for the LLM prompt')
    parser.add_argument('--num_features', type=int, default=6, help='每个时刻的特征数量')
    parser.add_argument('--seq_len', type=int, default=48, help='输入时序长度')
    parser.add_argument('--pred_len', type=int, default=16, help='预测时序长度')
    parser.add_argument('--patch_len', type=int, default=16, help='每个 Patch 嵌入前的初始长度')
    parser.add_argument('--patch_stride', type=int, default=8, help='patching 步长')
    parser.add_argument('--d_merge', type=int, default=16, help='SEP和TEC输入输出融合的中间特征数')
    parser.add_argument('--d_model', type=int, default=32, help='每个 Patch 嵌入后的维度(输出映射前)')
    parser.add_argument('--n_heads', type=int, default=4, help='重编程模块头数')
    parser.add_argument('--d_ff', type=int, default=16, help='重编程模块每个头的维度以及 d_llm 降维后维度')
    parser.add_argument('--text_prototype_num', type=int, default=1000, help='Text Prototypes 的数量')
    parser.add_argument('--top_k', type=int, default=5, help='序列自相关的前 K 个最大值')
    parser.add_argument('--dropout', type=float, default=0.1, help='Dropout 系数')

    # --- training options ---
    parser.add_argument('--model_name', type=str, default='TimeLLMForTEC', help='模型名称(参数文件名称)')
    parser.add_argument('--model_save_path', type=str, default='models', help='模型存储路径')
    parser.add_argument('--batch_size', type=int, default=256, help='批次大小')
    parser.add_argument('--epochs', type=int, default=50, help='训练轮数')
    parser.add_argument('--learning_rate', type=float, default=0.001, help='学习率')
    parser.add_argument('--num_workers', type=int, default=8, help='线程数')
    parser.add_argument('--gamma', type=float, default=0.98, help='动态学习率指数衰减系数')
    parser.add_argument('--patience', type=int, default=5, help='早停容忍次数')
    parser.add_argument('--delta', type=float, default=0.002, help='早停阈值')

    # --- data options ---
    parser.add_argument('--TEC_Data_Path', type=str, default='dataset', help='数据集路径')
    parser.add_argument('--start_date', type=str, default='2013/01/01', help='数据开始时间')
    parser.add_argument('--end_date', type=str, default='2016/01/01', help='数据结束时间')
    parser.add_argument('--train_valid_date', type=str, default='2015/01/01', help='训练与验证数据分割时间')
    parser.add_argument('--sample_stride', type=int, default=1, help='样本抽取步长')

    return parser.parse_args(args)


'''
Custom batch-collation function
'''
def collate_fn(samples: list):
    """Collate (info, x, y) samples into (info list, stacked x, stacked y).

    Each sample is a triple: metadata, input sequence, target sequence.
    Metadata stays a plain Python list; x and y become stacked tensors.
    """
    info, xs, ys = [], [], []
    for meta, x, y in samples:
        info.append(meta)
        xs.append(torch.tensor(x))
        ys.append(torch.tensor(y))
    return info, torch.stack(xs), torch.stack(ys)


'''
Data preparation
'''
def data_prepare(configs):
    """Load the TEC csv, split it by date, and build train/valid DataLoaders.

    Rows in [start_date, train_valid_date) form the training set and rows in
    [train_valid_date, end_date) form the validation set. Only the training
    loader shuffles; both drop the last incomplete batch.
    """
    # Full TEC table; 'date' column parsed as datetimes for range filtering
    frame = pd.read_csv(configs.TEC_Data_Path, parse_dates=['date'])
    start = pd.to_datetime(configs.start_date)
    split = pd.to_datetime(configs.train_valid_date)
    end = pd.to_datetime(configs.end_date)

    def build_dataset(lo, hi):
        # Half-open date window [lo, hi)
        mask = (frame['date'] >= lo) & (frame['date'] < hi)
        return TECDataset(frame[mask],
                          sample_stride=configs.sample_stride,
                          seq_len=configs.seq_len,
                          pred_len=configs.pred_len)

    def build_loader(dataset, shuffle):
        return DataLoader(dataset,
                          collate_fn=collate_fn,
                          batch_size=configs.batch_size,
                          shuffle=shuffle,
                          drop_last=True,
                          num_workers=configs.num_workers)

    train_dataloader = build_loader(build_dataset(start, split), True)
    valid_dataloader = build_loader(build_dataset(split, end), False)
    return train_dataloader, valid_dataloader


'''
Training loop (one epoch)
'''
def train(model, loss_fn, optimizer, now_e, dataloader, device):
    """Run one training epoch and return the epoch-mean batch loss.

    Args:
        model: network mapping (x, info) -> prediction.
        loss_fn: criterion comparing prediction with target y.
        optimizer: torch optimizer over the model's parameters.
        now_e: current epoch index, used for the TensorBoard global step.
        dataloader: yields (info, x, y) batches (see collate_fn).
        device: computation device string ('cuda' or 'cpu').

    Returns:
        float mean of per-batch losses (mirrors validate(); callers may ignore).
    """
    # Switch to training mode (enables dropout, etc.)
    model.train()
    running_loss = 0
    # Progress bar over batches
    loop = tqdm(enumerate(dataloader), total=len(dataloader))
    for i, (info, x, y) in loop:
        # Unify dtype and move tensors to the compute device
        x = x.float().to(device)
        y = y.float().to(device)
        # Forward pass
        y_pred = model(x, info)
        loss = loss_fn(y_pred, y)
        # Standard optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Log every 100 batches at the GLOBAL batch index so the x-axis uses
        # one consistent unit (batches) across epochs. The previous
        # `now_e * len(dataloader) + i // 100` mixed units and squeezed all
        # of an epoch's points next to the epoch boundary.
        if i % 100 == 0:
            writer.add_scalar("Loss/train", loss.item(), now_e * len(dataloader) + i)
        running_loss += loss.item()
        # Progress bar shows the running mean loss
        loop.set_description("Training")
        loop.set_postfix(train_loss=running_loss/(i + 1))
    # Epoch-mean loss, consistent with validate()'s return value
    return running_loss / len(dataloader)


'''
Validation loop (one epoch)
'''
def validate(model, dataloader, loss_fn, now_e, device):
    """Run one validation epoch and return the epoch-mean batch loss.

    Args:
        model: network mapping (x, info) -> prediction.
        dataloader: yields (info, x, y) batches (see collate_fn).
        loss_fn: criterion comparing prediction with target y.
        now_e: current epoch index, used for the TensorBoard global step.
        device: computation device string ('cuda' or 'cpu').

    Returns:
        float mean of per-batch losses over the validation set.
    """
    # Switch to evaluation mode (disables dropout, etc.)
    model.eval()
    running_loss = 0
    loop = tqdm(enumerate(dataloader), total=len(dataloader))
    # No autograd graph is needed for evaluation
    with torch.no_grad():
        for i, (info, x, y) in loop:
            # Unify dtype and move tensors to the compute device
            x = x.float().to(device)
            y = y.float().to(device)
            # Forward pass only
            y_pred = model(x, info)
            loss = loss_fn(y_pred, y).item()
            # Log every 100 batches at the GLOBAL batch index so the x-axis
            # uses one consistent unit (batches) across epochs, matching the
            # training curve. The previous `i // 100` term mixed units.
            if i % 100 == 0:
                writer.add_scalar("Loss/valid", loss, now_e * len(dataloader) + i)
            running_loss += loss
            loop.set_description("Validating")
            loop.set_postfix(valid_loss=running_loss/(i + 1))
    return running_loss / len(dataloader)


'''
Model training (single GPU)
'''
def run(configs):
    """End-to-end training driver: data prep, model build, train/valid loop.

    Builds DataLoaders from `configs`, trains TimeLLMForTEC with AdamW and an
    exponentially decaying learning rate, checkpoints via EarlyStopping, and
    logs losses to the module-level TensorBoard writer.

    Args:
        configs: argparse.Namespace produced by set_configs().
    """
    # Data preparation
    train_dataloader, valid_dataloader = data_prepare(configs)

    """ Model creation """
    # Pick the compute device
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"Using {device} device!")

    # Build the model
    model = TimeLLMForTEC(configs).to(device)

    """ Model training """
    # Checkpoint directory and file name
    model_save_path = configs.model_save_path
    os.makedirs(model_save_path, exist_ok=True)
    model_name = configs.model_name + '.pth'

    # Wall-clock start time
    time_init = time.time()
    # Optimizer
    optimizer = torch.optim.AdamW(params=model.parameters(), lr=configs.learning_rate)
    # Exponential LR decay. `verbose=True` was dropped: it is deprecated in
    # recent PyTorch releases and the LR is already printed manually below.
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, configs.gamma)
    # Loss function
    loss_fn = nn.MSELoss()
    # Early stopping (also saves the best checkpoint)
    early_stopping = EarlyStopping(patience=configs.patience, delta=configs.delta)

    # Epoch loop
    for e in range(configs.epochs):

        print(f"Epoch [{e + 1}/{configs.epochs}]")

        """ Training batches """
        train(model, loss_fn, optimizer, e, train_dataloader, device)

        """ Validation batches """
        # validate() already returns a scalar mean loss, so no extra
        # np.average() wrapper is needed before the early-stopping check.
        epoch_valid_loss = validate(model, valid_dataloader, loss_fn, e, device)

        """ Checkpointing """
        # Check early stopping; EarlyStopping keeps the best weights on disk
        if early_stopping(epoch_valid_loss, model, model_save_path, model_name):
            print("Early stopping!")
            break

        # Decay the learning rate
        print(f"Learning rate : {scheduler.get_last_lr()}")
        scheduler.step()

    # Report total wall-clock time
    print(f"Training Over! Total Time: {time.time() - time_init}")
    # Flush and close the TensorBoard writer
    writer.close()


'''
Script entry point
'''
if __name__ == '__main__':
    # Parse CLI configuration and launch single-GPU training
    run(set_configs())