import pandas as pd
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
import pytorch_lightning as pl
from sklearn.preprocessing import StandardScaler, OrdinalEncoder, LabelEncoder
import joblib

from PathFormer import PathFormer, TimeSeriesDataset, DataLoader
from predict_utils import create_sequences, add_time_features

# Global version tag, embedded in every artifact filename below so that
# encoders / scaler / model weights from one run stay consistent.
# Format: MAJOR.MINOR.PATCH
version = "v3.0.2"

# Configuration parameters
# -- sequence-generation stage --
seq_length = 672  # 14-day window (672 = 48 steps/day * 14, i.e. assumes 30-min sampling — TODO confirm against the data source)
# -- model-initialization stage --
patch_size = 48    # time steps per patch in the Patch Embedding
embed_dim = 256    # transformer embedding width
num_heads = 8      # attention heads per layer
num_layers = 6     # transformer encoder layers
ff_dim = 1024      # feed-forward hidden width
# -- training stage --
batch_size = 64
num_workers = 2    # DataLoader worker processes
max_epochs = 500   # upper bound; EarlyStopping usually stops sooner

# File paths gathered here for easier management; all derived artifacts
# carry the version tag in their names.
data_path = '../../dataset/ZGGG.09.02.2023.09.02.2025.1.0.0.cn.utf8.00000000.xls'
encoder_dd_path = f'../../model_files/label_encoder/label_encoder_dd_{version}.pkl'
encoder_c_path = f'../../model_files/label_encoder/label_encoder_c_{version}.pkl'
scaler_path = f'../../model_files/data_scaler/data_scaler_{version}.pkl'
model_dict_path = f'../../model_files/model_dict/pathformer_dict_{version}.pkl'
model_checkpoint_path = f'../../model_files/model_checkpoint/pathformer_checkpoint_{version}.ckpt'

if __name__ == '__main__':
    # 数据预处理
    data = pd.read_excel(data_path, skiprows=6)
    data.drop(columns=['ff10', 'WW', "W'W'"], inplace=True)
    data['当地时间 广州(机场)'] = pd.to_datetime(data['当地时间 广州(机场)'], format='%d.%m.%Y %H:%M')
    data.set_index('当地时间 广州(机场)', inplace=True)
    data.sort_index(ascending=True, inplace=True)
    data = add_time_features(data)

    # 特征工程
    data['VV'] = data['VV'].astype(str).str.extract(r'(\d+\.?\d*)').astype(float)
    encoder_dd = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
    encoder_c = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
    data[['DD']] = encoder_dd.fit_transform(data[['DD']])
    data[['c']] = encoder_c.fit_transform(data[['c']])
    data.ffill(inplace=True)

    # 标准化
    train_size = int(0.8 * len(data))
    scaler = StandardScaler()
    train_scaled = scaler.fit_transform(data.iloc[:train_size])
    test_scaled = scaler.transform(data.iloc[train_size:])

    # 转换回 DataFrame 并保留特征名称
    train_scaled_df = pd.DataFrame(train_scaled, columns=data.columns, index=data.iloc[:train_size].index)
    test_scaled_df = pd.DataFrame(test_scaled, columns=data.columns, index=data.iloc[train_size:].index)

    data_scaled = pd.concat([train_scaled_df, test_scaled_df]).astype(np.float32)

    # 创建序列
    # ! 说明 sequence, patch, batch之间的关系
    """
    1. sequence
        * 这里创建的序列是一个二维数组，行：时间步，列：特征 -> shape(seq_length, num_features)
    并且这里的一个 sequence 就对应着一个样本
    
    示意图：
    [
        [], -> 时间步1
        [], -> 时间步2
        []  -> 时间步3
    ] --> 这整个算一个样本
    
    2. patch
        * 然后在 Patch Embedding 的部分会将时间步进行分组。
    - patch_size的值就对应了规定上多少个时间步为一组，
    - 实际上就是在对样本的形状进行修改。从 shape(seq_length, num_features) --> shape(patch_size, [seq_length // patch_size] * num_features)
    
    3. batch
        * 这个 batch 的主要功能是让多组数据能够并行进行运算，增加运算速度。是在 DataLoader 中进行设置的。
        - 其中的值代表的含义是 每组batch 处理的样本数量。
    """
    X, y = create_sequences(data_scaled, seq_length)
    train_size = int(0.8 * len(X))
    X_train, X_test = X[:train_size], X[train_size:]
    y_train, y_test = y[:train_size], y[train_size:]

    # 初始化模型
    model = PathFormer(
        input_dim=X_train.shape[2],
        patch_size=patch_size,
        embed_dim=embed_dim,
        num_heads=num_heads,
        num_layers=num_layers,
        output_dim=y_train.shape[1],
        ff_dim=ff_dim
    )

    # 训练配置
    train_loader = DataLoader(
        TimeSeriesDataset(X_train, y_train),
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        persistent_workers=True
    )

    trainer = pl.Trainer(
        max_epochs=max_epochs,
        accelerator='auto',
        gradient_clip_val=0.5,
        callbacks=[
            pl.callbacks.LearningRateMonitor(),
            pl.callbacks.EarlyStopping(monitor="train_loss", patience=15)
        ]
    )

    # 执行训练
    trainer.fit(model, train_loader)

    # 在训练模型完成后保存 encoder_dd & encoder_c 编码器
    joblib.dump(encoder_dd, encoder_dd_path)
    joblib.dump(encoder_c, encoder_c_path)
    # 保存 StandardScaler 用于后续预测时的标准化
    joblib.dump(scaler, scaler_path)
    # 保存训练完成的模型
    torch.save(model.state_dict(), model_dict_path)
