import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from sklearn.preprocessing import StandardScaler, OrdinalEncoder
import joblib
from pathformer.PathFormer import PathFormer, ModelConfig
from utils.predict_utils import create_sequences, add_time_features

# Global artifact version tag, embedded in every persisted-file name below.
# Format: MAJOR.MINOR.PATCH
version = "v3.0.0"

# Centralized file paths for the raw dataset and all training artifacts
# (label encoders, feature scaler, model weights, Lightning checkpoint).
data_path = 'dataset/ZGGG.09.02.2023.09.02.2025.1.0.0.cn.utf8.00000000.xls'
encoder_dd_path = f'model_files/label_encoder/label_encoder_dd_{version}.pkl'
encoder_c_path = f'model_files/label_encoder/label_encoder_c_{version}.pkl'
scaler_path = f'model_files/data_scaler/data_scaler_{version}.pkl'
model_dict_path = f'model_files/model_dict/pathformer_dict_{version}.pkl'
model_checkpoint_path = f'model_files/model_checkpoint/pathformer_checkpoint_{version}.ckpt'

class TimeSeriesDataset(Dataset):
    """Wrap pre-built sequence arrays ``(X, y)`` as a PyTorch ``Dataset``.

    Args:
        X: sequence inputs; any array-like accepted by ``np.asarray``
           (numpy array, pandas object, nested lists, ...).
        y: targets; same flexibility as ``X``.

    Both are stored as ``float32`` tensors; ``__getitem__`` returns the
    ``(X[idx], y[idx])`` pair.
    """

    def __init__(self, X, y):
        # np.asarray generalizes the original `y.values` access: pandas
        # Series/DataFrames still work, and plain numpy arrays / lists are
        # now accepted as well (backward compatible).
        self.X = torch.as_tensor(np.asarray(X), dtype=torch.float32)
        self.y = torch.as_tensor(np.asarray(y), dtype=torch.float32)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]

if __name__ == '__main__':
    # Initialize model configuration (supplies seq_len below).
    config = ModelConfig()

    # --- Data loading & preprocessing ---
    # The export has 6 header rows before the actual table.
    data = pd.read_excel(data_path, skiprows=6)
    data = data.drop(columns=['ff10', 'WW', "W'W'"])
    data['当地时间 广州(机场)'] = pd.to_datetime(data['当地时间 广州(机场)'], format='%d.%m.%Y %H:%M')
    data.set_index('当地时间 广州(机场)', inplace=True)
    data.sort_index(ascending=True, inplace=True)
    data = add_time_features(data)

    # --- Feature engineering ---
    # Visibility column may contain non-numeric text; keep the numeric part.
    data['VV'] = data['VV'].astype(str).str.extract(r'(\d+\.?\d*)').astype(float)
    # BUG FIX: fit one encoder per categorical column and persist each one,
    # so inference can decode/encode consistently. Encoding values are
    # identical to the original joint fit (OrdinalEncoder maps each column
    # independently), but the original never saved the encoders even though
    # encoder_dd_path / encoder_c_path were declared for that purpose.
    encoder_dd = OrdinalEncoder()
    encoder_c = OrdinalEncoder()
    data[['DD']] = encoder_dd.fit_transform(data[['DD']])
    data[['c']] = encoder_c.fit_transform(data[['c']])
    data = data.ffill()

    # --- Scaling & sequence generation ---
    # NOTE(review): the scaler is fitted on the FULL series, including the
    # 20% tail excluded from training below — this leaks future statistics
    # into training. Kept as-is to preserve existing behavior; consider
    # fitting on the first 80% only.
    scaler = StandardScaler()
    data_scaled = pd.DataFrame(scaler.fit_transform(data), columns=data.columns)
    X, y = create_sequences(data_scaled, config.seq_len)

    # --- Dataset split (chronological first 80% for training) ---
    split_idx = int(len(X) * 0.8)
    train_loader = DataLoader(
        TimeSeriesDataset(X[:split_idx], y[:split_idx]),
        batch_size=32,
        shuffle=True,
        num_workers=2
    )

    # --- Model training ---
    model = PathFormer(config)
    trainer = pl.Trainer(
        max_epochs=50,
        accelerator='auto',
        precision='16-mixed',
        callbacks=[
            # NOTE(review): monitoring "train_loss" assumes the LightningModule
            # logs that key — confirm against PathFormer's training_step.
            pl.callbacks.EarlyStopping(monitor="train_loss", patience=10),
            pl.callbacks.ModelCheckpoint(filename='best_model')
        ]
    )
    # BUG FIX: the original script constructed the Trainer but never called
    # fit(), so the state dict saved below was an UNTRAINED model.
    trainer.fit(model, train_loader)

    # --- Persist artifacts ---
    joblib.dump(encoder_dd, encoder_dd_path)
    joblib.dump(encoder_c, encoder_c_path)
    joblib.dump(scaler, scaler_path)
    # Save the trained weights and a full Lightning checkpoint
    # (model_checkpoint_path was declared but unused in the original).
    torch.save(model.state_dict(), model_dict_path)
    trainer.save_checkpoint(model_checkpoint_path)

