import argparse
import logging
import os
import random

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.model_selection import TimeSeriesSplit
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import DataLoader, TensorDataset

from dataset import DataProcessor
from my_logging import setup_logging
from model import LSTMModel

# Argument parsing
def parse_args(argv=None):
    """Parse command-line options for LSTM time-series training.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] — existing callers are
            unaffected; tests can pass an explicit list.

    Returns:
        argparse.Namespace with data, model, training and system settings.
    """
    parser = argparse.ArgumentParser(description='LSTM Time Series Training')

    # Data parameters
    parser.add_argument('--data_path', type=str, default='data/airline-passengers.csv',
                        help='Path to the dataset file')
    parser.add_argument('--split_ratio', type=float, default=0.8,
                        help='Train-test split ratio')

    # Model parameters
    parser.add_argument('--input_size', type=int, default=1,
                        help='Size of input features')
    parser.add_argument('--hidden_size', type=int, default=50,
                        help='Number of hidden units in LSTM')
    parser.add_argument('--output_size', type=int, default=1,
                        help='Size of output prediction')
    parser.add_argument('--num_layers', type=int, default=1,
                        help='Number of LSTM layers')

    # Training parameters
    parser.add_argument('--seq_length', type=int, default=12,
                        help='Sequence length for time series')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='Batch size for training')
    parser.add_argument('--learning_rate', type=float, default=0.01,
                        help='Learning rate for optimizer')
    parser.add_argument('--epochs', type=int, default=100,
                        help='Number of training epochs')
    parser.add_argument('--n_splits', type=int, default=5,
                        help='Number of folds for cross validation')

    # System parameters
    parser.add_argument('--seed', type=int, default=42,
                        help='Random seed for reproducibility')
    parser.add_argument('--log_dir', type=str, default='logs',
                        help='Directory to save logs and artifacts')

    return parser.parse_args(argv)

# Random-seed setup
def set_seed(seed):
    """Seed all RNGs used by this script for reproducibility.

    Seeds Python's `random`, NumPy, and PyTorch (torch.manual_seed also
    seeds CUDA generators on all devices, per PyTorch documentation).

    Args:
        seed: Integer seed value.
    """
    random.seed(seed)  # Python stdlib RNG was previously left unseeded
    torch.manual_seed(seed)
    np.random.seed(seed)

# Trainer
class Trainer:
    """Training/evaluation driver: MSE loss + Adam on a regression model.

    `args` must provide `learning_rate` (float) and `epochs` (int).
    """

    def __init__(self, model, args):
        self.model = model
        self.args = args
        self.criterion = nn.MSELoss()
        self.optimizer = torch.optim.Adam(
            model.parameters(),
            lr=args.learning_rate
        )
        self.logger = logging.getLogger()

    def train_epoch(self, train_loader):
        """Run one optimization pass over `train_loader`.

        Returns:
            Mean per-batch training loss, or 0.0 for an empty loader
            (previously raised ZeroDivisionError).
        """
        self.model.train()
        # Move batches to the model's device — train() already does this for
        # the validation tensors, but batches were left on CPU, which breaks
        # when the model lives on a GPU.
        device = next(self.model.parameters()).device
        epoch_loss = 0.0
        num_batches = 0
        for X_batch, y_batch in train_loader:
            X_batch = X_batch.to(device)
            y_batch = y_batch.to(device)
            self.optimizer.zero_grad()
            y_pred = self.model(X_batch)
            loss = self.criterion(y_pred, y_batch)
            loss.backward()
            self.optimizer.step()
            epoch_loss += loss.item()
            num_batches += 1
        return epoch_loss / num_batches if num_batches else 0.0

    def evaluate(self, X, y):
        """Return the MSE loss on (X, y) with gradients disabled."""
        self.model.eval()
        with torch.no_grad():
            y_pred = self.model(X)
            loss = self.criterion(y_pred, y)
        return loss.item()

    def train(self, train_loader, val_data, epochs=None):
        """Train for `epochs` epochs (default: args.epochs).

        Args:
            train_loader: DataLoader yielding (X, y) batches.
            val_data: Tuple (X_val, y_val); moved to the model's device.
            epochs: Optional override of args.epochs.

        Returns:
            Tuple (train_losses, val_losses) of per-epoch loss histories.
        """
        if epochs is None:
            epochs = self.args.epochs

        X_val, y_val = val_data
        device = next(self.model.parameters()).device
        X_val = X_val.to(device)
        y_val = y_val.to(device)

        train_losses = []
        val_losses = []

        for epoch in range(epochs):
            train_loss = self.train_epoch(train_loader)
            val_loss = self.evaluate(X_val, y_val)

            train_losses.append(train_loss)
            val_losses.append(val_loss)

            # Log the first epoch and every tenth one after that.
            if (epoch+1) % 10 == 0 or epoch == 0:
                self.logger.info(
                    f"Epoch {epoch+1:3d}/{epochs} | "
                    f"Train Loss: {train_loss:.6f} | "
                    f"Val Loss: {val_loss:.6f}"
                )

        return train_losses, val_losses

def save_artifacts(model, scaler, log_dir, fold=None):
    """Persist model weights and the fitted scaler under <log_dir>/models.

    Args:
        model: Trained torch module; its state_dict is saved as .pth.
        scaler: Fitted scaler object, serialized with joblib.
        log_dir: Root log directory; a 'models' subdirectory is created.
        fold: Optional fold number. When given, filenames get a
            `_fold{n}` suffix so cross-validation artifacts don't
            overwrite each other.
    """
    # Local import keeps joblib optional until artifacts are actually saved.
    import joblib

    model_dir = os.path.join(log_dir, 'models')
    os.makedirs(model_dir, exist_ok=True)

    if fold is not None:
        model_path = os.path.join(model_dir, f'lstm_model_fold{fold}.pth')
        scaler_path = os.path.join(model_dir, f'scaler_fold{fold}.pkl')
    else:
        model_path = os.path.join(model_dir, 'lstm_model.pth')
        scaler_path = os.path.join(model_dir, 'scaler.pkl')

    torch.save(model.state_dict(), model_path)
    joblib.dump(scaler, scaler_path)

    logging.info(f"Model saved to {model_path}")
    logging.info(f"Scaler saved to {scaler_path}")

def save_plot(fig, filename, log_dir):
    """Save a matplotlib figure to <log_dir>/plots/<filename>, then close it.

    Args:
        fig: Matplotlib figure to write.
        filename: Target file name (extension decides the image format).
        log_dir: Root log directory; a 'plots' subdirectory is created.
    """
    plot_dir = os.path.join(log_dir, 'plots')
    os.makedirs(plot_dir, exist_ok=True)

    plot_path = os.path.join(plot_dir, filename)
    fig.savefig(plot_path)
    # Close the figure so repeated calls don't accumulate open figures.
    plt.close(fig)
    logging.info(f"Plot saved to {plot_path}")

def cross_validation(args, processor, values, log_dir):
    """Run TimeSeriesSplit cross-validation over the series.

    For each fold: trains a fresh LSTMModel, saves per-fold model/scaler
    artifacts and a loss-curve plot, and records the fold's final
    validation loss.

    Args:
        args: Parsed CLI namespace (uses n_splits, batch_size, epochs, ...).
        processor: DataProcessor exposing scale_data/create_sequences and a
            `scaler` attribute.
        values: Raw series values to model.
        log_dir: Directory where models and plots are written.

    Returns:
        Mean of the folds' final validation losses (float).
    """
    logger = logging.getLogger()
    logger.info("\nStarting Cross Validation...")

    # Prepare data.
    # NOTE(review): scale_data is applied to the FULL series before the CV
    # split — if it fits the scaler here, validation folds leak statistics
    # from future data. Confirm scale_data semantics in DataProcessor.
    scaled_values = processor.scale_data(values)
    X, y = processor.create_sequences(scaled_values)
    # unsqueeze(-1) adds a trailing feature dimension; assumes
    # create_sequences returns 2-D windows — TODO confirm.
    X = torch.FloatTensor(X).unsqueeze(-1)
    y = torch.FloatTensor(y).unsqueeze(-1)

    # Expanding-window time-series cross-validator.
    tscv = TimeSeriesSplit(n_splits=args.n_splits)
    fold_losses = []

    for fold, (train_index, val_index) in enumerate(tscv.split(X)):
        logger.info(f"\nFold {fold+1}/{args.n_splits}")

        # Split data; validation indices always come after training ones.
        X_train, X_val = X[train_index], X[val_index]
        y_train, y_val = y[train_index], y[val_index]

        # Build DataLoader; shuffle=False preserves temporal order.
        train_loader = DataLoader(
            TensorDataset(X_train, y_train),
            batch_size=args.batch_size,
            shuffle=False
        )

        # Fresh model and trainer per fold.
        model = LSTMModel(args)
        trainer = Trainer(model, args)

        # Train this fold.
        train_losses, val_losses = trainer.train(train_loader, (X_val, y_val))

        # Save per-fold model weights and scaler.
        save_artifacts(model, processor.scaler, log_dir, fold=fold+1)

        # Save the fold's training/validation loss curves.
        fig = plt.figure(figsize=(12, 6))
        plt.plot(train_losses, label='Training Loss')
        plt.plot(val_losses, label='Validation Loss')
        plt.title(f'Training and Validation Loss Curve - Fold {fold+1}')
        plt.xlabel('Epoch')
        plt.ylabel('MSE Loss')
        plt.legend()
        plt.grid(True)
        save_plot(fig, f'training_loss_fold{fold+1}.png', log_dir)

        # Record the final-epoch validation loss for this fold.
        final_val_loss = val_losses[-1]
        fold_losses.append(final_val_loss)
        logger.info(f"Fold {fold+1} Final Validation Loss: {final_val_loss:.6f}")

    # Average the final validation losses across folds.
    avg_loss = np.mean(fold_losses)
    logger.info(f"\nAverage Validation Loss across all folds: {avg_loss:.6f}")

    return avg_loss

# Main training flow
def train_main():
    """Entry point: parse args, run cross-validation, train a final model.

    Side effects: writes logs, loss-curve plots, model weights and the
    fitted scaler under the resolved log directory.
    """
    args = parse_args()

    # Unified logging setup; returns the logger and the resolved log_dir.
    logger, log_dir = setup_logging(args.log_dir, 'train_')

    logger.info("Starting LSTM Time Series Training")
    logger.info(f"Log directory: {log_dir}")
    logger.info(f"Arguments: {vars(args)}")
    set_seed(args.seed)

    # Data preparation.
    processor = DataProcessor(args)
    values = processor.load_data()

    # Cross-validation pass (logs its own per-fold and average losses).
    cross_validation(args, processor, values, log_dir)

    # Final model is trained on the TRAIN SPLIT only — the previous log
    # message claimed "Entire Dataset", which did not match the code; the
    # test split is held out for separate evaluation.
    logger.info("\nTraining Final Model on Training Split...")
    train, test = processor.split_data(values)
    train_scaled = processor.scale_data(train)
    X_train, y_train = processor.create_sequences(train_scaled)

    # To tensors, with a trailing feature dimension.
    X_train = torch.FloatTensor(X_train).unsqueeze(-1)
    y_train = torch.FloatTensor(y_train).unsqueeze(-1)

    # DataLoader; shuffle=False keeps temporal order.
    train_loader = DataLoader(
        TensorDataset(X_train, y_train),
        batch_size=args.batch_size,
        shuffle=False
    )

    # Fresh model for the final training run.
    model = LSTMModel(args)
    trainer = Trainer(model, args)

    # No held-out validation here: loss is monitored on the training data
    # itself, so the "Val Loss" it logs is a training-set metric.
    train_losses, _ = trainer.train(train_loader, (X_train, y_train))

    # Save the training-loss curve.
    fig = plt.figure(figsize=(12, 6))
    plt.plot(train_losses, label='Training Loss')
    plt.title('Final Model Training Loss Curve')
    plt.xlabel('Epoch')
    plt.ylabel('MSE Loss')
    plt.legend()
    plt.grid(True)
    save_plot(fig, 'final_training_loss.png', log_dir)

    # Save final model weights and scaler.
    save_artifacts(model, processor.scaler, log_dir)

    logger.info("Training completed. All artifacts saved to log directory.")

# Script entry point: run the full training pipeline when executed directly.
if __name__ == '__main__':
    train_main()