import os
# GPU selection: restrict which physical GPU CUDA may see.
os.environ['CUDA_VISIBLE_DEVICES'] = '3'  # NOTE(review): original comment said "first GPU" but '3' selects GPU index 3; change to '0','1',... as needed
# os.environ['CUDA_VISIBLE_DEVICES'] = ''  # set to an empty string to disable GPU use entirely

import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split

import matplotlib.pyplot as plt
# Use a CJK-capable font so the Chinese axis labels and titles render correctly
plt.rcParams['font.family'] = 'Noto Sans CJK SC'

import argparse
from datetime import datetime, timedelta
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')

import json
import shutil

class SolarRadiationDataset(Dataset):
    """Torch dataset pairing input windows with their target radiation windows.

    `X` and `y` are array-likes of equal length; each item is converted to a
    float32 tensor on access.
    """

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        features = torch.FloatTensor(self.X[idx])
        target = torch.FloatTensor(self.y[idx])
        return features, target

class LSTMSolarModel(nn.Module):
    """LSTM network mapping an input window of features to a radiation forecast.

    The final hidden representation (last time step of the top LSTM layer) is
    projected linearly to `output_size` forecast values.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        super(LSTMSolarModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Recurrent encoder over the input window (batch dimension first).
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                           batch_first=True, dropout=dropout)

        # Projection from the last hidden state to the forecast horizon.
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Forward pass.

        Args:
            x: tensor of shape (batch_size, seq_len, input_size).

        Returns:
            Tensor of shape (batch_size, output_size).
        """
        n_samples = x.size(0)

        # Fresh zero-initialized hidden/cell states for every batch.
        state_shape = (self.num_layers, n_samples, self.hidden_size)
        hidden = torch.zeros(state_shape, device=x.device)
        cell = torch.zeros(state_shape, device=x.device)

        sequence_out, _ = self.lstm(x, (hidden, cell))

        # Predict only from the representation of the final time step.
        return self.fc(sequence_out[:, -1, :])

def load_and_preprocess_data(file_path, start_year=None, end_year=None, single_year=None, scaler=None):
    """Load the hourly radiation CSV, filter to the requested year(s), derive
    cyclical time features and min-max scale the feature matrix.

    Either `single_year` or both `start_year` and `end_year` must be given;
    otherwise a ValueError is raised. When `scaler` is provided it is reused
    (transform only); otherwise a new MinMaxScaler is fit on the selected data.

    Returns:
        (scaled feature matrix, the scaler used, the filtered DataFrame)
    """
    print(f"正在加载数据文件: {file_path}")
    df = pd.read_csv(file_path)

    # Restrict rows to the requested time span.
    if single_year is not None:
        df = df[df['YEAR'] == single_year].copy()
        print(f"数据范围: {single_year}, 总记录数: {len(df)}")
    elif start_year is not None and end_year is not None:
        in_range = (df['YEAR'] >= start_year) & (df['YEAR'] <= end_year)
        df = df[in_range].copy()
        print(f"数据范围: {start_year}-{end_year}, 总记录数: {len(df)}")
    else:
        raise ValueError("必须指定年份区间或单一年份")

    print("正在预处理数据...")
    # Assemble a proper timestamp column from the split Y/M/D/H fields.
    df['datetime'] = pd.to_datetime({
        'year': df['YEAR'],
        'month': df['MO'],
        'day': df['DY'],
        'hour': df['HR'],
    })

    # Linear and cyclical (sin/cos) encodings of month-of-year and hour-of-day.
    df['month_normalized'] = df['MO'] / 12.0
    df['hour_normalized'] = df['HR'] / 23.0
    two_pi = 2 * np.pi
    df['month_sin'] = np.sin(two_pi * df['MO'] / 12)
    df['month_cos'] = np.cos(two_pi * df['MO'] / 12)
    df['hour_sin'] = np.sin(two_pi * df['HR'] / 24)
    df['hour_cos'] = np.cos(two_pi * df['HR'] / 24)

    feature_columns = ['ALLSKY_SFC_SW_DWN', 'month_sin', 'month_cos', 'hour_sin', 'hour_cos']
    data = df[feature_columns].values

    # Fit a fresh scaler only when the caller did not supply one.
    if scaler is None:
        print("正在进行数据标准化...")
        scaler = MinMaxScaler()
        data_scaled = scaler.fit_transform(data)
    else:
        data_scaled = scaler.transform(data)

    print("数据预处理完成！")
    return data_scaled, scaler, df

def create_sequences(data, input_window, output_window):
    """Slide a window over `data` to build supervised (X, y) pairs.

    X windows contain every feature column; y windows contain only column 0
    (the radiation value) for the `output_window` steps that follow each
    input window.
    """
    total_sequences = len(data) - input_window - output_window + 1
    print(f"正在创建序列数据，总共 {total_sequences} 个序列...")

    # Inputs: full feature rows over the input window.
    inputs = [data[start:start + input_window] for start in range(total_sequences)]
    # Targets: radiation column only, over the following output window.
    targets = [
        data[start + input_window:start + input_window + output_window, 0]
        for start in range(total_sequences)
    ]

    return np.array(inputs), np.array(targets)

def train_model(model, train_loader, val_loader, num_epochs, learning_rate, device, patience=15):
    """Train `model` with Adam + MSE loss, early-stopping on validation loss.

    Args:
        model: nn.Module to optimize; the best-epoch weights are restored
            into it before returning.
        train_loader / val_loader: DataLoaders yielding (X, y) batches.
        num_epochs: maximum number of epochs.
        learning_rate: initial Adam learning rate.
        device: torch.device to run on.
        patience: stop after this many epochs without validation improvement.

    Returns:
        (train_losses, val_losses, best_val_loss, best_model_state) where
        best_model_state is a frozen (cloned) copy of the best weights.
    """
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    # Halve the learning rate after 10 epochs without validation improvement.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=10, factor=0.5)
    
    train_losses = []
    val_losses = []
    best_val_loss = float('inf')
    best_model_state = None
    patience_counter = 0
    
    print("开始训练...")
    epoch_pbar = tqdm(range(num_epochs), desc="训练进度", unit="epoch")
    
    for epoch in epoch_pbar:
        # ---- training phase ----
        model.train()
        train_loss = 0.0
        train_pbar = tqdm(train_loader, desc=f"Epoch {epoch+1}/{num_epochs} - 训练", leave=False)
        
        for batch_X, batch_y in train_pbar:
            batch_X, batch_y = batch_X.to(device), batch_y.to(device)
            
            optimizer.zero_grad()
            outputs = model(batch_X)
            loss = criterion(outputs, batch_y)
            loss.backward()
            optimizer.step()
            
            train_loss += loss.item()
            train_pbar.set_postfix({'loss': f'{loss.item():.6f}'})
        
        # ---- validation phase ----
        model.eval()
        val_loss = 0.0
        val_pbar = tqdm(val_loader, desc=f"Epoch {epoch+1}/{num_epochs} - 验证", leave=False)
        
        with torch.no_grad():
            for batch_X, batch_y in val_pbar:
                batch_X, batch_y = batch_X.to(device), batch_y.to(device)
                outputs = model(batch_X)
                loss = criterion(outputs, batch_y)
                val_loss += loss.item()
                val_pbar.set_postfix({'loss': f'{loss.item():.6f}'})
        
        train_loss /= len(train_loader)
        val_loss /= len(val_loader)
        
        train_losses.append(train_loss)
        val_losses.append(val_loss)
        
        # Track the best model seen so far.
        # BUG FIX: state_dict() returns references to the live parameter
        # tensors and dict.copy() is only a shallow copy, so the previously
        # saved "best" snapshot kept mutating as training continued and the
        # final load_state_dict() restored the *last* weights, not the best.
        # Clone each tensor so the snapshot is frozen at this epoch.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
            patience_counter = 0
            print(f"\n✓ 发现更好的模型！验证损失: {val_loss:.6f} (Epoch {epoch+1})")
        else:
            patience_counter += 1
        
        scheduler.step(val_loss)
        
        # Update the outer (per-epoch) progress bar.
        epoch_pbar.set_postfix({
            'train_loss': f'{train_loss:.6f}',
            'val_loss': f'{val_loss:.6f}',
            'best_val_loss': f'{best_val_loss:.6f}',
            'patience': f'{patience_counter}/{patience}',
            'lr': f'{optimizer.param_groups[0]["lr"]:.6f}'
        })
        
        # Early stopping.
        if patience_counter >= patience:
            print(f"\n⚠️ 早停触发！{patience} 个epoch没有改善，停止训练。")
            break
    
    # Restore the best weights into the model before returning.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
        print(f"\n✓ 已加载最佳模型，验证损失: {best_val_loss:.6f}")
    
    return train_losses, val_losses, best_val_loss, best_model_state

def evaluate_model(model, test_loader, scaler, device):
    """Run `model` over `test_loader`, un-scale outputs and report MSE/MAE/RMSE.

    Predictions are clipped at zero (irradiance cannot be negative) before the
    metrics are computed on the original (inverse-transformed) scale.
    """
    model.eval()
    predictions = []
    actuals = []
    
    print("开始评估...")
    n_features = scaler.n_features_in_
    with torch.no_grad():
        test_pbar = tqdm(test_loader, desc="模型评估", unit="batch")
        for batch_X, batch_y in test_pbar:
            batch_X, batch_y = batch_X.to(device), batch_y.to(device)
            outputs = model(batch_X)
            
            pred_flat = outputs.cpu().numpy().reshape(-1)
            actual_flat = batch_y.cpu().numpy().reshape(-1)
            
            # The scaler was fit on the full feature matrix, so embed the
            # radiation values into column 0 of a zero matrix, run
            # inverse_transform, and keep only that column.
            pred_full = np.zeros((pred_flat.shape[0], n_features))
            actual_full = np.zeros((actual_flat.shape[0], n_features))
            pred_full[:, 0] = pred_flat
            actual_full[:, 0] = actual_flat
            
            pred_original = scaler.inverse_transform(pred_full)[:, 0]
            actual_original = scaler.inverse_transform(actual_full)[:, 0]
            
            # Physical constraint: irradiance cannot be negative.
            pred_original = np.where(pred_original < 0, 0, pred_original)
            
            predictions.extend(pred_original)
            actuals.extend(actual_original)
            
            test_pbar.set_postfix({
                'processed': f'{len(predictions)}',
                'batch_size': f'{batch_X.size(0)}'
            })
    
    # Aggregate error metrics on the original scale.
    mse = mean_squared_error(actuals, predictions)
    mae = mean_absolute_error(actuals, predictions)
    rmse = np.sqrt(mse)
    
    print(f"测试集评估结果:")
    print(f"MSE: {mse:.4f}")
    print(f"MAE: {mae:.4f}")
    print(f"RMSE: {rmse:.4f}")
    
    return predictions, actuals, mse, mae, rmse

def plot_results(train_losses, val_losses, predictions, actuals, save_path=None):
    """Draw a 2x2 diagnostic figure: loss curves, prediction scatter, a
    time-series comparison of the first samples, and the error histogram.

    Args:
        train_losses / val_losses: per-epoch loss histories.
        predictions / actuals: flat sequences on the original scale.
        save_path: if given, the figure is written there as a PNG.
    """
    fig, axes = plt.subplots(2, 2, figsize=(15, 10))
    
    # Training / validation loss curves.
    axes[0, 0].plot(train_losses, label='训练损失')
    axes[0, 0].plot(val_losses, label='验证损失')
    axes[0, 0].set_title('训练和验证损失')
    axes[0, 0].set_xlabel('Epoch')
    axes[0, 0].set_ylabel('Loss')
    axes[0, 0].legend()
    axes[0, 0].grid(True)
    
    # Predicted vs. actual scatter with the ideal y=x reference line.
    axes[0, 1].scatter(actuals, predictions, alpha=0.5)
    axes[0, 1].plot([min(actuals), max(actuals)], [min(actuals), max(actuals)], 'r--', lw=2)
    axes[0, 1].set_xlabel('实际值')
    axes[0, 1].set_ylabel('预测值')
    axes[0, 1].set_title('预测值 vs 实际值')
    axes[0, 1].grid(True)
    
    # Time-series overlay of the first samples.
    sample_size = min(500, len(predictions))
    axes[1, 0].plot(actuals[:sample_size], label='实际值', alpha=0.7)
    axes[1, 0].plot(predictions[:sample_size], label='预测值', alpha=0.7)
    # BUG FIX: the title hard-coded "100" while up to 500 samples are drawn;
    # use the actual sample count.
    axes[1, 0].set_title(f'预测结果对比（前{sample_size}个样本）')
    axes[1, 0].set_xlabel('时间步')
    axes[1, 0].set_ylabel('太阳辐照 (Wh/m²)')
    axes[1, 0].legend()
    axes[1, 0].grid(True)
    
    # Histogram of prediction errors.
    errors = np.array(predictions) - np.array(actuals)
    axes[1, 1].hist(errors, bins=50, alpha=0.7, edgecolor='black')
    axes[1, 1].set_title('预测误差分布')
    axes[1, 1].set_xlabel('预测误差')
    axes[1, 1].set_ylabel('频次')
    axes[1, 1].grid(True)
    
    plt.tight_layout()
    
    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        print(f"结果图已保存到: {save_path}")
    
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close(fig)

def main():
    """CLI entry point: parse arguments, prepare data, train the LSTM, and
    evaluate it.

    Trains on years `start_year`..`end_year` (rows from `test_year` are
    removed if it falls inside that range), validates on a random split of
    the training sequences, and reports final metrics on `test_year`. All
    artifacts (result plot, best model weights, training-info JSON) are
    written to an auto-named output directory.
    """
    parser = argparse.ArgumentParser(description='LSTM太阳辐照预测模型')
    parser.add_argument('--data_file', type=str, default='POWER_Point_Hourly_20150101_20250101.csv',
                        help='数据文件路径')
    parser.add_argument('--input_window', type=int, default=120,  # 5 days * 24 hours
                        help='输入窗口长度（小时）')
    parser.add_argument('--output_window', type=int, default=24,  # 1 day
                        help='输出窗口长度（小时）')
    parser.add_argument('--hidden_size', type=int, default=256,
                        help='LSTM隐藏层大小')
    parser.add_argument('--num_layers', type=int, default=2,
                        help='LSTM层数')
    parser.add_argument('--batch_size', type=int, default=512,
                        help='批次大小')
    parser.add_argument('--num_epochs', type=int, default=100,
                        help='训练轮数')
    parser.add_argument('--learning_rate', type=float, default=0.001,
                        help='学习率')
    parser.add_argument('--dropout', type=float, default=0.2,
                        help='Dropout率')
    parser.add_argument('--start_year', type=int, default=2019,
                        help='开始年份')
    parser.add_argument('--end_year', type=int, default=2024,
                        help='结束年份')
    parser.add_argument('--test_year', type=int, default=2024,
                        help='测试年份')
    parser.add_argument('--patience', type=int, default=15,
                        help='早停耐心值（多少个epoch没有改善后停止训练）')
    parser.add_argument('--val_ratio', type=float, default=0.2,
                        help='训练集中划分为验证集的比例（0-1之间）')
    
    args = parser.parse_args()
    
    # Select the compute device (GPU if visible, else CPU).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")
    
    # ==== Auto-generate the output directory name from the hyperparameters ====
    output_dir = f"results_LSTM_in{args.input_window}_out{args.output_window}_h{args.hidden_size}_l{args.num_layers}_bs{args.batch_size}_lr{args.learning_rate}_drop{args.dropout}_train{args.start_year}-{args.end_year}_test{args.test_year}"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    print(f"所有输出将保存在: {output_dir}")

    # Load and preprocess the training span (test_year rows removed below).
    data_scaled, scaler, df = load_and_preprocess_data(
        args.data_file, start_year=args.start_year, end_year=args.end_year
    )
    # Drop test_year rows so that year is never used for training.
    # NOTE(review): the scaler above was already fit on data that may include
    # test_year, and sequences built below can span the gap left by the
    # removed year — confirm both are acceptable.
    if args.test_year >= args.start_year and args.test_year <= args.end_year:
        mask = df['YEAR'] != args.test_year
        data_scaled = data_scaled[mask.values]
        df = df[mask]

    # Build supervised (input window -> output window) training sequences.
    X_all, y_all = create_sequences(data_scaled, args.input_window, args.output_window)
    print(f"全部训练数据序列形状: {X_all.shape}, {y_all.shape}")

    # Random train/validation split (val_ratio fraction goes to validation).
    X_train, X_val, y_train, y_val = train_test_split(
        X_all, y_all, test_size=args.val_ratio, random_state=42, shuffle=True
    )
    print(f"训练集序列形状: {X_train.shape}, {y_train.shape}")
    print(f"验证集序列形状: {X_val.shape}, {y_val.shape}")

    # Load and preprocess the test_year data, reusing the training scaler.
    data_test_scaled, _, df_test = load_and_preprocess_data(
        args.data_file, single_year=args.test_year, scaler=scaler
    )
    X_test, y_test = create_sequences(data_test_scaled, args.input_window, args.output_window)
    print(f"测试集序列形状: {X_test.shape}, {y_test.shape}")

    # Wrap the arrays in datasets and batch loaders.
    train_dataset = SolarRadiationDataset(X_train, y_train)
    val_dataset = SolarRadiationDataset(X_val, y_val)
    test_dataset = SolarRadiationDataset(X_test, y_test)

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
    
    # Build the model.
    input_size = X_train.shape[2]  # number of input features
    model = LSTMSolarModel(
        input_size=input_size,
        hidden_size=args.hidden_size,
        num_layers=args.num_layers,
        output_size=args.output_window,
        dropout=args.dropout
    ).to(device)
    
    print(f"模型参数数量: {sum(p.numel() for p in model.parameters())}")
    
    # Train with early stopping on validation loss.
    train_losses, val_losses, best_val_loss, best_model_state = train_model(
        model, train_loader, val_loader, args.num_epochs, args.learning_rate, device, patience=args.patience
    )

    # Evaluate on the validation split (model-selection sanity check).
    print("\n在验证集上评估模型...")
    _, _, mse_val, mae_val, rmse_val = evaluate_model(model, val_loader, scaler, device)
    print(f"验证集: MSE={mse_val:.4f}, MAE={mae_val:.4f}, RMSE={rmse_val:.4f}")

    # Final evaluation on the held-out test year.
    print("\n在测试集上评估模型...")
    predictions, actuals, mse, mae, rmse = evaluate_model(model, test_loader, scaler, device)
    
    # Plot loss curves and prediction diagnostics.
    plot_results(train_losses, val_losses, predictions, actuals, os.path.join(output_dir, 'solar_radiation_results.png'))
    
    # Persist the best model weights and a JSON summary of the run.
    if best_model_state is not None:
        best_model_path = os.path.join(output_dir, 'solar_radiation_lstm_best_model.pth')
        torch.save(best_model_state, best_model_path)
        print(f"最佳模型已保存到: {best_model_path}")
        # Save training/evaluation metadata alongside the weights.
        training_info = {
            'best_val_loss': best_val_loss,
            'final_train_loss': train_losses[-1] if train_losses else None,
            'final_val_loss': val_losses[-1] if val_losses else None,
            'test_mse': mse,
            'test_mae': mae,
            'test_rmse': rmse,
            'model_config': {
                'input_size': input_size,
                'hidden_size': args.hidden_size,
                'num_layers': args.num_layers,
                'output_size': args.output_window,
                'dropout': args.dropout
            },
            'training_config': {
                'input_window': args.input_window,
                'output_window': args.output_window,
                'batch_size': args.batch_size,
                'learning_rate': args.learning_rate,
                'num_epochs': args.num_epochs,
                'patience': args.patience
            }
        }
        info_path = os.path.join(output_dir, 'training_info.json')
        with open(info_path, 'w', encoding='utf-8') as f:
            json.dump(training_info, f, indent=2, ensure_ascii=False)
        print(f"训练信息已保存到: {info_path}")

# Run the full pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main() 