import torch
import numpy as np
import pandas as pd
import yaml
import os
from datetime import datetime, timedelta
import matplotlib.pyplot as plt

from data_utils import download_himawari_data, load_himawari_data, load_multi_band_himawari_data, prepare_station_data, load_config, find_himawari_file
from models import CombinedModel

def load_trained_model(config, model_path):
    """Load a trained model from checkpoint with GPU selection and DataParallel support.

    Args:
        config: configuration dict; reads ``training.gpu_selection_mode``
            ('first' | 'random' | 'specific') and ``training.gpu_index``.
        model_path: path to a checkpoint containing 'model_state_dict',
            'epoch' and 'val_loss'.

    Returns:
        (model, device): the model in eval mode and the torch.device it lives on.
    """
    import random
    
    if torch.cuda.is_available():
        gpu_count = torch.cuda.device_count()
        selection_mode = config['training'].get('gpu_selection_mode', 'first')
        gpu_index = config['training'].get('gpu_index', 0)
        
        if selection_mode == "random" and gpu_count > 1:
            # Pick one of the available GPUs at random.
            selected_index = random.randint(0, gpu_count - 1)
            device = torch.device(f"cuda:{selected_index}")
            print(f"随机选择GPU: {selected_index} ({torch.cuda.get_device_name(selected_index)})")
        elif selection_mode == "specific" and 0 <= gpu_index < gpu_count:
            # Use the explicitly configured GPU (index validated above).
            device = torch.device(f"cuda:{gpu_index}")
            print(f"使用指定GPU: {gpu_index} ({torch.cuda.get_device_name(gpu_index)})")
        else:
            # Default: first GPU.
            device = torch.device("cuda:0")
            print(f"使用第一个GPU: 0 ({torch.cuda.get_device_name(0)})")
    else:
        device = torch.device("cpu")
        print("Using CPU device (CUDA not available)")
    
    # Create model
    model = CombinedModel(config).to(device)
    
    # Load checkpoint
    checkpoint = torch.load(model_path, map_location=device)
    
    # Handle checkpoints saved from a DataParallel-wrapped model.
    # Bug fix: str.replace('module.', '') removed EVERY occurrence of
    # 'module.' inside a key (corrupting keys of any submodule whose own
    # name contains 'module.'); only the leading prefix must be stripped.
    state_dict = checkpoint['model_state_dict']
    if any(k.startswith('module.') for k in state_dict.keys()):
        print("检测到DataParallel模型，移除'module.'前缀")
        state_dict = {
            (k[len('module.'):] if k.startswith('module.') else k): v
            for k, v in state_dict.items()
        }
    
    model.load_state_dict(state_dict)
    model.eval()
    
    print(f"Loaded model from epoch {checkpoint['epoch']} with validation loss: {checkpoint['val_loss']:.4f}")
    return model, device

def prepare_prediction_input(station_csv, himawari_dir, config, prediction_time=None):
    """Prepare input data for prediction.

    Builds the two model inputs: a window of historical station measurements
    and a (possibly multi-band) Himawari satellite image resized to 224x224.

    Args:
        station_csv: path to the station measurement CSV.
        himawari_dir: directory containing Himawari NetCDF files.
        config: loaded configuration dict.
        prediction_time: optional timestamp to predict from; defaults to the
            latest station timestamp.

    Returns:
        dict with 'station_data' [1, T, 3], 'himawari_data' [1, C, 224, 224],
        'prediction_time', and the full 'station_df'.

    Raises:
        ValueError: if fewer historical points than required are available.
    """
    import torch.nn.functional as F

    # Load and prepare station data
    station_df = prepare_station_data(station_csv)
    
    # If no specific prediction time provided, use the latest available data
    if prediction_time is None:
        prediction_time = station_df['timestamp'].max()
    else:
        prediction_time = pd.to_datetime(prediction_time, utc=True)
    
    # Historical window: historical_days * 24 h * 4 samples/h (15-min data).
    hist_start = prediction_time - timedelta(days=config['data']['historical_days'])
    hist_mask = (station_df['timestamp'] >= hist_start) & (station_df['timestamp'] < prediction_time)
    hist_station = station_df[hist_mask].copy()
    
    # Ensure we have exactly the right number of data points (480 for 5 days at 15min intervals)
    expected_points = config['data']['historical_days'] * 24 * 4
    if len(hist_station) > expected_points:
        # Take the most recent expected_points
        hist_station = hist_station.tail(expected_points)
    elif len(hist_station) < expected_points:
        raise ValueError(f"Insufficient historical data. Need {expected_points} points, got {len(hist_station)}")
    
    # Locate Himawari data for the prediction time (supports old and new data dirs).
    himawari_path = find_himawari_file(prediction_time, himawari_dir, config['data'].get('output_nc_dir'))
    
    if himawari_path:
        # Bands to load come from the configuration.
        band_list = config['data'].get('himawari_bands', ['tbb_13'])
        if len(band_list) > 1:
            # Multi-band mode.
            himawari_data = load_multi_band_himawari_data(himawari_path, band_list)
        else:
            # Single-band mode (backward compatible).
            himawari_data = load_himawari_data(himawari_path, band_list[0])
    else:
        # File not found locally: try downloading it.
        himawari_path = download_himawari_data(prediction_time, himawari_dir)
        himawari_data = load_himawari_data(himawari_path) if himawari_path else None
    
    # Only the 3 core variables used during training.
    station_features = ['direct_irradiance', 'diffuse_irradiance', 'temperature']
    x_station = hist_station[station_features].values.astype(np.float32)
    
    # Handle Himawari data
    if himawari_data is not None:
        x_himawari = himawari_data.astype(np.float32)
        
        # Single-band data is 2-D; add a leading channel axis so everything
        # downstream is uniformly [channels, height, width].
        if x_himawari.ndim == 2:
            x_himawari = x_himawari[np.newaxis, :, :]
        
        # Resize to 224x224 for the ViT model. One bilinear interpolate call
        # handles any number of channels, so the former separate single-band
        # and multi-band branches (which were byte-identical) are merged.
        x_himawari_tensor = torch.from_numpy(x_himawari)
        x_himawari_tensor = F.interpolate(
            x_himawari_tensor.unsqueeze(0),
            size=(224, 224),
            mode='bilinear',
            align_corners=False,
        ).squeeze(0)
        x_himawari = x_himawari_tensor.numpy()
    else:
        # Fall back to zero imagery when no Himawari data is available.
        num_channels = len(config['data'].get('himawari_bands', ['tbb_13']))
        x_himawari = np.zeros((num_channels, 224, 224), dtype=np.float32)
    
    # Convert to tensors and add batch dimension
    x_station_tensor = torch.from_numpy(x_station).unsqueeze(0)  # [1, T, 3]
    x_himawari_tensor = torch.from_numpy(x_himawari).unsqueeze(0)  # [1, channels, 224, 224]
    
    return {
        'station_data': x_station_tensor,
        'himawari_data': x_himawari_tensor,
        'prediction_time': prediction_time,
        'station_df': station_df
    }

def predict_future(model, device, input_data):
    """Run the model on prepared inputs and return predictions as a numpy array.

    Args:
        model: callable taking (station_data, himawari_data) tensors.
        device: torch.device to run inference on.
        input_data: dict with 'station_data' and 'himawari_data' tensors
            (batch dimension included).

    Returns:
        numpy array of predictions with the batch dimension removed.
    """
    with torch.no_grad():
        # Transfer inputs to the inference device and run the forward pass.
        station = input_data['station_data'].to(device)
        imagery = input_data['himawari_data'].to(device)
        output = model(station, imagery)
        # Back to host memory; drop the batch dimension.
        result = output.cpu().numpy()[0]
    return result

def denormalize_predictions(predictions, station_df):
    """Denormalize predictions back to physical units (3 core variables only).

    Prefers per-feature mean/std saved in ``normalization_stats.json`` next to
    this file; otherwise falls back to statistics computed from ``station_df``.

    Args:
        predictions: normalized model output of shape [T, 3] in the order
            direct_irradiance, diffuse_irradiance, temperature.
        station_df: DataFrame with the original (unnormalized) columns, used
            as the fallback source of mean/std.

    Returns:
        numpy array of the same shape as ``predictions`` with denormalized
        values (columns with no available statistics are left as zeros).
    """
    import json
    import os
    
    # Only the 3 core variables used during training.
    station_features = ['direct_irradiance', 'diffuse_irradiance', 'temperature']
    denormalized = np.zeros_like(predictions)
    
    def _apply_df_stats(i, feature, announce=False):
        # Fallback: denormalize column i using station_df statistics.
        # Bug fix: the stats-file fallback previously skipped this column
        # guard and would raise KeyError on a missing column, unlike the
        # other two fallback paths.
        if feature not in station_df.columns:
            return
        mean = station_df[feature].mean()
        std = station_df[feature].std()
        denormalized[:, i] = predictions[:, i] * std + mean
        if announce:
            print(f"反标准化 {feature}: 使用station_df统计量 mean={mean:.2f}, std={std:.2f}")
    
    # Try loading the normalization statistics saved at training time.
    stats_file = os.path.join(os.path.dirname(__file__), 'normalization_stats.json')
    if os.path.exists(stats_file):
        try:
            with open(stats_file, 'r') as f:
                stats = json.load(f)
            
            for i, feature in enumerate(station_features):
                if feature in stats:
                    mean = stats[feature]['mean']
                    std = stats[feature]['std']
                    denormalized[:, i] = predictions[:, i] * std + mean
                    print(f"反标准化 {feature}: 使用保存的统计量 mean={mean:.2f}, std={std:.2f}")
                else:
                    _apply_df_stats(i, feature, announce=True)
        except Exception as e:
            print(f"加载标准化统计量失败: {e}, 使用station_df统计量")
            for i, feature in enumerate(station_features):
                _apply_df_stats(i, feature)
    else:
        # No stats file at all: use station_df statistics throughout.
        print("未找到标准化统计量文件，使用station_df统计量进行反标准化")
        for i, feature in enumerate(station_features):
            _apply_df_stats(i, feature)
    
    return denormalized

def save_predictions(predictions, prediction_time, output_dir, station_df, config):
    """Write denormalized predictions to a timestamped CSV at 15-minute resolution.

    Args:
        predictions: normalized model output, shape [T, 3].
        prediction_time: forecast start timestamp.
        output_dir: directory for the CSV (created if missing).
        station_df: DataFrame used for denormalization statistics.
        config: configuration dict; reads data.future_hours.

    Returns:
        (result_df, output_path): the saved DataFrame and the CSV file path.
    """
    os.makedirs(output_dir, exist_ok=True)
    
    values = denormalize_predictions(predictions, station_df)
    
    # One timestamp per 15-minute step across the forecast horizon.
    future_hours = config['data']['future_hours']
    future_times = [
        prediction_time + timedelta(hours=h, minutes=m)
        for h in range(future_hours)
        for m in (0, 15, 30, 45)
    ]
    
    # Reconcile prediction length with the expected number of steps.
    expected_points = future_hours * 4
    if len(values) != expected_points:
        print(f"Warning: Predictions have {len(values)} points, expected {expected_points}")
        if len(values) > expected_points:
            values = values[:expected_points]
        else:
            pad = np.zeros((expected_points - len(values), values.shape[1]))
            values = np.concatenate([values, pad], axis=0)
    
    # Only the 3 core variables used during training are written out.
    result_df = pd.DataFrame({
        'timestamp': future_times[:len(values)],
        'predicted_direct_irradiance': values[:, 0],
        'predicted_diffuse_irradiance': values[:, 1],
        'predicted_temperature': values[:, 2]
    })
    
    output_path = os.path.join(
        output_dir,
        f'predictions_{prediction_time.strftime("%Y%m%d_%H%M")}.csv'
    )
    result_df.to_csv(output_path, index=False)
    
    print(f"Predictions saved to: {output_path}")
    return result_df, output_path

def plot_predictions(predictions, prediction_time, station_df, output_dir, config):
    """Plot the 3 core predicted variables over the forecast horizon and save a PNG.

    Args:
        predictions: normalized model output, shape [T, 3].
        prediction_time: forecast start timestamp (used in title and filename).
        station_df: DataFrame used for denormalization statistics.
        output_dir: directory for the image file (created if missing).
        config: configuration dict (unused here, kept for interface symmetry).

    Returns:
        Path of the saved PNG file.
    """
    os.makedirs(output_dir, exist_ok=True)
    
    values = denormalize_predictions(predictions, station_df)
    
    # Convert 15-minute step indices to fractional hours for the x-axis.
    hours = np.arange(len(values)) / 4.0
    
    fig, axes = plt.subplots(1, 3, figsize=(18, 6))
    fig.suptitle(f'24-Hour Weather Predictions starting from {prediction_time.strftime("%Y-%m-%d %H:%M")}', fontsize=16)
    
    # One panel per core variable.
    panels = [
        ('Direct Irradiance (W/m²)', 'red'),
        ('Diffuse Irradiance (W/m²)', 'blue'),
        ('Temperature (°C)', 'green'),
    ]
    for idx, (label, color) in enumerate(panels):
        ax = axes[idx]
        ax.plot(hours, values[:, idx], color=color, linewidth=2, marker='o', markersize=3)
        ax.set_title(label, fontsize=12)
        ax.set_xlabel('Hours into Future', fontsize=10)
        ax.set_ylabel(label, fontsize=10)
        ax.grid(True, alpha=0.3)
        ax.set_xlim(0, 24)
        ax.set_xticks(np.arange(0, 25, 3))  # tick mark every 3 hours
    
    plt.tight_layout()
    
    plot_path = os.path.join(
        output_dir,
        f'predictions_plot_{prediction_time.strftime("%Y%m%d_%H%M")}.png'
    )
    plt.savefig(plot_path, dpi=300, bbox_inches='tight')
    plt.close()
    
    print(f"Plot saved to: {plot_path}")
    return plot_path

def main():
    """Run the full inference pipeline: load model, predict, save CSV and plot."""
    config = load_config()
    
    # Locate the best checkpoint produced by training.
    checkpoint_path = os.path.join(config['training']['save_dir'], 'best_model.pth')
    if not os.path.exists(checkpoint_path):
        raise FileNotFoundError(f"Model checkpoint not found at {checkpoint_path}. Please train the model first.")
    
    model, device = load_trained_model(config, checkpoint_path)
    
    # Assemble model inputs; bail out cleanly on any data problem.
    try:
        input_data = prepare_prediction_input(
            config['data']['station_csv'],
            config['data']['himawari_dir'],
            config
        )
    except Exception as e:
        print(f"Error preparing input data: {e}")
        return
    
    predictions = predict_future(model, device, input_data)
    
    # Persist results as CSV and as a plot image.
    output_dir = config['inference']['output_dir']
    result_df, csv_path = save_predictions(
        predictions,
        input_data['prediction_time'],
        output_dir,
        input_data['station_df'],
        config
    )
    plot_path = plot_predictions(
        predictions,
        input_data['prediction_time'],
        input_data['station_df'],
        output_dir,
        config
    )
    
    # Console summary.
    print("\nPrediction Summary:")
    print("=" * 50)
    print(f"Prediction time: {input_data['prediction_time']}")
    print(f"Future hours predicted: {config['data']['future_hours']}")
    print(f"Time resolution: {config['data']['time_resolution']}")
    print(f"Variables predicted: Direct Irradiance, Diffuse Irradiance, Temperature")
    print(f"Results saved to: {csv_path}")
    print(f"Visualization saved to: {plot_path}")
    
    print(f"\nFirst 12 time points (3 hours) of predictions:")
    print(result_df.head(12).to_string(index=False))

# Script entry point: run the inference pipeline when executed directly.
if __name__ == "__main__":
    main()
