import logging
import os
from datetime import datetime, timedelta
from datetime import timezone
from typing import Optional

import numpy as np
import pandas as pd
import pytz
import torch
import xarray as xr
import yaml
from torch.utils.data import Dataset

def load_config(config_path="config.yaml"):
    """Load the YAML configuration file.

    Args:
        config_path: Path to the YAML configuration file.

    Returns:
        The parsed configuration (typically a dict).
    """
    # Explicit UTF-8 so configs containing non-ASCII text (this project uses
    # Chinese comments) load correctly regardless of the platform locale.
    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)
    return config

# Xiangyang station coordinates (latitude/longitude, degrees)
XIANGYANG_LAT = 32.137729
XIANGYANG_LON = 112.259116

# Satellite data crop radius: 250 km around the station (~2.25 degrees)
RADIUS_DEGREES = 2.25

class XiangyangHimawariCollector:
    """Himawari-9 data collector for the Xiangyang region.

    All cloud-side (remote) functionality is intentionally disabled; only
    local data files are used by this project.
    """

    def __init__(self):
        # Nothing to set up: remote collection is disabled.
        pass

    def collect_himawari9_data(self, target_time: datetime, band: str = 'B01') -> Optional[xr.Dataset]:
        """Collect Himawari-9 data (local-only mode).

        Always returns None after printing a warning, since cloud data
        collection has been disabled.
        """
        print(f"警告：云端数据收集功能已禁用，时间: {target_time}")
        return None

def download_himawari_data(timestamp, himawari_dir):
    """Download Himawari-9 satellite data (disabled; local data only).

    This is a deliberate no-op kept for API compatibility: it prints a
    warning mentioning the requested time and always returns None.
    """
    print(f"警告：下载功能已禁用，仅使用本地数据。请求时间: {timestamp}")
    return None

def load_himawari_data(file_path, band_name=None):
    """Load a NetCDF file and extract one band as a normalized 2-D array.

    Supports both the new data format (albedo_*, tbb_*, SAZ, SAA, SOZ, SOA)
    and the legacy format (radiance_*).

    Args:
        file_path: Path to the NetCDF file.
        band_name: Optional variable name to extract. When missing or not
            present in the file, a band is auto-detected: tbb_* (brightness
            temperature) preferred, then albedo_*, then any >=2-D variable.

    Returns:
        A z-score-normalized numpy array, or None on any load error.
    """
    try:
        # Context manager guarantees the dataset handle is closed even when
        # an exception is raised (the original code leaked the handle).
        with xr.open_dataset(file_path, decode_timedelta=False) as ds:
            # Prefer the explicitly requested band when it exists.
            if band_name and band_name in ds.data_vars:
                data = ds[band_name].values
            else:
                # Auto-detect an available band (supports new/old formats).
                tbb_vars = [var for var in ds.data_vars if var.startswith('tbb_')]
                albedo_vars = [var for var in ds.data_vars if var.startswith('albedo_')]
                if tbb_vars:
                    # Infrared brightness-temperature bands take priority.
                    data = ds[tbb_vars[0]].values
                elif albedo_vars:
                    data = ds[albedo_vars[0]].values
                else:
                    # Last resort: any remaining >=2-D data variable.
                    for var_name in ds.data_vars:
                        if len(ds[var_name].dims) >= 2 and var_name not in ['latitude', 'longitude']:
                            data = ds[var_name].values
                            break
                    else:
                        raise ValueError("No suitable 2D data variable found in NetCDF file")

        # Replace NaNs so the statistics below stay finite.
        data = np.nan_to_num(data, nan=0.0)

        # Z-score normalization; skip the division for a constant band.
        data_mean = np.mean(data)
        data_std = np.std(data)
        if data_std > 0:
            data = (data - data_mean) / data_std
        else:
            data = data - data_mean  # avoid division by zero

        return data
    except Exception as e:
        print(f"Error loading NetCDF file {file_path}: {e}")
        return None

def load_multi_band_himawari_data(file_path, band_list):
    """Load several bands from a NetCDF file as a stacked, normalized array.

    Each band is z-score-normalized independently. Bands missing from the
    file are replaced by zero placeholders so channel order stays aligned
    with ``band_list``.

    Args:
        file_path: Path to the NetCDF file.
        band_list: Variable names to load, in channel order.

    Returns:
        A numpy array of shape [channels, height, width], or None when
        band_list is empty or the file cannot be read.
    """
    try:
        # Context manager guarantees the dataset handle is closed even when
        # an exception is raised (the original code leaked the handle).
        with xr.open_dataset(file_path, decode_timedelta=False) as ds:
            multi_band_data = []

            for band_name in band_list:
                if band_name in ds.data_vars:
                    # Replace NaNs so the statistics below stay finite.
                    band_data = np.nan_to_num(ds[band_name].values, nan=0.0)

                    # Per-band z-score normalization; skip division for a
                    # constant band to avoid dividing by zero.
                    band_mean = np.mean(band_data)
                    band_std = np.std(band_data)
                    if band_std > 0:
                        band_data = (band_data - band_mean) / band_std
                    else:
                        band_data = band_data - band_mean

                    multi_band_data.append(band_data)
                else:
                    print(f"警告: 波段 {band_name} 在文件 {file_path} 中不存在")
                    # Zero placeholder keeps the channel layout consistent.
                    if multi_band_data:
                        placeholder = np.zeros_like(multi_band_data[0])
                    else:
                        # No band loaded yet: infer the spatial shape from any
                        # >=2-D variable, falling back to a default size.
                        for var_name in ds.data_vars:
                            if len(ds[var_name].dims) >= 2 and var_name not in ['latitude', 'longitude']:
                                placeholder = np.zeros_like(ds[var_name].values)
                                break
                        else:
                            placeholder = np.zeros((224, 224))  # default size
                    multi_band_data.append(placeholder)

        # Stack bands into [channels, height, width].
        if multi_band_data:
            return np.stack(multi_band_data, axis=0)
        return None

    except Exception as e:
        print(f"Error loading multi-band NetCDF file {file_path}: {e}")
        return None

def find_himawari_file(timestamp, himawari_dir, output_nc_dir=None, verbose=False):
    """Find the local Himawari data file matching a timestamp.

    Searches the new-format directory first (himawari_YYYYMMDD_HHMM_*.nc,
    matched with a ±30-minute tolerance), then falls back to the legacy
    naming scheme himawari_xiangyang_YYYYMMDD_HHMM.nc (exact time only).

    Args:
        timestamp: Target time; a naive datetime is assumed to be UTC.
        himawari_dir: Legacy data directory.
        output_nc_dir: Optional new-format data directory (searched first).
        verbose: When True, print match/miss diagnostics.

    Returns:
        Full path of the best-matching file, or None if nothing matches.
    """
    # BUG FIX: this formatted string was previously stored in `time_str` and
    # clobbered inside the filename-parsing loop below (`time_str = parts[2]`),
    # corrupting the legacy fallback filename. Use a dedicated name instead.
    target_str = timestamp.strftime("%Y%m%d_%H%M")
    target_time = timestamp

    # Normalize the target time to aware UTC for safe comparisons.
    if target_time.tzinfo is None:
        target_time = target_time.replace(tzinfo=timezone.utc)

    # Search the new-format directory first.
    if output_nc_dir and os.path.exists(output_nc_dir):
        nc_files = [f for f in os.listdir(output_nc_dir) if f.startswith('himawari_') and f.endswith('.nc')]

        # Parse the acquisition time from each filename:
        # himawari_YYYYMMDD_HHMM_*
        file_times = []
        for filename in nc_files:
            try:
                parts = filename.split('_')
                if len(parts) >= 3:
                    file_time = datetime.strptime(f"{parts[1]}{parts[2]}", "%Y%m%d%H%M")
                    # strptime yields a naive datetime; tag it as UTC.
                    file_time = file_time.replace(tzinfo=timezone.utc)
                    file_times.append((file_time, filename))
            except (ValueError, IndexError):
                # Skip files whose names don't follow the pattern.
                continue

        # Pick the closest file within a ±30-minute tolerance.
        best_match = None
        min_diff = float('inf')
        for file_time, filename in file_times:
            time_diff = abs((file_time - target_time).total_seconds())
            if time_diff <= 1800 and time_diff < min_diff:  # 30-minute tolerance
                min_diff = time_diff
                best_match = filename

        if best_match:
            if verbose:
                print(f"找到时间匹配文件: {best_match} (时间差: {min_diff/60:.1f}分钟)")
            return os.path.join(output_nc_dir, best_match)

    # Fall back to the legacy directory layout (backward compatibility).
    old_filename = f"himawari_xiangyang_{target_str}.nc"
    old_path = os.path.join(himawari_dir, old_filename)

    if os.path.exists(old_path):
        return old_path

    if verbose:
        print(f"未找到匹配的Himawari数据文件，时间: {target_time}")
    return None

def prepare_station_data(station_csv):
    """Load and preprocess ground-station data.

    Reads the CSV, coerces the header to the expected schema, parses
    timestamps as UTC, imputes missing values with the column mean, and
    z-score-normalizes the three core variables.

    Args:
        station_csv: Path (or file-like object) of the station CSV.

    Returns:
        The preprocessed pandas DataFrame.
    """
    df = pd.read_csv(station_csv)

    # Expected schema of the station CSV.
    expected_columns = ['timestamp', 'direct_irradiance', 'diffuse_irradiance', 'wind_speed', 'wind_direction', 'temperature']

    # Rename columns when the header does not match the expected schema.
    if list(df.columns) != expected_columns:
        if len(df.columns) == 6:
            df.columns = expected_columns
        else:
            # Column count mismatch: keep 'timestamp' plus generic names.
            df.columns = ['timestamp'] + [f'col_{i}' for i in range(1, len(df.columns))]

    # Parse timestamps with an explicit format, always as UTC.
    df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y/%m/%d %H:%M', utc=True)

    # Impute missing values for the 3 core variables with the column mean.
    numeric_cols = ['direct_irradiance', 'diffuse_irradiance', 'temperature']
    for col in numeric_cols:
        if col in df.columns:
            df[col] = df[col].fillna(df[col].mean())

    # Z-score normalization. Guard against zero (constant column) or NaN
    # (single row) std so we never divide by zero — consistent with the
    # satellite-band normalization in this module.
    for col in numeric_cols:
        if col in df.columns:
            mean = df[col].mean()
            std = df[col].std()
            if std > 0:
                df[col] = (df[col] - mean) / std
            else:
                df[col] = df[col] - mean

    return df

class WeatherDataset(Dataset):
    """PyTorch dataset pairing station time series with Himawari imagery.

    Each item bundles:
      - historical station data over ``historical_days`` (15-min intervals),
      - the Himawari image nearest the current timestamp (local files only),
      - future station data over ``future_hours`` as the prediction target.
    """

    def __init__(self, station_csv, himawari_dir, historical_days=7, future_hours=24):
        """
        Args:
            station_csv: Path to the station measurement CSV.
            himawari_dir: Legacy Himawari data directory.
            historical_days: Length of the historical input window (days).
            future_hours: Length of the prediction window (hours).
        """
        self.config = load_config()
        self.station_df = prepare_station_data(station_csv)
        self.himawari_dir = himawari_dir
        self.historical_days = historical_days
        self.future_hours = future_hours
        self.valid_indices = self._get_valid_indices()

    def _get_valid_indices(self):
        """Return indices with full historical and future coverage (15-min data)."""
        valid_indices = []

        # Required sample counts at 4 points per hour (15-minute intervals).
        historical_points = self.historical_days * 24 * 4
        future_points = self.future_hours * 4

        for idx in range(len(self.station_df)):
            current_time = self.station_df.iloc[idx]['timestamp']

            # Window bounds; localize to UTC if a bound is naive so the
            # comparisons below stay timezone-aware.
            hist_start = current_time - timedelta(days=self.historical_days)
            hist_start = hist_start.tz_localize('UTC') if hist_start.tz is None else hist_start
            future_end = current_time + timedelta(hours=self.future_hours)
            future_end = future_end.tz_localize('UTC') if future_end.tz is None else future_end

            # Vectorized timezone-aware comparisons on the timestamp column.
            hist_mask = (self.station_df['timestamp'] >= hist_start) & (self.station_df['timestamp'] <= current_time)
            future_mask = (self.station_df['timestamp'] > current_time) & (self.station_df['timestamp'] <= future_end)

            # Count actual samples, not calendar spans, so data gaps disqualify.
            if hist_mask.sum() >= historical_points and future_mask.sum() >= future_points:
                valid_indices.append(idx)

        return valid_indices

    def __len__(self):
        return len(self.valid_indices)

    def __getitem__(self, idx):
        actual_idx = self.valid_indices[idx]
        current_time = self.station_df.iloc[actual_idx]['timestamp']

        # Historical station window (excludes the current time point).
        hist_start = current_time - timedelta(days=self.historical_days)
        hist_mask = (self.station_df['timestamp'] >= hist_start) & (self.station_df['timestamp'] < current_time)
        hist_station = self.station_df[hist_mask].copy()

        # Future station window (the labels).
        future_end = current_time + timedelta(hours=self.future_hours)
        future_mask = (self.station_df['timestamp'] > current_time) & (self.station_df['timestamp'] <= future_end)
        future_station = self.station_df[future_mask].copy()

        # Himawari data for the current time — local files only. Reuse the
        # config cached in __init__ instead of re-reading config.yaml from
        # disk on every item (the original reloaded it per __getitem__ call).
        config = self.config
        himawari_path = find_himawari_file(current_time, self.himawari_dir, config['data'].get('output_nc_dir'), verbose=False)

        band_list = config['data'].get('himawari_bands', ['tbb_13'])
        if himawari_path:
            if len(band_list) > 1:
                # Multi-band mode.
                himawari_data = load_multi_band_himawari_data(himawari_path, band_list)
            else:
                # Single-band mode (backward compatible).
                himawari_data = load_himawari_data(himawari_path, band_list[0])
        else:
            # No file found: fall through to the zero-filled placeholder below.
            himawari_data = None
            print(f"警告: 未找到Himawari数据文件，时间: {current_time}")

        # Station inputs: 3 core variables (wind excluded as a noise source).
        station_features = ['direct_irradiance', 'diffuse_irradiance', 'temperature']
        x_station = hist_station[station_features].values.astype(np.float32)

        if himawari_data is not None:
            x_himawari = himawari_data.astype(np.float32)

            # Single-band arrays are 2-D; add a channel axis so everything
            # downstream is [channels, height, width].
            if len(x_himawari.shape) == 2:
                x_himawari = x_himawari[np.newaxis, :, :]

            # Resize to 224x224 for the ViT model. F.interpolate expects a
            # batch dimension, so add one around the call and strip it after.
            # (The original had two identical branches here; the tensor is
            # always 3-D at this point, so one call suffices.)
            import torch.nn.functional as F
            x_himawari_tensor = F.interpolate(
                torch.from_numpy(x_himawari).unsqueeze(0),
                size=(224, 224),
                mode='bilinear',
                align_corners=False,
            ).squeeze(0)
            x_himawari = x_himawari_tensor.numpy()
        else:
            # Missing imagery: zeros with the configured channel count so
            # batch shapes stay consistent.
            x_himawari = np.zeros((len(band_list), 224, 224), dtype=np.float32)

        # Labels.
        y_future = future_station[station_features].values.astype(np.float32)

        return {
            'station_data': torch.from_numpy(x_station),
            'himawari_data': torch.from_numpy(x_himawari),
            'future_data': torch.from_numpy(y_future)
        }
