import os
from datetime import datetime, timedelta
from typing import Union, List, Dict, Tuple, Optional
from collections import OrderedDict

import numpy as np
import pandas as pd
import xarray as xr
import torch
from torch.utils.data import Dataset

from src.utils.day_utils import get_day_progress, get_year_progress, get_feature_progress


class MetPolDataset(Dataset):
    """Paired meteorology (ERA5) / pollution (monitoring-station) dataset.

    Each item couples:
      * a block-aligned window of gridded ERA5 surface fields, min-max
        normalized per channel,
      * hourly pollutant readings for every site (normalized, gap-filled,
        plus a missing-data mask), grouped by k-means cluster, and
      * calendar / cyclic time-encoding features for the same window.
    """

    # ERA5 surface variables extracted from the yearly NetCDF files.
    ERA5_VARS = ["u10", "v10", "t2m"]

    # Spatial crop: [lat_min, lat_max, lon_min, lon_max].
    LIMIT_REGION = [27, 32, 118, 123]

    # Default split boundaries (format YYYYMMDDHH). The '...' entries are
    # placeholders and must be overridden by the caller.
    DEFAULT_YEAR_ST_END: Dict[str, List[str]] = {
        'train': ['...', '...'],
        'val': ['...', '...'],
        'test': ['...', '...'],
    }

    def __init__(
        self,
        met_data_path: str = "data/era5",
        pol_data_path: str = "data/pol",
        flag: str = 'train',
        year_st_end: Optional[Dict[str, List[str]]] = None,
        limit_region: Optional[List[int]] = None,
        era5_vars: Optional[List[str]] = None,
        site_info: str = "data/kmeans.csv",
        cv_mean: Optional[torch.Tensor] = None,
        cv_std: Optional[torch.Tensor] = None,
        met_norm_file: str = "data/norm_files/met_min_max.npz",
        pol_norm_file: str = "data/norm_files/pm25_stats.npz",
        lead_time: int = 4,
        block_time: int = 6,
        target_pol: str = "PM2.5"
    ):
        """Build the dataset for one split.

        Args:
            met_data_path: directory holding one "<year>.nc" ERA5 file per year.
            pol_data_path: directory holding "<site>_2015_2023.csv" files.
            flag: split name ('train' / 'val' / 'test').
            year_st_end: split -> [start, end] "YYYYMMDDHH" strings; defaults
                to DEFAULT_YEAR_ST_END (placeholders — override in practice).
            limit_region: [lat_min, lat_max, lon_min, lon_max] crop;
                defaults to LIMIT_REGION.
            era5_vars: ERA5 variable names to extract; defaults to ERA5_VARS.
            site_info: CSV with siteCode / latitude / longitude / cluster.
            cv_mean, cv_std: ImageNet-style channel stats; currently only
                stored (their normalization is commented out in __getitem__).
            met_norm_file: .npz with 'min_vals' / 'max_vals' per met channel.
            pol_norm_file: .npz with 'mean' / 'std' for the target pollutant.
            lead_time: number of forecast blocks.
            block_time: hours per met block.
            target_pol: pollutant column read from the site CSVs.

        Note: mutable objects (dict/list/tensor) are deliberately NOT used as
        default argument values — they would be shared across instances.
        """
        super().__init__()

        self.met_data_path = met_data_path
        self.pol_data_path = pol_data_path

        self.flag = flag
        self.year_st_end = self.DEFAULT_YEAR_ST_END if year_st_end is None else year_st_end

        self.limit_region = self.LIMIT_REGION if limit_region is None else limit_region
        self.era5_vars = self.ERA5_VARS if era5_vars is None else era5_vars
        self.site_info = site_info
        self.cv_mean = torch.tensor([0.485, 0.456, 0.406]) if cv_mean is None else cv_mean
        self.cv_std = torch.tensor([0.229, 0.224, 0.225]) if cv_std is None else cv_std

        self.met_norm_file = met_norm_file
        self.pol_norm_file = pol_norm_file

        self.lead_time = lead_time
        self.block_time = block_time
        self.target_pol = target_pol

        # Pollutant normalization stats (load the .npz a single time).
        pol_stats = np.load(self.pol_norm_file)
        self.target_mean = torch.from_numpy(pol_stats['mean']).float()
        self.target_std = torch.from_numpy(pol_stats['std']).float()

        # Met normalization stats were previously re-read from disk on every
        # __getitem__ call; hoist the load here and keep them as tensors.
        met_stats = np.load(self.met_norm_file)
        self.met_min_vals = torch.from_numpy(met_stats['min_vals']).float()
        self.met_max_vals = torch.from_numpy(met_stats['max_vals']).float()

        # Cache keyed by year string (ERA5 arrays) and "pol_<site>" (DataFrames).
        self.data_cache = {}

        st_str, end_str = self.year_st_end[flag]
        self.timestamps = self._generate_timestamps(st_str, end_str)

        self._load_site_clusters()

    def _load_site_clusters(self) -> None:
        """Read the k-means site CSV and build the cluster bookkeeping maps."""
        df_kmeans = pd.read_csv(self.site_info)

        self.cluster_order: List[int] = []        # cluster ids, first-seen order
        self.clusters: "OrderedDict[int, List[str]]" = OrderedDict()
        self.site_coords: "OrderedDict[int, List[Tuple[float, float]]]" = OrderedDict()
        self.site_to_cluster: Dict[str, int] = {}
        self.map_order: List[int] = []            # per-row cluster id, CSV order

        coords_by_site: Dict[str, Tuple[float, float]] = {}

        for _, row in df_kmeans.iterrows():
            site_code = row['siteCode']
            lat = float(row['latitude'])
            lon = float(row['longitude'])
            cluster = int(row['cluster'])

            self.map_order.append(cluster)
            coords_by_site[site_code] = (lat, lon)
            self.site_to_cluster[site_code] = cluster

            if cluster not in self.clusters:
                self.cluster_order.append(cluster)
                self.clusters[cluster] = []
            self.clusters[cluster].append(site_code)

        # Per-cluster coordinate lists, aligned with self.clusters ordering.
        for cluster_id in self.cluster_order:
            self.site_coords[cluster_id] = [
                coords_by_site[code] for code in self.clusters[cluster_id]
            ]

    def _generate_timestamps(self, start: str, end: str) -> List[datetime]:
        """Return every hourly timestamp from `start` to `end`, inclusive.

        Both bounds are "YYYYMMDDHH" strings.
        """
        fmt = "%Y%m%d%H"
        start_dt = datetime.strptime(start, fmt)
        end_dt = datetime.strptime(end, fmt)

        timestamps = []
        current = start_dt
        while current <= end_dt:
            timestamps.append(current)
            current += timedelta(hours=1)
        return timestamps

    def _read_xr_(self, year: str):
        """Load (and cache) one year of ERA5 data as a numpy array.

        Returns an array shaped (time, var, lat, lon), cropped to
        self.limit_region, with variables ordered as self.era5_vars.
        """
        # Consult the cache before touching the filesystem.
        if year in self.data_cache:
            return self.data_cache[year]

        file_path = os.path.join(self.met_data_path, f"{year}.nc")
        with xr.open_dataset(file_path, engine='netcdf4') as ds:
            # Latitude must be ascending for the slice below to select anything.
            ds = ds.sortby('latitude', ascending=True)
            cropped = ds.sel(
                latitude=slice(self.limit_region[0], self.limit_region[1]),
                longitude=slice(self.limit_region[2], self.limit_region[3])
            )
            # to_array() yields (var, time, lat, lon); move time to the front.
            data_arr = cropped[self.era5_vars].to_array().values
            data_arr = data_arr.transpose(1, 0, 2, 3)

        self.data_cache[year] = data_arr
        return data_arr

    def _read_pol_sequence(self, site_code: str, start_time: datetime, total_steps: int) -> List[Optional[float]]:
        """Read `total_steps` hourly pollutant values for one site.

        The sequence starts at `start_time` + 8 hours. NOTE(review): the +8h
        offset presumably converts UTC met time to UTC+8 station local time —
        confirm against the data pipeline. Missing hours and NaN readings are
        returned as None.
        """
        cache_key = f"pol_{site_code}"
        if cache_key not in self.data_cache:
            file_path = os.path.join(self.pol_data_path, f"{site_code}_2015_2023.csv")
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"Pollution data file not found: {file_path}")

            df = pd.read_csv(file_path)
            # Build a datetime index from year/month/day/hour columns and drop
            # duplicated hours, keeping the first occurrence.
            df['datetime'] = pd.to_datetime(df[['year', 'month', 'day', 'hour']])
            df = df.set_index('datetime')
            df.index.name = 'datetime'
            df = df[~df.index.duplicated(keep='first')]

            self.data_cache[cache_key] = df

        df = self.data_cache[cache_key]

        sequence_times = [
            pd.Timestamp(start_time + timedelta(hours=8) + timedelta(hours=i)).floor('h')
            for i in range(total_steps)
        ]

        result: List[Optional[float]] = []
        for ts in sequence_times:
            try:
                value = df.loc[ts, self.target_pol]
                result.append(None if pd.isna(value) else float(value))
            except KeyError:
                # Hour not present in the file -> missing.
                result.append(None)
            except Exception:
                # Deliberate best-effort: any other lookup/convert problem is
                # treated as a missing reading rather than aborting the item.
                result.append(None)

        return result

    def _process_datetime_list(self, datetime_list) -> torch.Tensor:
        """Encode datetimes as an (N, 4) tensor [sin_day, cos_day, sin_year, cos_year]."""
        timestamps = np.array([dt.timestamp() for dt in datetime_list], dtype=np.float64)

        # get_day_progress returns a 2-D array; only column 0 is used here.
        # NOTE(review): presumably that column is the reference-longitude
        # progress — confirm against day_utils.
        day_progress = get_day_progress(timestamps)[:, 0]
        year_progress = get_year_progress(timestamps)

        sin_day, cos_day = get_feature_progress(day_progress)
        sin_year, cos_year = get_feature_progress(year_progress)
        features = np.stack([sin_day, cos_day, sin_year, cos_year], axis=1)

        return torch.tensor(features, dtype=torch.float32)

    def _get_time_features_by_index(self, current_time: int, total_pol_hours: int):
        """Return (months, days, hours) int32 tensors for `total_pol_hours`
        consecutive dataset timestamps starting at index `current_time`.

        Raises:
            ValueError: if fewer than `total_pol_hours` timestamps remain.
        """
        end_idx = current_time + total_pol_hours
        selected_timestamps = self.timestamps[current_time:end_idx]

        if len(selected_timestamps) != total_pol_hours:
            raise ValueError(
                f"Requested {total_pol_hours} hours, but only {len(selected_timestamps)} "
                f"available from index {current_time}."
            )

        months = torch.tensor([dt.month for dt in selected_timestamps], dtype=torch.int32)
        days = torch.tensor([dt.day for dt in selected_timestamps], dtype=torch.int32)
        hours = torch.tensor([dt.hour for dt in selected_timestamps], dtype=torch.int32)

        return months, days, hours

    def __getitem__(self, index):
        """Assemble one sample anchored at self.timestamps[index].

        Returns:
            (x_met, pol_seq_dict, pol_mask_dict, time_shift, met_time,
             month, day, hour)
        """
        current_time = self.timestamps[index]
        # Shift forward to the NEXT block boundary. When current_time is
        # already aligned the shift is a full block (hour % block == 0 gives
        # shift == block_time), so base_time is always strictly later.
        shift = timedelta(hours=(self.block_time - current_time.hour % self.block_time))
        base_time = current_time + shift

        num_history_steps = 2
        total_steps = num_history_steps + self.lead_time
        # Hourly pollution span covered by the met window. Previously
        # hard-coded as "* 6"; using block_time keeps it consistent with
        # __len__ for non-default block sizes (identical when block_time == 6).
        total_pol_hours = (total_steps - 1) * self.block_time

        required_times = [
            base_time + timedelta(hours=i * self.block_time)
            for i in range(total_steps)
        ]

        # --- meteorology -----------------------------------------------------
        met_data_list = []
        for req_time in required_times:
            year_start = datetime(req_time.year, 1, 1, 0)
            delta_hours = (req_time - year_start).total_seconds() / 3600
            # Block-aligned step index inside the yearly file.
            step_idx = int(delta_hours // self.block_time)
            met_data_list.append(self._read_xr_(str(req_time.year))[step_idx])

        x_met = torch.from_numpy(np.stack(met_data_list, axis=0)).float()

        # Per-channel min-max normalization (stats are loaded once in __init__).
        x_met = x_met.permute(0, 2, 3, 1)
        x_met = (x_met - self.met_min_vals) / (self.met_max_vals - self.met_min_vals)
        # x_met = (x_met - self.cv_mean) / self.cv_std
        x_met = x_met.permute(0, 3, 1, 2)

        # --- pollution -------------------------------------------------------
        pol_seq_dict = {}
        pol_mask_dict = {}

        for cluster_id in self.cluster_order:
            site_codes = self.clusters[cluster_id]
            num_sites = len(site_codes)

            pol_matrix = torch.full((num_sites, total_pol_hours), torch.nan, dtype=torch.float32)
            for i, site_code in enumerate(site_codes):
                pol_list = self._read_pol_sequence(site_code=site_code, start_time=current_time, total_steps=total_pol_hours)
                pol_matrix[i] = torch.tensor(
                    [torch.nan if v is None else v for v in pol_list],
                    dtype=torch.float32,
                )

            # True where the reading is missing; shape (hours, sites, 1).
            pol_mask_dict[cluster_id] = torch.isnan(pol_matrix).transpose(0, 1).unsqueeze(-1)

            # Gap-fill along time per site (backward then forward); sites with
            # no valid reading in the window fall back to the dataset mean.
            df = pd.DataFrame(pol_matrix.numpy())
            df = df.bfill(axis=1).ffill(axis=1)
            pol_filled = torch.tensor(df.values, dtype=torch.float32)
            # nan_to_num's `nan` argument expects a Number, not a Tensor;
            # float() makes the scalar assumption on target_mean explicit.
            pol_filled = torch.nan_to_num(pol_filled, nan=float(self.target_mean))
            pol_filled = (pol_filled - self.target_mean) / self.target_std
            pol_seq_dict[cluster_id] = pol_filled.transpose(0, 1).unsqueeze(-1)

        # --- time features ---------------------------------------------------
        time_shift = torch.tensor(int(shift.total_seconds() // 3600), dtype=torch.long)
        met_time = self._process_datetime_list(required_times)
        month, day, hour = self._get_time_features_by_index(index, total_pol_hours)

        return x_met, pol_seq_dict, pol_mask_dict, time_shift, met_time, month, day, hour

    def __len__(self) -> int:
        """Number of valid anchor indices.

        Each sample consumes (lead_time + 1) * block_time hourly timestamps
        starting at its index, so the tail of the range is excluded. Clamped
        at 0 so a degenerate (too-short) split cannot report a negative length.
        """
        return max(0, len(self.timestamps) - (self.lead_time + 1) * self.block_time + 1)

