import os
import numpy as np
import torch
from torch.utils.data import Dataset
import xarray as xr
import cv2


# Maps the long ERA5 variable names (as written in config files) to the short
# names used inside the NetCDF datasets. Lookups go through .get(v, v), so a
# name that is already short passes through unchanged.
VAR_NAME_MAP = {
    '2m_temperature': 't2m',
    '10m_u_component_of_wind': 'u10',
    '10m_v_component_of_wind': 'v10',
    'mean_sea_level_pressure': 'msl',
}

def resize_sample(sample, target_h, target_w, input_vars):
    """Bilinearly resize each listed variable of *sample* to (target_h, target_w).

    Each sample[var] is a float32 array of shape [T, H, W]; every time slice is
    resized independently. Mutates *sample* in place and also returns it.
    """
    for var in input_vars:
        frames = sample[var]
        out = np.empty((frames.shape[0], target_h, target_w), dtype=np.float32)
        for idx, frame in enumerate(frames):
            # cv2.resize takes (width, height), i.e. reversed axis order.
            out[idx] = cv2.resize(frame, (target_w, target_h), interpolation=cv2.INTER_LINEAR)
        sample[var] = out
    return sample

class WeatherDataset(Dataset):
    """Sliding-window dataset over ERA5 surface fields from NetCDF files.

    Each item is a pair (x, y) of float32 tensors:
        x: [input_len, C_in, H, W]      — input variables
        y: [forecast_len, C_out, H, W]  — target variables
    Windows are drawn either from each file independently, or from the
    time-concatenation of all files when ``concat_nc_files`` is set.
    """

    def __init__(self, nc_paths, config):
        self.input_len = config["data"]["input_length"]
        self.forecast_len = config["data"]["forecast_length"]
        self.input_vars = [VAR_NAME_MAP.get(v, v) for v in config["data"]["input_variables"]]
        self.target_vars = [VAR_NAME_MAP.get(v, v) for v in config["data"]["target_variables"]]
        self.lat_range = slice(*config["data"]["lat_range"])
        self.lon_range = slice(*config["data"]["lon_range"])
        self.normalize = config["data"]["normalize"]
        self.concat_files = config["data"].get("concat_nc_files", False)
        self.do_resize = config["data"].get("resize", False)

        # Union of every variable we will ever read or write.
        all_vars = set(self.input_vars) | set(self.target_vars)

        # Load every file into memory as {var: float32 array [T, H, W]}.
        self.data_list = []
        for path in nc_paths:
            # Context manager closes the NetCDF handle (previously leaked).
            with xr.open_dataset(path) as ds:
                sample = {
                    var: ds[var].values[:, self.lat_range, self.lon_range].astype(np.float32)
                    for var in all_vars
                }
            if self.do_resize:  # downsample as a concession to CPU-only training
                sample = resize_sample(sample, 60, 120, all_vars)
            self.data_list.append(sample)

        # Normalization stats over the union of input AND target variables, so
        # target-only variables can be normalized and denormalized too
        # (previously only input_vars had stats, causing KeyError for
        # target-only variables in the concat branch and in denormalize()).
        if self.normalize:
            self.mean = {}
            self.std = {}
            for var in all_vars:
                stacked = np.concatenate([s[var] for s in self.data_list], axis=0)
                self.mean[var] = stacked.mean()
                std = stacked.std()
                # Guard against constant fields to avoid division by zero.
                self.std[var] = std if std > 0 else 1.0
        else:
            self.mean = {var: 0.0 for var in all_vars}
            self.std = {var: 1.0 for var in all_vars}

        # Build the list of valid window start indices.
        window = self.input_len + self.forecast_len
        self.indices = []
        if self.concat_files:
            print('正在拼接')
            # One long time axis shared by all files.
            self.data = {}
            for var in all_vars:
                self.data[var] = np.concatenate([s[var] for s in self.data_list], axis=0)
                if self.normalize:
                    self.data[var] = (self.data[var] - self.mean[var]) / self.std[var]
            total_t = self.data[self.input_vars[0]].shape[0]
            self.indices.extend(range(total_t - window + 1))
        else:
            # Files stay separate: windows never cross a file boundary.
            self.data = self.data_list
            for file_idx, sample in enumerate(self.data):
                if self.normalize:
                    # Normalize the union of variables, matching the concat
                    # branch (previously only input_vars were normalized here,
                    # which was inconsistent between the two modes).
                    for var in all_vars:
                        sample[var] = (sample[var] - self.mean[var]) / self.std[var]
                file_t = sample[self.input_vars[0]].shape[0]
                for t in range(file_t - window + 1):
                    self.indices.append((file_idx, t))

    def __len__(self):
        """Number of available sliding windows."""
        return len(self.indices)

    def __getitem__(self, idx):
        """Return (x, y): x is [input_len, C_in, H, W], y is [forecast_len, C_out, H, W]."""
        if self.concat_files:
            t = self.indices[idx]
            source = self.data
        else:
            file_idx, t = self.indices[idx]
            # Bug fix: the original built y from self.data[var], but in this
            # branch self.data is a LIST of per-file dicts, so indexing it
            # with a variable name raised TypeError. Use the selected file.
            source = self.data[file_idx]
        x = np.stack(
            [source[var][t: t + self.input_len] for var in self.input_vars],
            axis=1,
        )
        y = np.stack(
            [source[var][t + self.input_len: t + self.input_len + self.forecast_len]
             for var in self.target_vars],
            axis=1,
        )
        return torch.from_numpy(x), torch.from_numpy(y)

    def denormalize(self, tensor):
        """Undo normalization in place on a [B, T, C, H, W] tensor.

        Channel order must match self.target_vars. Returns the same tensor.
        """
        for i, var in enumerate(self.target_vars):
            mean = torch.tensor(self.mean[var], device=tensor.device)
            std = torch.tensor(self.std[var], device=tensor.device)
            tensor[:, :, i] = tensor[:, :, i] * std + mean
        return tensor

if __name__ == "__main__":

    from datetime import datetime, timedelta

    def get_nc_file_paths(nc_dir, start_date, end_date):
        """Collect daily surface_YYYY-MM-DD.nc paths between two dates, inclusive.

        Missing files are skipped with a warning instead of aborting.
        """
        fmt = "%Y-%m-%d"
        day = datetime.strptime(start_date, fmt)
        last = datetime.strptime(end_date, fmt)
        one_day = timedelta(days=1)
        paths = []
        while day <= last:
            candidate = os.path.join(nc_dir, f"surface_{day.strftime(fmt)}.nc")
            if os.path.exists(candidate):
                paths.append(candidate)
            else:
                print(f"警告：未找到文件 {candidate}")
            day += one_day
        return paths

    import yaml

    # Experiment configuration, relative to this file's directory.
    config_path = "../config.yaml"
    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Date range and data directory for the smoke test.
    start_date = config["data"]["start_date"]
    end_date = config["data"]["end_date"]
    nc_dir = '../era5_data/'

    test_nc_files = get_nc_file_paths(nc_dir, start_date, end_date)
    dataset = WeatherDataset(test_nc_files, config)

    print(f"数据集总样本数: {len(dataset)}")

    # Fetch the first sample and report its shapes.
    x, y = dataset[0]
    print(f"输入x形状 (time, channel, lat, lon): {x.shape}")
    print(f"预测目标y形状 (channel, lat, lon): {y.shape}")

    # Spot-check a few values for plausibility.
    print(f"x第一个时间步第一个通道最大值: {x[0,0].max().item()}")
    print(f"y第一个通道最大值: {y[0].max().item()}")

    # Index the final sample to confirm window boundaries are valid.
    x_last, y_last = dataset[len(dataset) - 1]
    print(f"最后一个样本x形状: {x_last.shape}")
    print(f"最后一个样本y形状: {y_last.shape}")
