# dataloader.py
import os
import glob
from torch.utils.data import DataLoader, Subset
from dataset.dataset import WeatherDataset
import numpy as np
from datetime import datetime, timedelta
import torch

def get_nc_file_paths(nc_dir, start_date, end_date):
    """Collect paths of daily ``surface_YYYY-MM-DD.nc`` files in a date range.

    Args:
        nc_dir: directory containing the .nc files.
        start_date: first date, "YYYY-MM-DD" (inclusive).
        end_date: last date, "YYYY-MM-DD" (inclusive).

    Returns:
        List of existing file paths in chronological order; days whose file
        is missing are skipped with a printed warning.
    """
    fmt = "%Y-%m-%d"
    first_day = datetime.strptime(start_date, fmt)
    last_day = datetime.strptime(end_date, fmt)

    paths = []
    # Walk the range with a counted loop; an empty/inverted range yields no days.
    day_count = (last_day - first_day).days + 1
    for offset in range(day_count):
        day = first_day + timedelta(days=offset)
        candidate = os.path.join(nc_dir, f"surface_{day.strftime(fmt)}.nc")
        if os.path.exists(candidate):
            paths.append(candidate)
        else:
            print(f"警告：未找到文件 {candidate}")
    return paths


def get_dataloader(config, mode="train"):
    """Build a DataLoader over one chronological 60/20/20 split of the data.

    Args:
        config: nested dict with a "data" section (nc_dir, start_date,
            end_date) and a "train" section (batch_size, optional
            num_workers). NOTE: config["data"]["concat_nc_files"] is forced
            to True here so all files are concatenated into one continuous
            time series — the input dict is mutated.
        mode: "train" (first 60%), "val" (next 20%) or "test" (last 20%).

    Returns:
        Tuple of (dataloader over the selected subset, full dataset).
    """
    assert mode in ["train", "val", "test"], f"Unknown mode: {mode}"

    data_cfg = config["data"]
    nc_files = get_nc_file_paths(data_cfg["nc_dir"], data_cfg["start_date"], data_cfg["end_date"])

    # Force concatenation of all files to build one continuous time series.
    data_cfg["concat_nc_files"] = True

    dataset = WeatherDataset(nc_files, config)

    total_samples = len(dataset)  # total samples after concatenation
    sixty = int(total_samples * 0.6)
    eighty = int(total_samples * 0.8)
    bounds = {
        "train": (0, sixty),
        "val": (sixty, eighty),
        "test": (eighty, total_samples),
    }
    lo, hi = bounds[mode]
    indices = list(range(lo, hi))

    is_train = mode == "train"
    train_cfg = config["train"]
    dataloader = DataLoader(
        Subset(dataset, indices),
        batch_size=train_cfg["batch_size"],
        shuffle=is_train,
        num_workers=train_cfg.get("num_workers", 2),
        pin_memory=torch.cuda.is_available(),
        drop_last=is_train,
    )

    print(f"Mode: {mode}, total samples: {total_samples}, using {len(indices)} samples")

    return dataloader, dataset

def get_dataloader_only_for_test(config):
    """Build a DataLoader over the full test date range (no train/val split).

    Args:
        config: nested dict with a "data" section (nc_dir, test_start_date,
            test_end_date) and a "train" section (batch_size, optional
            num_workers). NOTE: config["data"]["concat_nc_files"] is forced
            to True here so all files are concatenated into one continuous
            time series — the input dict is mutated.

    Returns:
        Tuple of (unshuffled dataloader over all samples, full dataset).
    """
    nc_dir = config["data"]["nc_dir"]
    nc_files = get_nc_file_paths(nc_dir, config["data"]["test_start_date"], config["data"]["test_end_date"])

    # Force concatenation of all files to build one continuous time series.
    config["data"]["concat_nc_files"] = True

    dataset = WeatherDataset(nc_files, config)

    total_samples = len(dataset)  # total samples after concatenation

    # The whole date range is test data, so no 60/20/20 boundaries are needed
    # here (the unused train_end/val_end computations were dead code).
    dataloader = DataLoader(
        dataset,
        batch_size=config["train"]["batch_size"],
        shuffle=False,  # keep chronological order for evaluation
        num_workers=config["train"].get("num_workers", 2),
        pin_memory=torch.cuda.is_available(),
    )

    print(f"Mode: 'only test', total samples: {total_samples}, using {len(dataset)} samples")

    return dataloader, dataset

if __name__ == "__main__":
    # Smoke test: build all three loaders from ../config.yaml and inspect one
    # batch from each. (A redundant local `WeatherDataset` import was removed;
    # it was unused in this block and already imported at module level.)
    import yaml

    with open("../config.yaml", "r", encoding="utf-8") as f:
        config = yaml.safe_load(f)

    train_loader, train_dataset = get_dataloader(config, mode="train")
    val_loader, val_dataset = get_dataloader(config, mode="val")
    test_loader, test_dataset = get_dataloader(config, mode="test")

    print("========== 数据加载器测试 ==========")
    print(f"训练集 batch 数: {len(train_loader)}")
    print(f"验证集 batch 数: {len(val_loader)}")
    print(f"测试集 batch 数: {len(test_loader)}")

    for name, loader in zip(["Train", "Val", "Test"], [train_loader, val_loader, test_loader]):
        # Shapes assumed x: [B, T, C, H, W], y: [B, C, H, W] per the original
        # comments — confirm against WeatherDataset.__getitem__.
        x, y = next(iter(loader))
        print(f"\n[{name}]")
        print(f"x 形状: {x.shape}")
        print(f"y 形状: {y.shape}")
        print(f"x[0][0][0] 最大值: {x[0,0,0].max().item():.3f}")
        print(f"y[0][0] 最大值: {y[0,0].max().item():.3f}")
