"""
数据读取 / 索引划分 / DataLoader 统一入口
服务器 : partition_indices  -> 得到每个客户端的样本索引
客户端 : get_dataloader_by_idx -> 根据索引拿到 DataLoader
"""

import os, pickle, numpy as np, torch
from pathlib import Path
from torch.utils.data import Dataset, DataLoader, Subset
from dataloaders.data_irg import TSNote_Irg, TextTSIrgcollate_fn
from transformers import AutoTokenizer
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler


# -------------------- 路径 & 缓存 --------------------
_DATA_ROOT = os.getenv("FEDCP_DATA_ROOT",
                       "/home/liuning/hzx/MIMICdataprocess/MultimodalMIMIC/Data/ihm")
_CACHE = {"train": None, "test": None}           # 全量数据缓存
# ---------- 新版 P2XDataset ----------
from torch.nn.utils.rnn import pad_sequence

# 新增：懒加载函数
def get_tokenizer(path="/home/liuning/miaohr/bioformer-8L"):
    """Lazily construct a fast HuggingFace tokenizer from *path*.

    The ``transformers`` import is deferred to call time so this module
    can be imported without the dependency.  ``path`` may also be a hub
    id such as "bert-base-uncased" when a cached copy exists.
    """
    from transformers import AutoTokenizer as _AutoTokenizer
    return _AutoTokenizer.from_pretrained(path, use_fast=True)
def build_irg_dataloader(args, mode, indices):
    """Build a DataLoader over one client's slice of the irregular TS+text data.

    Usable for train / val / test alike: training shuffles via
    ``RandomSampler`` and drops the final incomplete batch, while
    evaluation iterates sequentially and keeps every sample.
    """
    tok = get_tokenizer(args.bert_name)
    dataset = TSNote_Irg(args, mode, tok)
    client_view = Subset(dataset, indices)

    is_train = (mode == "train")
    chosen_sampler = RandomSampler(client_view) if is_train else SequentialSampler(client_view)

    return DataLoader(
        client_view,
        sampler=chosen_sampler,
        batch_size=args.batch_size,
        collate_fn=TextTSIrgcollate_fn,
        drop_last=is_train,
    )

def _pkl_path(split: str) -> Path:
    """Resolve the pickle path for *split*: "train" -> ts_train.pkl, anything else -> ts_test.pkl."""
    if split == "train":
        name = "ts_train.pkl"
    else:
        name = "ts_test.pkl"
    return Path(_DATA_ROOT) / name

# 2) 修改 _load_full，让它找三份 p2x 文件
def _load_full(split: str):
    """Load (and memoize) the full pickled dataset for one split.

    Parameters
    ----------
    split : str
        One of {"train", "val", "test"}; the file is located at
        ``<DATA_ROOT>/<split>p2x_data.pkl``.

    Returns the unpickled object, cached in ``_CACHE`` so the disk is
    hit only once per split.

    Fix: the original used ``_CACHE[split]`` directly, which raised
    KeyError for "val" because the cache dict was initialised with only
    "train"/"test" keys; ``dict.get`` tolerates a missing key.
    """
    if _CACHE.get(split) is None:
        fname = os.path.join(_DATA_ROOT, f"{split}p2x_data.pkl")
        # NOTE(review): pickle.load is unsafe on untrusted files — these
        # are assumed to be locally generated, trusted pickles.
        with open(fname, "rb") as f:
            _CACHE[split] = pickle.load(f)
    return _CACHE[split]

class _TextDataset(Dataset):
    """
    支持三种结构：
      A. list[dict] : [{text_data, label/y, ...}, ...]
      B. dict[str]  : {"text_data": ndarray, "label": ndarray, ...}
      C. list/tuple : [(x, y), ...]
    对于 A 类，如果个别样本缺 'text_data'，将自动过滤。
    """
    def __init__(self, raw):
        # ---------- list[dict] ----------
        if isinstance(raw, list) and isinstance(raw[0], dict):
            txt_key  = "text_data"
            lbl_key  = "label" if "label" in raw[0] else "y"

            xs, ys, drop = [], [], 0
            for r in raw:
                if txt_key in r and lbl_key in r:
                    xs.append(r[txt_key])
                    ys.append(r[lbl_key])
                else:
                    drop += 1
            if not xs:
                raise RuntimeError(
                    f"所有 {len(raw)} 条样本都缺 '{txt_key}'，请检查数据划分或键名。")
            if drop:
                print(f"⚠️  _TextDataset: 跳过 {drop} 条缺少 '{txt_key}' 的样本")

        # ---------- dict[str] ----------
        elif isinstance(raw, dict):
            txt_key  = "text_data"
            lbl_key  = "label" if "label" in raw else "y"
            xs = raw[txt_key]
            ys = raw[lbl_key]

        # ---------- (x, y) tuple ----------
        else:
            xs, ys = zip(*raw)

        # ---------- 转 tensor ----------
        self.x = torch.as_tensor(xs, dtype=torch.int64)
        self.y = torch.as_tensor(ys, dtype=torch.int64)

    def __len__(self):  return len(self.y)
    def __getitem__(self, i):  return self.x[i], self.y[i]

# -------------------- 服务器用：划分索引 ----------------
def partition_indices(
        split: str,
        num_clients: int,
        strategy: str = "iid",
        seed: int = 42,
        alpha: float = 0.5):
    """Split the sample indices of *split* across *num_clients* clients.

    Returns a list of length ``num_clients`` whose entries are int64
    ``np.ndarray`` index arrays, one per client.  Only an IID equal
    split is implemented; ``strategy`` and ``alpha`` are placeholders
    for future Dirichlet-style (non-IID) partitioning.
    """
    total = len(_load_full(split))
    shuffled = np.random.default_rng(seed).permutation(total)
    return [np.asarray(chunk, dtype=np.int64)
            for chunk in np.array_split(shuffled, num_clients)]

# -------------------- 通用：DataLoader ----------------
# ★ 传入自定义 collate
def get_subset_size(split: str, indices) -> int:
    """Return how many samples a client holds (server-side aggregation weight).

    ``split`` is unused; it is kept for interface symmetry with the
    other split-aware helpers in this module.
    """
    return len(indices)