import pandas as pd
from torch.utils.data import Dataset, DataLoader
from datasets import load_dataset
from utils import build_text
from utils import try_load_dataset_locally
import torch

class KnowledgeUnlearnDataset(Dataset):
    """Torch dataset serving fixed-length tokenized "text" rows for unlearning runs.

    Rows come either from a local CSV file (when ``path_or_name`` ends in
    ``.csv``) or from a dataset resolved via ``try_load_dataset_locally``.
    If the source has no "text" column, one is synthesized per-row with
    ``build_text``. Rows with a missing text value are dropped.
    """

    def __init__(self, path_or_name, subset, split, tokenizer, input_length, output_length, dataset_name):
        self.tokenizer = tokenizer
        self.in_len = input_length
        # Kept for interface parity; not consumed when tokenizing below.
        self.out_len = output_length
        self.dataset_name = dataset_name

        if path_or_name.endswith(".csv"):
            frame = pd.read_csv(path_or_name)
        else:
            ds = try_load_dataset_locally(path_or_name, subset, split=split)
            print(f"[DEBUG] Columns in dataset {path_or_name}/{subset}: {ds.column_names}")
            frame = ds.to_pandas()

        if "text" not in frame.columns:
            print(f"[INFO] Constructing 'text' field for dataset {dataset_name}")
            frame["text"] = frame.apply(lambda row: build_text(row, dataset_name), axis=1)

        # Simple cleaning: keep only rows whose text is non-null.
        self.df = frame.dropna(subset=["text"]).reset_index(drop=True)

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        """Tokenize row *idx* to ``in_len`` tokens; labels mirror input_ids (causal LM)."""
        encoded = self.tokenizer(
            self.df.loc[idx, "text"],
            max_length=self.in_len,
            truncation=True,
            padding="max_length",
            return_tensors="pt",
        )
        return {
            "input_ids": encoded.input_ids.squeeze(),
            "attention_mask": encoded.attention_mask.squeeze(),
            "labels": encoded.input_ids.squeeze(),
            "dataset_name": self.dataset_name,
        }

def build_dataloaders(cfg, train_set, tokenizer):
    """Create the training loader plus one validation loader.

    Validation uses the first entry of cfg's ``valid_sets`` /
    ``valid_subset_path`` / ``valid_type_path`` lists; both loaders read
    their batch size from cfg["train_batch_size"].
    """
    # Training data: subset is blank for a plain path, split fixed to "train".
    train_dataset = KnowledgeUnlearnDataset(
        train_set, "", "train", tokenizer,
        cfg["input_length"], cfg["output_length"], dataset_name="train",
    )
    train_loader = DataLoader(
        train_dataset,
        batch_size=cfg["train_batch_size"],
        shuffle=True,
        num_workers=cfg["num_workers"],
    )

    # Validation data: only the first configured set.
    val_path = cfg["valid_sets"][0]
    val_subset = cfg["valid_subset_path"][0]
    val_name = f"{val_path}/{val_subset}" if val_subset else val_path
    val_dataset = KnowledgeUnlearnDataset(
        val_path, val_subset, cfg["valid_type_path"][0], tokenizer,
        cfg["input_length"], cfg["output_length"], dataset_name=val_name,
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=cfg["train_batch_size"],  # NOTE(review): reuses train batch size — confirm intended
        shuffle=False,
        num_workers=cfg["num_workers"],
    )

    return train_loader, val_loader

def build_val_loaders(cfg, tokenizer):
    """Build one non-shuffled DataLoader per configured validation set."""
    loaders = []
    triples = zip(cfg["valid_sets"], cfg["valid_subset_path"], cfg["valid_type_path"])
    for path, subset, split in triples:
        name = f"{path}/{subset}" if subset else path
        dataset = KnowledgeUnlearnDataset(
            path, subset, split, tokenizer,
            cfg["input_length"], cfg["output_length"],
            dataset_name=name,
        )
        loaders.append(
            DataLoader(
                dataset,
                batch_size=cfg["train_batch_size"],  # NOTE(review): reuses train batch size — confirm intended
                shuffle=False,
                num_workers=cfg["num_workers"],
            )
        )
    return loaders
