import csv
import os
import shutil

import pandas as pd
from datasets import load_dataset, Dataset

def save_metrics_to_csv(filepath, metrics_dict, header=None):
    """
    Save validation metrics to a CSV file, one row per validation set.

    Args:
        filepath: destination path for the CSV file.
        metrics_dict: mapping of the form
            {dataset_name: {"val_el10": x, "val_ma": y}, ...}.
        header: list[str] of metric columns to write, in order; defaults to
            every metric column found (all columns except "dataset").
    """
    # os.path.dirname returns "" for a bare filename and os.makedirs("")
    # raises FileNotFoundError, so only create directories when one is given.
    parent_dir = os.path.dirname(filepath)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)

    rows = []
    for dataset_name, metrics in metrics_dict.items():
        row = {"dataset": dataset_name}
        row.update(metrics)
        rows.append(row)

    # An empty metrics_dict would otherwise yield a DataFrame with no
    # "dataset" column, making to_csv(columns=...) raise KeyError.
    if rows:
        df = pd.DataFrame(rows)
    else:
        df = pd.DataFrame(columns=["dataset"])

    if header is None:
        header = [col for col in df.columns if col != "dataset"]

    df.to_csv(filepath, index=False, columns=["dataset"] + header)

def build_text(row, dataset_name):
    """
    Build a single text string from a dataset row, formatted according to
    which dataset the row comes from (matched by substrings of the
    lowercased dataset name).

    Args:
        row: a mapping holding the raw fields of one dataset example.
        dataset_name: dataset identifier used to pick the formatting rule.

    Returns:
        The assembled text string for the example.

    Raises:
        ValueError: if no formatting rule matches the dataset name.
    """
    dataset_name = dataset_name.lower()

    plain_text_source = (
        dataset_name.endswith(".csv")
        or "lambada" in dataset_name
        or "pubmed_qa" in dataset_name
    )
    if plain_text_source:
        if "pubmed_qa" in dataset_name:
            raw_context = row.get("context", "")
            fallback = row.get("abstract", "")
            question = row.get("question", "")
            # Prefer a non-blank string context; otherwise use the abstract.
            has_context = isinstance(raw_context, str) and raw_context.strip()
            chosen = raw_context if has_context else fallback
            return f"Context: {chosen}\nQuestion: {question}"
        return row.get("text", "")

    if "piqa" in dataset_name:
        return f"{row['goal']}\nOption 1: {row['sol1']}\nOption 2: {row['sol2']}"

    if "hellaswag" in dataset_name:
        try:
            candidates = row["endings"]
            # Endings may arrive JSON-encoded; decode to a list first.
            if isinstance(candidates, str):
                import json
                candidates = json.loads(candidates)
            idx = int(row["label"])
            chosen_ending = ""
            if isinstance(candidates, list) and idx < len(candidates):
                chosen_ending = candidates[idx]
            return f"{row['ctx_a']} {row['ctx_b']} {chosen_ending}"
        except Exception as e:
            # Fall back to just the contexts when the row cannot be parsed.
            print(f"[WARN] Error parsing hellaswag row: {e}")
            return f"{row.get('ctx_a', '')} {row.get('ctx_b', '')}"

    if "arc" in dataset_name:
        joined_choices = ", ".join(row["choices"]["text"])
        return f"Question: {row['question']}\nChoices: {joined_choices}"

    if "super_glue" in dataset_name and "copa" in dataset_name:
        return f"Premise: {row['premise']}\nQuestion: {row['question']}\nChoice1: {row['choice1']}\nChoice2: {row['choice2']}"

    if "winogrande" in dataset_name:
        sentence = row["sentence"]
        first = sentence.replace("_", row["option1"])
        second = sentence.replace("_", row["option2"])
        return f"{first}\n{second}"

    if "math_qa" in dataset_name:
        return f"Problem: {row['Problem']}"

    raise ValueError(f"Unknown dataset structure for {dataset_name}. Please add support in utils.build_text.")

def try_load_dataset_locally(path_or_name, subset=None, split="validation"):
    """
    Load a dataset from the local ./datasets cache if present, otherwise
    download it via `load_dataset` and cache it to disk for future runs.

    Args:
        path_or_name: Hub dataset path or name passed to `load_dataset`.
        subset: optional dataset configuration/subset name.
        split: dataset split to load (default "validation").

    Returns:
        The loaded `datasets.Dataset`.

    Raises:
        Re-raises any exception from loading (local disk or remote);
        failures while *caching* the downloaded dataset are logged and
        ignored, since the dataset itself was loaded successfully.
    """
    file_name = f"{path_or_name}-{subset}-{split}" if subset else f"{path_or_name}-{split}"
    local_path = f"./datasets/{file_name}"
    try:
        if os.path.exists(local_path):
            print(f"[INFO] 从本地加载数据集: {local_path}")
            return Dataset.load_from_disk(local_path)

        print(f"[WARN] 本地数据集未找到，尝试联网加载: {file_name}")
        ds = load_dataset(path_or_name, subset, split=split, trust_remote_code=True)

        # NOTE: do not pre-create local_path with makedirs — save_to_disk
        # creates it itself, and a pre-created (then empty/partial) directory
        # would make the os.path.exists() check above treat a failed save as
        # a valid cache on the next run.
        try:
            print(f"[INFO] 数据集已从网络加载，正在保存到本地: {local_path}")
            ds.save_to_disk(local_path)
        except Exception as save_err:
            # Caching is best-effort: a failed save must not discard a
            # successfully loaded dataset. Remove any partial directory so
            # later runs don't try to load a corrupt cache.
            print(f"[WARN] Failed to cache dataset to {local_path}: {save_err}")
            shutil.rmtree(local_path, ignore_errors=True)

        return ds
    except Exception as e:
        print(f"[ERROR] 加载数据集{file_name}失败: {e}")
        raise

