# data_loader.py

import logging
from typing import Dict, List, Tuple
import pandas as pd
from datasets import load_dataset
from llama_index.finetuning import EmbeddingQAFinetuneDataset

# Module-level logger; handlers/levels are expected to be configured by the application.
logger = logging.getLogger(__name__)


def _format_hf_dataset(
        corpus_raw, queries_raw, qrels_raw
) -> Tuple[Dict[str, str], Dict[str, str], Dict[str, List[str]]]:
    """Helper function to format raw Hugging Face datasets."""
    corpus = {str(row["record_id"]): row["text"] for row in corpus_raw}
    queries = {str(row["q_id"]): row["input"] for row in queries_raw}

    qrels = (
        qrels_raw.to_pandas()
        .groupby("q_id")["record_id"]
        .apply(list)
        .to_dict()
    )
    # Convert all keys and values to strings
    qrels = {str(k): [str(i) for i in v] for k, v in qrels.items()}
    return corpus, queries, qrels


def load_finetuning_datasets(
        dataset_name: str,
) -> Tuple[EmbeddingQAFinetuneDataset, EmbeddingQAFinetuneDataset, EmbeddingQAFinetuneDataset]:
    """
    Loads a specified dataset from Hugging Face and formats it for finetuning.

    Args:
        dataset_name (str): The name of the dataset (e.g., "scifact").

    Returns:
        A tuple of (train, validation, test) datasets. All three share a
        single corpus dict rather than three identical copies.

    Raises:
        Exception: re-raises whatever ``load_dataset`` / formatting fails
            with (network, auth, missing split, ...), after logging it.
    """
    hf_dataset_name = f"sepz/{dataset_name}_ft"
    # Lazy %-style args so the string is only built if the level is enabled.
    logger.info("Loading dataset '%s' from Hugging Face...", hf_dataset_name)

    try:
        # The corpus ("data_records") is shared across all query splits.
        corpus_raw = load_dataset(hf_dataset_name, "data_records", split="train")

        corpus: Dict[str, str] = {}
        datasets = []
        # Each split is loaded and formatted identically; the HF repo names
        # the validation split "dev".
        for split in ("train", "dev", "test"):
            queries_raw = load_dataset(hf_dataset_name, "qs", split=split)
            qrels_raw = load_dataset(hf_dataset_name, "qs_rel", split=split)
            split_corpus, queries, qrels = _format_hf_dataset(
                corpus_raw, queries_raw, qrels_raw
            )
            # Keep only the first formatted corpus so the three dataset
            # objects share one dict instead of three equal copies.
            if not corpus:
                corpus = split_corpus
            datasets.append(
                EmbeddingQAFinetuneDataset(
                    corpus=corpus, queries=queries, relevant_docs=qrels
                )
            )

        logger.info("Successfully loaded and formatted datasets.")
        train_dataset, val_dataset, test_dataset = datasets
        return train_dataset, val_dataset, test_dataset

    except Exception:
        # logger.exception logs the full traceback, not just the message.
        logger.exception("Failed to load dataset '%s'", dataset_name)
        raise


def prefix_dataset_ids(dataset: EmbeddingQAFinetuneDataset, prefix: str) -> EmbeddingQAFinetuneDataset:
    """Prefix every query and document ID in *dataset* to avoid ID collisions.

    Mutates *dataset* in place (queries, corpus, and relevant_docs are all
    rebuilt with ``"{prefix}-"`` prepended to each key/ID) and returns the
    same object for call chaining.
    """
    def tag(identifier: str) -> str:
        # Single place that defines the "<prefix>-<id>" naming scheme.
        return f"{prefix}-{identifier}"

    dataset.corpus = {tag(doc_id): text for doc_id, text in dataset.corpus.items()}
    dataset.queries = {tag(q_id): text for q_id, text in dataset.queries.items()}
    dataset.relevant_docs = {
        tag(q_id): [tag(doc_id) for doc_id in doc_ids]
        for q_id, doc_ids in dataset.relevant_docs.items()
    }
    return dataset