# finetuner.py

import logging
from typing import Dict
import torch
from llama_index.core.embeddings import resolve_embed_model
from llama_index.finetuning import EmbeddingQAFinetuneDataset, EmbeddingAdapterFinetuneEngine
from llama_index.experimental.finetuning import Nudge
from utils import build_retriever
from evaluation import evaluate_ndcg_at_k

# Module-level logger, named after this module per the standard logging convention.
logger = logging.getLogger(__name__)


class FinetuningExperiment:
    """Orchestrates embedding-retrieval experiments against a shared base model.

    Supports four experiment flavors, each returning an nDCG@k score computed
    by ``evaluate_ndcg_at_k``:

    * :meth:`run_baseline` -- the untouched base embedding model.
    * :meth:`run_adapter_finetuning` -- a linear-adapter finetune of the base model.
    * :meth:`run_nudge_finetuning` -- NUDGE corpus-embedding finetuning.
    * :meth:`run_nudge_insertion` -- incremental NUDGE update with new data
      (requires :meth:`run_nudge_finetuning` to have run first).
    """

    def __init__(self, config):
        """Resolve the base embedding model named by ``config.BASE_EMBED_MODEL``.

        Args:
            config: Settings object; this class reads BASE_EMBED_MODEL,
                RETRIEVER_TOP_K, ADAPTER_FINETUNE_EPOCHS,
                ADAPTER_FINETUNE_BATCH_SIZE, USE_NUDGE_N, DEVICE,
                INSERTION_DATASET_NAME, and INITIAL_DATASET_NAME.
        """
        self.config = config
        # Lazy %-style args so the message is only formatted if emitted.
        logger.info("Loading base embedding model: %s", config.BASE_EMBED_MODEL)
        self.base_embed_model = resolve_embed_model(config.BASE_EMBED_MODEL)
        # Populated by run_nudge_finetuning(); run_nudge_insertion() depends on them.
        self.nudge_instance = None
        self.nudge_corpus_embeddings = None

    def _build_and_score(self, corpus, embed_model, test_dataset, corpus_embeddings=None):
        """Build a retriever over *corpus* and return its nDCG@k on *test_dataset*.

        ``corpus_embeddings`` is forwarded to ``build_retriever`` only when
        provided, so callers that don't use precomputed embeddings keep the
        exact same call shape as before.
        """
        k = self.config.RETRIEVER_TOP_K
        extra = {}
        if corpus_embeddings is not None:
            extra["corpus_embeddings"] = corpus_embeddings
        retriever = build_retriever(
            corpus=corpus,
            embed_model=embed_model,
            k=k,
            **extra,
        )
        return evaluate_ndcg_at_k(test_dataset, retriever, k=k)

    def run_baseline(self, train_dataset, test_dataset):
        """Score the unmodified base embedding model.

        Returns:
            The nDCG@k of a retriever built over ``train_dataset.corpus``,
            evaluated on ``test_dataset``.
        """
        logger.info("----- Running Baseline Experiment -----")
        return self._build_and_score(
            train_dataset.corpus, self.base_embed_model, test_dataset
        )

    def run_adapter_finetuning(self, train_dataset, test_dataset):
        """Finetune a linear adapter on the base model and score the result.

        Returns:
            The nDCG@k of the adapter-finetuned model on ``test_dataset``.
        """
        logger.info("----- Running Adapter Finetuning Experiment -----")
        engine = EmbeddingAdapterFinetuneEngine(
            train_dataset,
            self.base_embed_model,
            epochs=self.config.ADAPTER_FINETUNE_EPOCHS,
            batch_size=self.config.ADAPTER_FINETUNE_BATCH_SIZE,
            device=self.config.DEVICE,
        )
        engine.finetune()
        finetuned_model = engine.get_finetuned_model()
        return self._build_and_score(
            train_dataset.corpus, finetuned_model, test_dataset
        )

    def run_nudge_finetuning(self, train_dataset, val_dataset, test_dataset):
        """Run NUDGE finetuning of the corpus embeddings and score the result.

        Side effects:
            Stores the Nudge instance and its finetuned corpus embeddings on
            ``self`` so :meth:`run_nudge_insertion` can continue from them.

        Returns:
            The nDCG@k using the base query encoder with NUDGE-finetuned
            corpus embeddings, evaluated on ``test_dataset``.
        """
        logger.info("----- Running NUDGE Finetuning Experiment -----")
        self.nudge_instance = Nudge(
            train_dataset=train_dataset,
            val_dataset=val_dataset,
            embed_model=self.base_embed_model,
            use_nudge_n=self.config.USE_NUDGE_N,
            device=self.config.DEVICE,
        )
        self.nudge_instance.finetune()
        self.nudge_corpus_embeddings = self.nudge_instance.get_finetuned_corpus_embeddings()
        return self._build_and_score(
            train_dataset.corpus,
            self.base_embed_model,
            test_dataset,
            corpus_embeddings=self.nudge_corpus_embeddings,
        )

    def run_nudge_insertion(self, new_train_dataset, new_val_dataset, new_test_dataset, original_test_dataset):
        """Insert new data into an existing NUDGE run and score both corpora.

        Args:
            new_train_dataset / new_val_dataset: Batch of new data to insert
                and finetune on.
            new_test_dataset: Held-out queries for the new data.
            original_test_dataset: Held-out queries for the initial data, used
                to check for regression after insertion.

        Returns:
            Tuple ``(score_new, score_original)`` of nDCG@k values.

        Raises:
            RuntimeError: If :meth:`run_nudge_finetuning` has not run yet.
        """
        # Identity check: any non-None Nudge instance counts as "finetuned",
        # regardless of its truthiness.
        if self.nudge_instance is None:
            raise RuntimeError("NUDGE must be finetuned before running insertion.")

        logger.info("----- Running NUDGE Insertion Experiment -----")
        self.nudge_instance.insert_data_and_finetune(
            new_train_dataset_batch=new_train_dataset,
            new_val_dataset_batch=new_val_dataset,
        )

        updated_embeddings = self.nudge_instance.get_finetuned_corpus_embeddings()

        # Aggregate corpus from original and new datasets; on key collisions the
        # new dataset's entries win.
        # NOTE(review): relies on Nudge exposing its `train_dataset` attribute --
        # confirm against the Nudge implementation.
        original_corpus = self.nudge_instance.train_dataset.corpus
        aggregated_corpus = {**original_corpus, **new_train_dataset.corpus}

        # Build a single retriever over all data (built once, evaluated twice).
        aggregated_retriever = build_retriever(
            corpus=aggregated_corpus,
            embed_model=self.base_embed_model,
            corpus_embeddings=updated_embeddings,
            k=self.config.RETRIEVER_TOP_K,
        )

        # Evaluate on the new dataset.
        logger.info("Evaluating on new dataset: %s", self.config.INSERTION_DATASET_NAME)
        score_new = evaluate_ndcg_at_k(new_test_dataset, aggregated_retriever, k=self.config.RETRIEVER_TOP_K)

        # Evaluate on the original dataset to check for regression.
        logger.info("Evaluating on original dataset: %s", self.config.INITIAL_DATASET_NAME)
        score_original = evaluate_ndcg_at_k(original_test_dataset, aggregated_retriever, k=self.config.RETRIEVER_TOP_K)

        return score_new, score_original