# src/finetuner.py

from llama_index.llms.openai import OpenAI
from llama_index.finetuning import OpenAIFinetuneEngine
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from llama_index.core import VectorStoreIndex
from typing import List

from . import config, data_handler


def generate_finetuning_data(documents: List, train_questions: List):
    """Generate the fine-tuning dataset by querying a 'teacher' model.

    Every training question is run through a RAG query engine backed by
    the (stronger) teacher LLM. An ``OpenAIFineTuningHandler`` attached via
    the callback manager records each LLM call, and the collected
    prompt/response pairs are saved as fine-tuning events on disk.

    Args:
        documents: Documents to index for retrieval-augmented answering.
        train_questions: Questions to run through the teacher pipeline.
    """
    print(f"正在使用教师模型 {config.TEACHER_MODEL} 生成微调数据...")

    # The handler captures every teacher-LLM interaction for later export.
    events_handler = OpenAIFineTuningHandler()

    teacher_llm = OpenAI(
        model=config.TEACHER_MODEL,
        temperature=config.BASE_MODEL_TEMP,
        callback_manager=CallbackManager([events_handler]),
    )

    rag_engine = VectorStoreIndex.from_documents(documents).as_query_engine(
        similarity_top_k=config.SIMILARITY_TOP_K,
        llm=teacher_llm,
    )

    # Answers are discarded; we only need the recorded LLM events.
    for q in train_questions:
        rag_engine.query(q)

    events_handler.save_finetuning_events(config.FINETUNING_EVENTS_PATH)
    print(f"微调数据已保存到 {config.FINETUNING_EVENTS_PATH}")


def run_finetuning_job() -> OpenAI:
    """Launch an OpenAI fine-tuning job and return the fine-tuned LLM.

    Submits the previously saved fine-tuning events for the configured base
    model, waits for the job to finish (inside ``get_finetuned_model``), and
    returns an LLM instance bound to the resulting fine-tuned model.

    Returns:
        An ``OpenAI`` LLM configured with the fine-tuned model ID.
    """
    print("启动微调任务...")

    engine = OpenAIFinetuneEngine(
        config.BASE_MODEL,
        config.FINETUNING_EVENTS_PATH,
    )
    engine.finetune()

    print("微调任务已启动。等待任务完成...")

    # NOTE(review): this call appears to block until the job completes;
    # a production setup may need a more robust polling/callback mechanism.
    ft_llm = engine.get_finetuned_model(temperature=config.FINETUNED_MODEL_TEMP)

    print(f"微调完成！微调后的模型 ID: {engine.get_current_job().fine_tuned_model}")
    return ft_llm