from llm_rag.chains import build_pdf_context_chain
from llm_rag.llms import ChatGLM, ChatYi
from llm_rag.llm import ChatQwen
from llm_rag.templates import RAG_TEMPLATE_A, RAG_TEMPLATE_B, RAG_TEMPLATE_C
from langchain.prompts import PromptTemplate
from pathlib import Path
import json
import hydra
from omegaconf import DictConfig
from logging import getLogger
import time

# Module-level logger; hydra installs the handlers/levels at runtime.
logger = getLogger(__name__)

def _load_llm(llm_dir: str):
    """Instantiate and load the chat model whose name appears in *llm_dir*.

    Raises:
        ValueError: if *llm_dir* matches none of the supported models.
    """
    if 'chatglm' in llm_dir:
        llm = ChatGLM()
    elif 'Qwen' in llm_dir:
        llm = ChatQwen()
    elif 'yi' in llm_dir or 'Yi' in llm_dir:
        llm = ChatYi()
    else:
        # Original message omitted Yi even though the branch above supports it.
        raise ValueError('llm_dir must contain chatglm, Qwen, or Yi')
    llm.load_model(pretrained_dir=llm_dir)
    return llm


@hydra.main(config_path='configs', config_name='config.yaml', version_base=None)
def predict(config: DictConfig):
    """Run RAG prediction over a question file and save the answers as JSON.

    Reads questions from ``config.question_path``, retrieves PDF context for
    each one, queries the configured LLM with each prompt template, and writes
    a list of ``{"question", "answer_1".."answer_3"}`` records to
    ``config.result_path``. Partial results are saved even if a question fails.
    """
    # Output path: drop any stale result so an aborted run cannot be
    # mistaken for a fresh one.
    result_path: Path = Path(config.result_path)
    if result_path.exists():
        result_path.unlink()

    # Question data: a JSON list of dicts, each with a 'question' key.
    # Explicit UTF-8 avoids platform-dependent decoding of Chinese text.
    with open(config.question_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    # Retriever chain over the source PDF.
    context_chain = build_pdf_context_chain(
        pdf_path=config.pdf_path,
        embedder_dir=config.embedder_dir,
        embedder_device=config.embedder_device,
        pdf_split_type=config.pdf_split_type,
        search_top_k=config.retrieval_top_k,
        split_size=config.split_size,
        split_overlap=config.split_overlap
    )

    results = []
    try:
        llm = _load_llm(config.llm_dir)

        # One prediction per prompt template, per question.
        templates = [RAG_TEMPLATE_A]
        start_time = time.perf_counter()
        for i, d in enumerate(data):
            question = d['question']
            logger.info('question %d: %s', i, question)
            context = context_chain.invoke(question)
            prompts = [
                PromptTemplate.from_template(template=template).format(
                    question=question, context=context
                )
                for template in templates
            ]
            preds = llm.batch(prompts)
            # Always emit exactly three answer slots, padding with "" so the
            # output schema is stable for any number of templates (the
            # original if/elif chain left `answers` undefined for 0 or >3).
            answers = {"question": question}
            for slot in range(3):
                answers[f"answer_{slot + 1}"] = preds[slot] if slot < len(preds) else ""
            results.append(answers)
            logger.info('question %d done! \n', i)
        end_time = time.perf_counter()
        logger.info(
            'num of questions: %d. time cost: %s min',
            len(data), round((end_time - start_time) / 60, 2)
        )
    except Exception:
        # Best-effort: record the failure but still persist whatever partial
        # results were gathered (the original bare `except: pass` hid errors
        # entirely, making empty output files impossible to diagnose).
        logger.exception('prediction aborted; saving partial results')

    # Save results (possibly partial) as pretty-printed UTF-8 JSON.
    with open(result_path, 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=4)
        
if __name__ == '__main__':
    # Hydra parses CLI overrides and injects the resolved config when the
    # decorated function is invoked as a script entry point.
    predict()