# 进行检索词汇生成，实现多路召回

import json
import os, sys
from tqdm import tqdm
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from general_agent import Agent
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from api import api_tool, api_llm, api_prompt_engineering

from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
class QueryRewritingAgent(Agent):
    """Agent that rewrites a topic into multiple search queries (multi-path
    recall) and retrieves deduplicated, relevance-ranked chunks for them."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
        # Prompt template used by rewrite(); TEMPLATE_DIR/PE come from Agent.
        self.template = self.PE.get_template(os.path.join(self.TEMPLATE_DIR, "query_rewriting.txt"))

    def rewrite(self, topic: str, **kwargs) -> list:
        """Rewrite *topic* into a list of augmented retrieval queries.

        Keyword Args:
            num_queries (int): number of queries to request from the LLM
                (default 16).
            DEBUG (bool): if True, print the rendered prompt.

        Returns:
            list: augmented queries; empty if the LLM response could not be
            parsed into a JSON object (dict).
        """
        augmented_query_list = []

        prompt = self.PE.format(
            template=self.template,
            topic=topic,
            num_queries=kwargs.get("num_queries", 16),
        )
        if self.DEBUG or kwargs.get("DEBUG") is True:
            print(prompt + "\n" + "-" * 50)
        response = self.LLM.chat(prompt=prompt, model=self.model)
        json_obj = self.PE.parse_to_json(response)
        # The template asks for a JSON object; its values are the queries.
        if isinstance(json_obj, dict):
            augmented_query_list.extend(json_obj.values())
        return augmented_query_list

    # Hybrid multi-query retrieval + result reranking.
    def retrieve(self, query_list, **kwargs) -> list:
        """Search papers for every query concurrently, deduplicate chunks by
        id, optionally decay relevance by query position, then rerank.

        Args:
            query_list (str | list): one query or a list of queries.

        Keyword Args:
            threshold (float): minimum chunk distance to keep (default 0.5).
            max_return_size (int): cap on returned chunks (default 128).
            top_k (int): per-query result count (default 128).
            enable_weight_decay (bool): decay relevance of later queries'
                results so earlier queries rank higher (default False).
            weight_decay (float): per-position decay factor (default 0.95).

        Returns:
            list: chunks sorted by (decayed) distance, descending.

        Raises:
            ValueError: if query_list is neither a str nor a list.
        """
        if isinstance(query_list, str):
            query_list = [query_list]
        elif not isinstance(query_list, list):
            raise ValueError(">> query_list 必须是列表类型！")

        threshold = kwargs.get("threshold", 0.5)
        max_return_size = kwargs.get("max_return_size", 128)
        top_k = kwargs.get("top_k", 128)
        enable_weight_decay = kwargs.get("enable_weight_decay", False)
        weight_decay = kwargs.get("weight_decay", 0.95)

        total_chunk_list = []
        searched_chunk_set = set()  # chunk ids already collected (dedup)

        # Retrieval is I/O-bound, so a thread pool (not a process pool) is
        # the right tool; this also matches the original comment's intent
        # and avoids ProcessPoolExecutor's picklability requirements.
        with ThreadPoolExecutor() as executor:
            future_to_query = {}
            future_to_scaling = {}
            for position, query in enumerate(query_list):
                future = executor.submit(api_tool.search_papers, query, top_k)
                future_to_query[future] = query
                # Bind the decay factor to the query's SUBMISSION position:
                # as_completed() yields in nondeterministic completion order,
                # so mutating a shared scaling there applied decay randomly.
                future_to_scaling[future] = (
                    weight_decay ** position if enable_weight_decay else 1
                )

            for future in tqdm(as_completed(future_to_query), total=len(query_list), desc="批量检索中"):
                query = future_to_query[future]
                scaling = future_to_scaling[future]
                try:
                    raw_chunk_list = future.result()
                except Exception as e:
                    # Best effort: one failed query must not abort the batch.
                    print(f"Error processing query '{query}': {e}")
                    continue
                for chunk in raw_chunk_list:
                    if chunk["distance"] >= threshold and chunk["id"] not in searched_chunk_set:
                        chunk["distance"] *= scaling
                        total_chunk_list.append(chunk)
                        searched_chunk_set.add(chunk["id"])

        # Rerank merged results by (decayed) relevance before truncating;
        # previously the truncation order depended on completion order,
        # which made both the cap and the decay effectively arbitrary.
        total_chunk_list.sort(key=lambda chunk: chunk["distance"], reverse=True)
        return total_chunk_list[:max_return_size]

if __name__ == "__main__":
    # Demo: rewrite a topic into multiple queries, then retrieve chunks.
    model = "gpt-4o"
    topic = "什么是损失函数"
    # Alternative demo topics:
    # topic = "text2sql是什么？"
    # topic = "what is Transformer?"
    # topic = "Text2SQL研究现状如何，面临哪些挑战？"
    # topic = "有哪些方法可以提升大模型的规划能力，各自优劣是什么？"
    # topic = "多模态大模型的技术发展路线是什么样的？"

    # Fix: pass the `model` variable instead of repeating the literal,
    # so changing the model above actually takes effect.
    query_rewriting_agent = QueryRewritingAgent(model=model)
    query_list = query_rewriting_agent.rewrite(topic=topic)
    query_rewriting_agent.PE.print_json(query_list)

    chunk_list = query_rewriting_agent.retrieve(query_list=query_list)

    # query_rewriting_agent.PE.print_json(chunk_list)
    print("len(chunk_list):", len(chunk_list))