import ast
import concurrent.futures
import json
import re

from prompts.paper_search import paper_search_system_prompt, paper_search_user_prompt
from prompts.pasa_generate_query import generate_query_prompt
from utils.api_request import search_papers
from utils.data_type import QueryResult, display_query_results, wrap_search_results
from utils.llm_client import llm_chat
from utils.pasa_model import Agent
from utils.rerank import rerank_query_docs

def list_extract(raw_str: str) -> list:
    """Extract the first bracketed list from *raw_str* and parse it.

    Tries strict JSON first, then falls back to ``ast.literal_eval`` so that
    single-quoted, Python-style lists (common in LLM output) parse correctly
    even when items contain apostrophes — the previous ``replace("'", '"')``
    hack corrupted such strings, and a missing match crashed with
    AttributeError on ``None.group()``.

    Args:
        raw_str: arbitrary text expected to contain a list literal.

    Returns:
        The parsed list.

    Raises:
        ValueError: if no bracketed span is found or it cannot be parsed.
    """
    # DOTALL lets the list span multiple lines; non-greedy keeps the first list.
    match = re.search(r'\[.*?\]', raw_str, flags=re.DOTALL)
    if match is None:
        raise ValueError(f"no list found in: {raw_str!r}")
    list_str = match.group()
    try:
        # Well-formed JSON (double-quoted strings) parses directly.
        return json.loads(list_str)
    except json.JSONDecodeError:
        try:
            # Python-style literal, e.g. ['a', "it's b"].
            return ast.literal_eval(list_str)
        except (ValueError, SyntaxError) as e:
            raise ValueError(f"could not parse list: {list_str!r}") from e



class PaperSearchAgent():
    """Collects papers relevant to a user query by repeatedly generating new
    search queries (via an LLM) and merging de-duplicated results until
    ``papers_count`` unique papers have been gathered.
    """

    def __init__(self, origin_query: str, user_summary_type: str, query_list: list = None,
                 top_k: int = 50, papers_count: int = 30, distance_threshold: float = 0.6,
                 rerank: bool = True):
        """
        Args:
            origin_query: the user's original research question.
            user_summary_type: the kind of summary the user wants; injected
                into the query-generation prompt.
            query_list: optional seed queries searched in addition to
                ``origin_query``. Defaults to none (``None`` replaces the
                old mutable ``[]`` default; falsy behavior is unchanged).
            top_k: number of hits to request per search.
            papers_count: target number of unique papers to collect.
            distance_threshold: stored for similarity filtering; not read by
                this class directly.
            rerank: when True, filter raw hits with ``rerank_query_docs``;
                otherwise delegate to the (unimplemented) pasa selector.
        """
        self.query_list = list(query_list) if query_list else []
        self.origin_query = origin_query
        self.query_history = self.query_list + [origin_query]
        self.top_k = top_k
        self.distance_threshold = distance_threshold
        self.papers_count = papers_count
        self.user_summary_type = user_summary_type
        self.save_data = {}      # paper_id -> wrapped hit (entry of wrap_search_results)
        self.origin_result = {}  # paper_id -> raw filtered hit from the search API
        self.rerank = rerank
        if self.query_list:
            # Seed queries present: fan them (plus the original) out over a
            # thread pool. search_and_add de-duplicates by paper_id on merge.
            # FIX: pass self.top_k explicitly — the original omitted it here,
            # silently falling back to search_and_add's default of 30 while
            # the sequential branch below used self.top_k.
            with concurrent.futures.ThreadPoolExecutor() as executor:
                futures = [executor.submit(self.search_and_add, final_query, self.top_k)
                           for final_query in self.query_list + [origin_query]]
                for future in concurrent.futures.as_completed(futures):
                    try:
                        future.result()  # re-raise worker exceptions here
                    except Exception as e:
                        print(f"任务执行出错: {e}")
        else:
            self.search_and_add(self.origin_query, self.top_k)
        # NOTE(review): first_work / create_new_query_by_pasa_scrawler require
        # the scrawler agent and templates below; re-enable before using them.
        # self.scrawler:Agent = Agent("pasa_scrawler")
        # self.templates       = {
        #     "cite_template":   r"~\\cite\{(.*?)\}",
        #     "search_template": r"Search\](.*?)\[",
        #     "expand_template": r"Expand\](.*?)\["
        # }

    def first_work(self):
        """Generate initial queries with the pasa scrawler model and search them.

        Requires ``self.scrawler`` / ``self.templates`` (currently commented
        out in ``__init__`` — TODO confirm before calling).
        """
        prompt = generate_query_prompt.format(user_query=self.origin_query)
        result = self.scrawler.infer(prompt)
        queries = [q.strip() for q in re.findall(self.templates["search_template"], result, flags=re.DOTALL)]
        for query in queries:
            self.search_and_add(query, self.top_k)

    def create_new_query_by_pasa_scrawler(self, query_list):
        """Expand each query into new search queries via the pasa scrawler.

        Returns a de-duplicated list of queries extracted from the model
        output via the ``search_template`` pattern.
        """
        # FIX: str.format returns a new string; the original discarded it and
        # appended the unformatted template object instead.
        prompt_list = [generate_query_prompt.format(user_query=q) for q in query_list]
        final_query: list = self.scrawler.batch_infer(prompt_list)
        queries_set = set()
        for query in final_query:
            for q in re.findall(self.templates["search_template"], query, flags=re.DOTALL):
                queries_set.add(q.strip())
        return list(queries_set)

    def create_new_query(self):
        """Ask the LLM for fresh search queries given what was already found.

        Returns only queries absent from ``self.query_history``; those are
        also appended to the history so they are never proposed twice.
        """
        saved_papers_info = ""
        # Summarize collected papers (titles only) for the prompt. The
        # original shadowed `index` with enumerate and incremented it to no
        # effect, and bound an unused `chunk_text` — both removed.
        for index, (_paper_id, item) in enumerate(self.save_data.items()):
            paper_title = item.entity.paper_title
            saved_papers_info += f"已检索到的第{index+1}篇论文块：\n标题: {paper_title}\n"

        messages = [
            # {"role": "system", "content": paper_search_system_prompt},
            {"role": "user", "content": paper_search_user_prompt.format(
                user_origin_query=self.origin_query,
                user_summary_type=self.user_summary_type,
                query_history=json.dumps(self.query_history, ensure_ascii=False),
                papers_info=saved_papers_info)},
        ]
        llm_response = llm_chat(messages, temperature=0, model_name='GLM_4_PLUS')
        llm_result = llm_response.choices[0].message.content
        try:
            result = list_extract(llm_result)
            unique_list = []
            for item in result:
                if item not in self.query_history:
                    self.query_history.append(item)
                    unique_list.append(item)
            return unique_list
        except Exception as e:
            print(f"提取错误: {e}")
            return []

    def work(self):
        """Main loop: generate queries, search them in parallel, merge results.

        Stops once ``papers_count`` unique papers are collected, or when the
        LLM yields no unseen queries. Persists the raw results to disk.

        Returns:
            Tuple of (save_data, origin_result) dicts keyed by paper_id.
        """
        while len(self.save_data) < self.papers_count:
            new_query_list = self.create_new_query()
            if not new_query_list:
                # FIX: without this guard an empty query list made the loop
                # spin forever, re-calling the LLM with identical state.
                break
            with concurrent.futures.ThreadPoolExecutor() as executor:
                futures = [executor.submit(self.search_and_add, final_query, self.top_k)
                           for final_query in new_query_list]
                for future in concurrent.futures.as_completed(futures):
                    try:
                        future.result()  # re-raise worker exceptions here
                    except Exception as e:
                        print(f"任务执行出错: {e}")
        with open('examples_data/origin_paper_search_result.json', 'w', encoding='utf-8') as f:
            json.dump(self.origin_result, f, ensure_ascii=False, indent=4)
        return self.save_data, self.origin_result

    def filter_paper_by_pasa_selector(self, query, origin_search_result):
        """Placeholder for pasa-selector filtering (the ``rerank=False`` path).

        FIX: the call site in ``search_and_add`` passes (query, result); the
        original one-argument signature raised TypeError. Raises explicitly
        until implemented instead of silently returning None.
        """
        raise NotImplementedError("pasa selector filtering is not implemented; use rerank=True")

    def search_and_add(self, query, top_k=30):
        """Search ``query``, filter the hits, and merge unseen papers.

        Args:
            query: the search string.
            top_k: number of hits to request from the search API.
        """
        origin_search_result = search_papers(query, top_k)
        if self.rerank:
            filtered_papers = rerank_query_docs(query, origin_search_result, score_threshold=17.5)
        else:
            filtered_papers = self.filter_paper_by_pasa_selector(query, origin_search_result)
        search_results = wrap_search_results(filtered_papers)
        for index, item in enumerate(search_results.results):
            paper_id = item.entity.paper_id
            if paper_id not in self.save_data:
                # First sighting of this paper: keep wrapped and raw forms in sync.
                self.save_data[paper_id] = item
                self.origin_result[paper_id] = filtered_papers[index]

def search_paper_loop(origin_query: str, user_summary_type: str, keywords: list = None,
                      papers_count: int = 50, rerank: bool = False):
    """Run the paper-search agent end to end for one user query.

    Args:
        origin_query: the user's research question.
        user_summary_type: desired summary style, forwarded to the agent.
        keywords: optional seed queries. ``None`` replaces the old mutable
            ``[]`` default to avoid cross-call state sharing.
        papers_count: target number of unique papers to collect.
        rerank: whether to filter hits with the reranker.

    Returns:
        Tuple of (QueryResult wrapping all collected papers,
        dict of raw search hits keyed by paper_id).
    """
    agent = PaperSearchAgent(origin_query=origin_query, user_summary_type=user_summary_type,
                             query_list=keywords if keywords else [],
                             papers_count=papers_count, rerank=rerank)
    search_results, origin_result = agent.work()
    print("搜索过的问题:", agent.query_history)
    result_list = QueryResult(results=list(search_results.values()))
    return result_list, origin_result

if __name__ == "__main__":
    # Demo run: survey the Text2SQL research landscape with reranking enabled.
    demo_query = "What is the current status and challenges of Text2SQL research?"
    demo_summary_type = "A review of the current research status in a certain direction"
    paper_search_result, origin_result = search_paper_loop(
        demo_query,
        demo_summary_type,
        papers_count=70,
        rerank=True,
    )
