import json
import os, sys
import re
from concurrent.futures import ThreadPoolExecutor

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from general_agent import Agent
from chunk_conclusion import MultiChunkConclusionAgent
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from api import api_tool, api_llm, api_prompt_engineering

class CitationAgent(Agent):
    """Generate an LLM answer for a topic with numbered inline citations.

    The agent prompts the LLM with per-chunk summaries, expects inline
    ``<sup>{chunk_id}</sup>`` markers in the reply, then rewrites those
    markers into sequential reference numbers and appends a
    ``## References`` list built from the chunks' paper metadata.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
        # Default prompt template; cite_section() swaps in the section-level one.
        self.template = self.PE.get_template(os.path.join(self.TEMPLATE_DIR, "citation_generation.txt"))

    def cite_section(self, section_topic: str, chunk_list: list = None, **kwargs):
        """Cite a single report section: load the section template, then delegate to cite().

        NOTE(review): this permanently replaces ``self.template``, so any later
        cite() call on the same instance keeps using the section template —
        confirm that is intended.
        """
        self.template = self.PE.get_template(os.path.join(self.TEMPLATE_DIR, "citation_generation_for_section.txt"))
        return self.cite(topic=section_topic, chunk_list=chunk_list, **kwargs)

    def cite(self, topic: str, chunk_list: list = None, **kwargs):
        """Ask the LLM to answer ``topic`` while citing chunks from ``chunk_list``.

        Parameters
        ----------
        topic : str
            The question / topic the answer should address.
        chunk_list : list of dict, optional
            Retrieved chunks. Only entries that carry a "总结" (summary) key
            are forwarded to the model; each forwarded entry exposes only its
            "id" and "总结" fields.
        **kwargs
            enable_citation_formatting (bool, default True): when True, rewrite
            raw ``<sup>chunk_id</sup>`` markers into numbered citations plus a
            reference list; otherwise return the raw LLM reply.

        Returns
        -------
        str
            The (optionally formatted) cited answer.
        """
        # Avoid the shared-mutable-default pitfall.
        chunk_list = [] if chunk_list is None else chunk_list

        # Only the chunk id and its summary are shown to the model.
        simplified_chunk_list = [
            {"id": ele["id"], "总结": ele["总结"]} for ele in chunk_list if "总结" in ele
        ]

        prompt = self.PE.format(
            template=self.template,
            topic=topic,
            chunk_list=json.dumps(simplified_chunk_list, indent=2, ensure_ascii=False)
        )
        response = self.LLM.chat(prompt=prompt, model=self.model)

        # Strip a possible markdown code fence around the reply.
        raw_citation = response.replace("```text\n", "").replace("```\n", "").strip()

        enable_citation_formatting = kwargs.get("enable_citation_formatting", True)
        if enable_citation_formatting:
            formatted_citation, reference_suffix = self.format_citation(raw_citation=raw_citation, chunk_list=chunk_list)
            final_citation = formatted_citation + "\n\n" + reference_suffix
        else:
            final_citation = raw_citation
        return final_citation

    def format_citation(self, raw_citation: str, chunk_list: list = None, **kwargs):
        """Renumber ``<sup>chunk_id</sup>`` markers and build the reference list.

        Chunk ids are mapped to sequential numbers in order of first
        appearance; uncited chunks are dropped; cited chunks are enriched with
        venue/year metadata; leftover (hallucinated) raw ids are removed.

        Returns
        -------
        tuple of (str, str)
            The renumbered citation text and the "## References" suffix.
        """
        chunk_list = [] if chunk_list is None else chunk_list
        debug = self.DEBUG or kwargs.get("DEBUG")

        if debug:
            print("len(chunk_list):", len(chunk_list))

        # Map each cited, known chunk id to its citation rank (1-based,
        # order of first appearance in the text).
        real_chunk_id_set = {str(chunk['id']) for chunk in chunk_list}
        rank_id_dictionary = {}
        cite_count = 1
        for chunk_id in re.findall(r"<sup>(.*?)</sup>", raw_citation):
            if chunk_id not in real_chunk_id_set or chunk_id in rank_id_dictionary:
                continue
            rank_id_dictionary[chunk_id] = cite_count
            cite_count += 1
        if debug:
            print(self.PE.print_json(rank_id_dictionary))
        # Keep only the chunks that were actually cited.
        chunk_list = [ele for ele in chunk_list if str(ele['id']) in rank_id_dictionary]

        # Enrich cited chunks with venue/year/title metadata (in parallel).
        chunk_list = self.augment_chunk_information(raw_citation, chunk_list=chunk_list, **kwargs)

        # Replace raw ids with their rank and collect reference entries.
        reference_dict = {}
        chunk_index_count = {}
        for chunk in chunk_list:
            # re.escape guards against ids containing regex metacharacters.
            pattern = f"<sup>{re.escape(str(chunk['id']))}</sup>"
            if re.search(pattern, raw_citation):
                try:
                    rank = rank_id_dictionary[str(chunk['id'])]
                    raw_citation = re.sub(pattern, f"<sup>{rank}</sup>", raw_citation)
                    entity = chunk['entity']
                    paper_id = entity["paper_id"]
                    # Per-paper chunk counter, shown in each reference entry.
                    chunk_index_count[paper_id] = chunk_index_count.get(paper_id, 0) + 1

                    # Drop a trailing period so titles format uniformly
                    # (endswith also guards against empty titles).
                    if entity['paper_title'].endswith('.'):
                        entity['paper_title'] = entity['paper_title'][:-1]
                    single_reference = (
                        f"[{rank}] {entity['paper_title'].strip()}, "
                        f"{entity['venue']}, "
                        + (f"{entity['year']}, " if entity['year'] else "")
                        + f"chunk {chunk_index_count[paper_id]}\n"
                    )
                    reference_dict[rank] = single_reference
                except Exception as e:
                    # Best-effort: a malformed chunk must not kill the whole answer.
                    print(">> 添加引用失败:", chunk['id'], e)
        reference_suffix = "## References\n\n"
        for rank in sorted(reference_dict):
            reference_suffix += reference_dict[rank]

        # Remove hallucinated citations: any leftover raw 18-digit chunk id.
        raw_citation = re.sub(r"<sup>\d{18}</sup>", "", raw_citation)

        return raw_citation, reference_suffix

    def augment_chunk_information(self, raw_citation, chunk_list: list = None, **kwargs):
        """Enrich every chunk in ``chunk_list`` with parsed metadata, in parallel.

        Each chunk dict is mutated in place by the worker; the (same) list is
        returned. Worker failures are logged and swallowed so one bad chunk
        cannot abort the batch.
        """
        chunk_list = [] if chunk_list is None else chunk_list
        max_workers = kwargs.get("max_workers", 512)

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = [
                executor.submit(self.__augment_single_chunk_information, raw_citation=raw_citation, chunk=chunk, **kwargs)
                for chunk in chunk_list
            ]
            # Wait for completion and surface (but do not raise) exceptions.
            for future in futures:
                try:
                    future.result()
                except Exception as e:
                    print(f"Chunk processing failed with exception: {e}")

        return chunk_list

    def __augment_single_chunk_information(self, raw_citation, chunk: dict, **kwargs):
        """Derive venue/year/source fields for one chunk, if it is cited in the text.

        Mutates ``chunk['entity']`` in place and returns the chunk.
        """
        pattern = f"<sup>{re.escape(str(chunk['id']))}</sup>"
        if re.search(pattern, raw_citation):
            # Filenames look like "...Meta_Data_<SOURCE>_with_whole_text..." —
            # extract the <SOURCE> part. TODO confirm against actual filenames.
            venue_match = re.search(r'(?<=Meta_Data_).*(?=_with_whole_text)', chunk['entity']['original_filename'])
            chunk['entity']['paper_source'] = venue_match.group() if venue_match else ""

            if self.DEBUG or kwargs.get("DEBUG"):
                print(">> 原始文件:", chunk['entity']['original_filename'])
                print(">> 正则结果:", chunk['entity']['paper_source'])

            # Split "<letters><separator><digits>" into venue + year.
            detail_match = re.match(r"([A-Za-z]+)[^A-Za-z0-9]*(\d+)", chunk['entity']['paper_source'])
            chunk['entity']['venue'] = detail_match.group(1) if detail_match else ""
            if 'year' not in chunk['entity']:
                chunk['entity']['year'] = detail_match.group(2) if detail_match else ""
            if not chunk['entity']['year']:
                # No parsable year: fall back to the whole source string as the venue.
                chunk['entity']['venue'] = chunk['entity']['paper_source'].replace("_", " ").strip()

            # Collapse runs of whitespace in the title.
            chunk['entity']['paper_title'] = re.sub(r'\s+', ' ', chunk['entity']['paper_title'])

        return chunk
    
if __name__=="__main__":
    # Demo pipeline: retrieve paper chunks for a topic, summarize them,
    # then produce a cited answer.
    model = "gpt-4o"
    topic = "什么是损失函数"
    search_keyword = topic

    print("-"*100)
    print(">> search_papers...")
    chunk_list = api_tool.search_papers(
        query=search_keyword,
        top_k=50
    )
    print(str(chunk_list)[:256]+"...")

    print("-"*100)
    print(">> Agenting...")
    conclusion_agent = MultiChunkConclusionAgent(model=model, DEBUG=True)
    chunk_list = conclusion_agent.conclude(
        topic=topic,
        chunk_list=chunk_list,
        return_type="chunk",
        return_relevant=True
    )

    print(json.dumps(chunk_list, ensure_ascii=False, indent=4))

    citation_agent = CitationAgent(model=model, DEBUG=True)
    answer = citation_agent.cite(
        topic=topic,
        chunk_list=chunk_list,
        DEBUG=True,
    )

    print("-"*100+"\n"+"-"*100+"\n"+"-"*100+"\n"+answer)