import json
import os, sys
from tqdm import tqdm
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from general_agent import Agent
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from api import api_tool, api_llm, api_prompt_engineering

import func_timeout
from func_timeout import func_set_timeout
from func_timeout.exceptions import FunctionTimedOut
from colorama import Fore, Style
import concurrent
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
class MultiChunkConclusionAgent(Agent):
    """Fan out per-chunk conclusion work across a thread pool and collect
    the results, optionally stopping early once enough chunks succeeded."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def conclude(self, topic: str, chunk_list: list = None, **kwargs):
        """Summarize every chunk in ``chunk_list`` with respect to ``topic``.

        Recognized kwargs (all forwarded to the single-chunk agent as well):
            max_workers (int): thread-pool size, default 512.
            expected_return_workers (int): stop early once this many chunks
                have been summarized successfully, default 128.
            timeout (int): per-chunk wall-clock timeout in seconds, default 15.
            return_relevant (bool): if True, keep only results whose
                "相关" field is truthy (requires return_type="chunk").
            DEBUG (bool): extra console output.

        Returns:
            list: one entry per successfully concluded chunk — a dict when
            return_type="chunk" is passed, otherwise the summary string.
        """
        # Avoid the shared-mutable-default pitfall of `chunk_list: list = []`.
        chunk_list = [] if chunk_list is None else chunk_list
        max_workers = kwargs.get("max_workers", 512)
        expected_return_workers = kwargs.get("expected_return_workers", 128)
        results = []

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_chunk = {
                executor.submit(self.__conclude_single_chunk, topic, chunk, **kwargs): chunk
                for chunk in chunk_list
            }

            # Show progress while futures complete in arbitrary order.
            progress_bar = tqdm(desc="Concluding chunk", unit="chunk", total=len(future_to_chunk))
            not_completed_count = 0
            try:
                for future in as_completed(future_to_chunk):
                    progress_bar.update(1)
                    try:
                        result = future.result()
                    except FunctionTimedOut:
                        # Per-chunk timeout from func_timeout; best-effort, skip.
                        not_completed_count += 1
                        continue
                    except Exception:
                        # Any other per-chunk failure is likewise skipped.
                        not_completed_count += 1
                        continue
                    results.append(result)
                    # Fix: the original broke *before* collecting the result of
                    # the future that reached the threshold, returning one fewer
                    # result than requested — collect first, then check. Only
                    # successful chunks count toward the early-exit threshold.
                    if len(results) >= expected_return_workers:
                        if self.DEBUG or ("DEBUG" in kwargs and kwargs["DEBUG"] == True):
                            print(f">>>> 提前达到总结数量要求: {expected_return_workers}")
                        break
            finally:
                # Fix: the original never closed the progress bar.
                progress_bar.close()

            # Tally: successes + failures; on early exit the two need not
            # add up to total_count.
            total_count = len(future_to_chunk)
            completed_count = len(results)

            print(
                f">> chunk conclusion results: "
                f"{Fore.GREEN}[{completed_count}]{Style.RESET_ALL} + "
                f"{Fore.RED}[{not_completed_count}]{Style.RESET_ALL} = "
                f"{Fore.YELLOW}{total_count}{Style.RESET_ALL}"
            )

        if ("return_relevant" in kwargs) and (kwargs["return_relevant"] == True):
            # Only meaningful with return_type="chunk" (dict results).
            results = [ele for ele in results if ele["相关"]]

        return results

    def __conclude_single_chunk(self, topic, chunk, **kwargs):
        """Summarize one chunk under a wall-clock timeout (kwargs["timeout"],
        default 15 s) enforced by func_timeout; raises FunctionTimedOut."""
        @func_set_timeout(kwargs.get("timeout", 15))
        def conclude_with_timeout(topic, chunk, **kwargs):
            single_chunk_agent = SingleChunkConclusionAgent(model=self.model, **kwargs)
            return single_chunk_agent.conclude(topic=topic, chunk=chunk, **kwargs)
        return conclude_with_timeout(topic, chunk, **kwargs)


class SingleChunkConclusionAgent(Agent):
    """Summarize one search-result chunk with respect to a topic through a
    single LLM call driven by the single_chunk_conclusion.txt template."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
        # Prompt template loaded once and reused by every conclude() call.
        self.template = self.PE.get_template(os.path.join(self.TEMPLATE_DIR, "single_chunk_conclusion.txt"))

    def conclude(self, topic: str, chunk: dict, **kwargs):
        """Ask the LLM whether ``chunk`` is relevant to ``topic`` and, if so,
        for a summary.

        Recognized kwargs:
            return_type (str): "chunk" to merge the verdict into ``chunk``
                (mutates and returns it); otherwise return the summary string.
            DEBUG (bool): print the rendered prompt.

        Returns:
            dict | str: the updated chunk, or the summary text ("" when the
            model marked the chunk "[irrelevant]").
        """
        prompt = self.PE.format(
            template=self.template,
            topic=topic,
            chunk=json.dumps(chunk, indent=2, ensure_ascii=False)
        )
        if self.DEBUG or ("DEBUG" in kwargs and kwargs["DEBUG"] == True):
            print(prompt+"\n"+"-"*50)
        response = self.LLM.chat(prompt=prompt, model=self.model)
        # Strip Markdown code fences the model may wrap the answer in.
        response = response.replace("```text\n", "").replace("```\n", "").strip()

        # The template has the model flag off-topic chunks with the literal
        # token "[irrelevant]" — presumably; TODO confirm against template.
        relevant = "[irrelevant]" not in response
        json_obj = {
            "相关": relevant,
            "总结": response if relevant else "",
        }

        # Fix: the original guarded this with `if json_obj: ... else: ...`,
        # but json_obj is always a non-empty dict, so the else arm was dead
        # code; the guard is removed without any behavior change.
        if ("return_type" in kwargs) and (kwargs["return_type"] == "chunk"):
            chunk.update(json_obj)
            return chunk
        return json_obj.get("总结", "")
            
    
    
if __name__=="__main__":
    # Demo entry point: retrieve candidate paper chunks for a query, then
    # summarize them all in parallel via MultiChunkConclusionAgent.
    model = "gpt-4o-mini"
    topic = "text2sql是什么？"
    chunks = api_tool.search_papers(topic, top_k=128)

    multi_agent = MultiChunkConclusionAgent(model=model)
    conclusions = multi_agent.conclude(
        topic=topic,
        chunk_list=chunks,
        return_type="chunk",
        timeout=20,
    )
    # multi_agent.PE.print_json(conclusions)