import time

from utils.api_request import *
from utils.data_type import QueryResult, QueryResultItem, wrap_search_results
from utils.llm_client import llm_chat
from agents.paper_search_agent import search_paper_loop
from prompts.xml_detail_outline import XML_DETAIL_OUTLINE_GENERATE_PROMPT, XML_DETAIL_OUTLINE_GENERATE_PROMPT_SYSTEM
import asyncio
import json
from utils.token_count import glm_tokenize_text
import re
from openai import OpenAI
from utils.data_io import save_datas


def parse_attrs(attr_str):
    """Parse ``key="value"`` pairs out of an XML attribute string.

    Returns a dict mapping attribute names to their (string) values; when a
    key appears more than once, the last occurrence wins.
    """
    return dict(re.findall(r'(\w+)="(.*?)"', attr_str))

def parse_tag(tag_content):
    """Classify one tag body (the text found between '<' and '>').

    Returns one of:
      - ``{'type': 'end', 'name': ...}`` for ``</name>`` tags,
      - ``{'type': 'self_closing', 'name': ..., 'attrs': {...}}`` for ``<name .../>``,
      - ``{'type': 'start', 'name': ..., 'attrs': {...}}`` for ``<name ...>``.
    """
    body = tag_content.strip()

    # Closing tag: everything after the leading '/' is the name.
    if body.startswith('/'):
        return {'type': 'end', 'name': body[1:]}

    tag_type = 'start'
    if body.endswith('/'):
        tag_type = 'self_closing'
        body = body[:-1].strip()

    # First token is the tag name; the rest (if any) is the attribute string.
    name, _, attrs_str = body.partition(' ')
    return {'type': tag_type, 'name': name, 'attrs': parse_attrs(attrs_str)}

def xml_to_dict(xml_str):
    """Parse a flat XML-like string into a nested outline dict.

    Start tags open a node ``{'title', 'content', 'children'}``; self-closing
    tags append a leaf node that additionally carries a ``refs`` list (its
    ``refs`` attribute is JSON-decoded after swapping single quotes for
    double quotes); end tags close the innermost open node.

    Returns the root node, or ``None`` when the string contains no start tag.
    """
    root = None
    open_nodes = []  # stack of nodes whose end tag has not been seen yet

    for raw_tag in re.findall(r'<(.*?)>', xml_str):
        tag = parse_tag(raw_tag)
        kind = tag['type']

        if kind == 'end':
            if open_nodes:
                open_nodes.pop()
            continue

        title = tag['attrs'].get('title', '')

        if kind == 'self_closing':
            leaf = {
                'title': title,
                'refs': json.loads(tag['attrs'].get('refs', '[]').replace("'", '"')),
                'content': '',
                'children': [],
            }
            # A self-closing tag with no open parent is silently dropped;
            # it never becomes the root.
            if open_nodes:
                open_nodes[-1]['children'].append(leaf)
            continue

        # Start tag: open a new node, attached to the current parent if any.
        node = {'title': title, 'content': '', 'children': []}
        if open_nodes:
            open_nodes[-1]['children'].append(node)
        else:
            root = node
        open_nodes.append(node)

    return root


class DetailOutlineAgent:
    """Generates a detailed XML outline for a literature summary via an LLM.

    Given the user's original query, the desired summary type and a dict of
    paper-chunk search results, it builds a prompt containing the chunks,
    calls the LLM, and parses the returned XML outline into a nested dict.
    """

    def __init__(self, summary_type: str, origin_query: str, origin_result: dict, paper_search_result: QueryResult = None):
        # paper_search_result is currently unused by generate_result();
        # kept for backward compatibility with existing callers.
        self.paper_search_result = paper_search_result
        self.summary_type = summary_type
        self.origin_query = origin_query
        # Mapping of paper_id -> chunk payload ({'entity': {'paper_title', 'chunk_text', ...}}).
        self.origin_result = origin_result
        self.detail_outline_str = ""
        self.reference_mark_str = ""
        self.detail_outline_dict = {}

    async def get_entire_paper_content(self, paper_id):
        """Fetch one paper's full content, returned as {paper_id: result}."""
        result = await async_query_by_paper_id(paper_id=paper_id)
        return {paper_id: result}

    async def get_all_papers_content(self):
        """Concurrently fetch the content of every paper in self.paper_id_list.

        NOTE(review): self.paper_id_list is not set anywhere in this class —
        callers must assign it before invoking this method; confirm intended.
        """
        tasks = [self.get_entire_paper_content(paper_id) for paper_id in self.paper_id_list]
        results = await asyncio.gather(*tasks)
        return results

    def generate_result(self):
        """Build the prompt from paper chunks, call the LLM and parse the outline.

        Returns:
            tuple: (outline_dict, paper_index_dict) where outline_dict is the
            nested outline parsed from the model's XML output (None when no
            outline tag is found) and paper_index_dict maps the "paper_N"
            aliases used in the prompt back to real paper ids.
        """
        paper_chunks = []
        index = 1
        paper_index_dict = {}
        for paper_id, chunk_content in self.origin_result.items():
            temp = ""
            temp += f"<论文块{index}>\n"
            # BUGFIX: the closing tag previously lacked its '>' ("</论文ID\n"),
            # emitting malformed markup into the prompt.
            temp += f"<论文ID>paper_{index}</论文ID>\n"
            paper_index_dict[f"paper_{index}"] = paper_id
            temp += f"<论文标题>{chunk_content['entity']['paper_title']}</论文标题>\n"
            # Keep only the first 500 whitespace-separated words of each chunk
            # to bound prompt size.
            temp += f"<论文块内容>{' '.join(chunk_content['entity']['chunk_text'].split()[:500])}</论文块内容>\n"
            temp += f"</论文块{index}>\n"
            index += 1
            paper_chunks.append(temp)

        total_token = glm_tokenize_text("\n".join(paper_chunks))
        print(f"所有文献toke总数为：{total_token}")
        # Truncate (proportionally, by chunk count — an approximation) only
        # when the total exceeds the token budget.
        token_limit = 110 * 1000
        if total_token > token_limit:
            end_index = int((token_limit / total_token) * index)
            paper_chunks = paper_chunks[:end_index + 1]
        paper_chunks_str = "\n".join(paper_chunks)
        print(f"截断之后的token总数为：{glm_tokenize_text(paper_chunks_str)}")

        prompt = XML_DETAIL_OUTLINE_GENERATE_PROMPT.format(user_requirement=self.origin_query, summary_type=self.summary_type, paper_chunks=paper_chunks_str)
        system_prompt = XML_DETAIL_OUTLINE_GENERATE_PROMPT_SYSTEM
        # Persist the prompt for offline inspection/debugging.
        save_datas(prompt, "prompt.txt")

        start_time = time.time()
        response = llm_chat(messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}], model_name="GLM_4_PLUS", temperature=0, max_tokens=20480)
        result = response.choices[0].message.content
        end_time = time.time()
        print(f"生成大纲耗时：{end_time-start_time} s")

        outline_dict = xml_to_dict(result)
        print(result)
        return outline_dict, paper_index_dict

def generate_detail_outline_loop(origin_query, user_summary_type, origin_result):
    """Convenience wrapper: build a DetailOutlineAgent and run one generation.

    Returns the (outline_dict, paper_index_dict) pair produced by
    DetailOutlineAgent.generate_result().
    """
    agent = DetailOutlineAgent(
        summary_type=user_summary_type,
        origin_query=origin_query,
        origin_result=origin_result,
    )
    return agent.generate_result()

if __name__ == '__main__':
    origin_query = "What is the current status and challenges of Text2SQL research?"
    user_summary_type = "A review of the current research status in a certain direction"

    # Load the initial paper-search result from disk; fall back to an empty
    # dict when the example file is missing.
    try:
        with open('examples_data/origin_paper_search_result.json', 'r', encoding='utf-8') as f:
            origin_result = json.load(f)
    except FileNotFoundError:
        print("文件 'origin_paper_search_result.json' 未找到，请确保文件存在。")
        origin_result = {}

    # Generate and parse the outline.
    # BUGFIX: generate_detail_outline_loop returns a (dict, dict) tuple; the
    # old code assigned it to one name and passed it to f.write(), which
    # raises TypeError (write() needs a str). Unpack the pair and serialize
    # the outline dict as JSON instead.
    content, paper_index_dict = generate_detail_outline_loop(origin_query, user_summary_type, origin_result)
    if content:
        with open('examples_data/xml_detail_outline.txt', 'w', encoding='utf-8') as f:
            json.dump(content, f, ensure_ascii=False, indent=2)
    else:
        print("未找到匹配的内容")
