import json
import os
import pathlib
from io import StringIO
from typing import Dict, List, Optional, Tuple, cast

import requests
from langchain_core.documents import Document
from langchain_core.tools import tool

from patagent.constant import *


def split_html_by_headers(html_doc: str, headers_to_split_on: Dict[str, str], page_id: str) -> List[Dict[str, object]]:
    """Split rendered Confluence HTML into sections, one per matched header tag.

    Args:
        html_doc: Rendered HTML of a Confluence page (the ``body.view`` value).
        headers_to_split_on: Mapping whose keys are the header tag names
            (e.g. ``"h1"``) to split on; only the keys are used here.
        page_id: Confluence page id, embedded in each section's metadata URL.

    Returns:
        A list of ``{"content": <str>, "metadata": {"url": ..., "header": ...}}``
        dicts, one per non-empty section.

    Raises:
        ImportError: If BeautifulSoup (``bs4``) is not installed.
    """
    try:
        from bs4 import BeautifulSoup, PageElement  # type: ignore[import-untyped]
    except ImportError as e:
        raise ImportError(
            "Unable to import BeautifulSoup/PageElement, \
                please install with `pip install \
                bs4`."
        ) from e
        
    # Crude table detection: a plain substring test on the raw HTML, so any
    # page merely containing the text "table" anywhere also takes the table path.
    is_table = 'table' in html_doc
    soup = BeautifulSoup(html_doc, "html.parser")
    headers = list(headers_to_split_on.keys())
    sections: list[dict[str, object]] = []
    schema = []        # column names collected from the table's header row
    parent_row = []    # last complete row; fills in missing leading cells of short rows
    row = []           # cell texts of the row currently being collected

    # NOTE(review): `headers` is rebound here from tag-name strings to the
    # matched Tag elements — the same name serves two purposes.
    headers = soup.find_all(headers)
    for i, header in enumerate(headers):
        is_schema = False
        header_element: PageElement = header
        if is_table:
            in_tab = False
            # Walk every element between this header and the next matched header.
            for element in header_element.next_elements:
                # The first header's block (or anything containing a <th>) is
                # treated as the schema row rather than data.
                # NOTE(review): `.name` is read before the isinstance-str check,
                # so this relies on bs4 string nodes exposing `.name` — TODO confirm.
                if not is_schema and (element.name == 'th' or i == 0):
                    is_schema = True
                if i + 1 < len(headers) and element == headers[i + 1]:
                    break
                if isinstance(element, str):
                    if element == '\n':
                        continue
                    if is_schema:
                        schema.append(element)
                    else:
                        # Consecutive text nodes inside one cell are merged into
                        # the previous entry; otherwise a new cell is started.
                        if len(row) > 0 and in_tab:
                            row[len(row) - 1] += element
                        else:
                            row.append(element)
                else:
                    # <td>/<p> tags mark a cell boundary; any other tag means we
                    # are still inside the current cell.
                    if element.name == 'td' or element.name == 'p':
                        in_tab = False
                    else:
                        in_tab = True
            if is_schema:
                # Schema rows produce no section of their own.
                row = []
                continue
            current_header = header_element.text.strip()
            current_header_tag = header_element.name
            section_content = []
            if len(schema) == len(row):
                # Complete row: remember it so later short rows (merged leading
                # cells, presumably — TODO confirm) can borrow its leading values.
                parent_row = row
                section_content = row
            else:
                # Short row: prefix the missing leading cells from the last
                # complete row, then append the cells this row did provide.
                for j in range(len(schema) - len(row)):
                    section_content.append(parent_row[j])
                section_content.extend(row)
            
            content = "\n".join(section_content).strip()

            if content != "":
                sections.append(
                    {
                        "content": content,
                        "metadata": {
                            "url": 'https://confluence.zhihuiya.com/pages/viewpage.action?pageId=' + page_id,
                            # For table rows the "header" is the joined schema
                            # (column names), not the page header text.
                            "header": ' | '.join(schema)
                        },
                    }
                )
            row = []
        else:
            current_header = header_element.text.strip()
            current_header_tag = header_element.name
            section_content = []
            # Non-table page: collect all text nodes between this header and
            # the next matched header.
            for element in header_element.next_elements:
                if i + 1 < len(headers) and element == headers[i + 1]:
                    break
                if isinstance(element, str):
                    section_content.append(element)
            content = "\n".join(section_content).strip()

            if content != "":
                sections.append(
                    {
                        "content": content,
                        "metadata": {
                            "url": 'https://confluence.zhihuiya.com/pages/viewpage.action?pageId=' + page_id,
                            "header": current_header
                        },
                    }
                )

    return sections

def convert_possible_tags_to_header(html_content: str) -> str:
    """Normalize header-like tags in *html_content* via an XSLT transform.

    Applies ``xsl/converting_to_header.xslt`` (located one directory above
    this module's directory) to the parsed HTML and returns the transformed
    document as a string.

    Args:
        html_content: Raw HTML markup to transform.

    Returns:
        The XSLT-transformed document, serialized to ``str``.

    Raises:
        ImportError: If ``lxml`` is not installed.
    """
    try:
        from lxml import etree
    except ImportError as e:
        raise ImportError(
            "Unable to import lxml, please install with `pip install lxml`."
        ) from e

    # <module dir>/../xsl/converting_to_header.xslt — same location the
    # original os.path.dirname(os.path.abspath(...)) chain computed, but via
    # pathlib only (the original relied on `os` without importing it, which
    # raised NameError at call time).
    xslt_path = pathlib.Path(__file__).absolute().parent.parent / "xsl" / "converting_to_header.xslt"

    # lxml's HTMLParser tolerates the non-XML markup Confluence emits.
    parser = etree.HTMLParser()
    tree = etree.parse(StringIO(html_content), parser)

    xslt_tree = etree.parse(str(xslt_path))
    transform = etree.XSLT(xslt_tree)
    return str(transform(tree))

def get_confluence_list_by_pageid(page_id, headers_to_split_on=None):
    """Fetch a Confluence page and split its rendered body into sections.

    Args:
        page_id: Confluence page id to fetch.
        headers_to_split_on: Optional iterable of ``(tag, label)`` pairs naming
            the header tags to split on; defaults to no headers.

    Returns:
        The section list produced by ``split_html_by_headers``.

    Raises:
        requests.HTTPError: If the Confluence API responds with a non-2xx
            status.
    """
    # None default avoids the shared-mutable-default pitfall of `= []`.
    header_pairs = dict(headers_to_split_on or [])

    # Bearer-token auth headers for the Confluence REST API.
    request_headers = {
        "Authorization": f"Bearer {CONFLUENCE_TOKEN}",
        "Content-Type": "application/json"
    }

    # Ask for the rendered page content (body.view), not raw storage format.
    params = {
        "expand": "body.view"
    }

    response = requests.get(
        f"{CONFLUENCE_URL}/rest/api/content/{page_id}",
        headers=request_headers,
        params=params,
        timeout=30,  # don't hang forever on an unresponsive Confluence instance
    )
    # The original silently fell through (returning None) on any non-200
    # status, which made callers fail later with an opaque TypeError when
    # iterating. Fail loudly at the source instead.
    response.raise_for_status()

    page_data = response.json()
    html_content = page_data["body"]["view"]["value"]

    file_content = convert_possible_tags_to_header(html_content)
    return split_html_by_headers(file_content, header_pairs, page_id)

def get_confluence_text_by_pageid(page_id, headers_to_split_on=None):
    """Fetch a Confluence page and return each section as a JSON string.

    Thin wrapper around ``get_confluence_list_by_pageid`` that serializes
    every section dict with ``json.dumps``.

    Args:
        page_id: Confluence page id to fetch.
        headers_to_split_on: Optional iterable of ``(tag, label)`` pairs naming
            the header tags to split on; defaults to no headers.

    Returns:
        A list of JSON strings, one per section.
    """
    # None default avoids the shared-mutable-default pitfall; pass [] through
    # so the downstream dict() conversion still works.
    sections = get_confluence_list_by_pageid(page_id, headers_to_split_on or [])
    return [json.dumps(section) for section in sections]

@tool
def get_confluence_document_by_pageid(page_id, headers_to_split_on=None):
    """parse confluence content with a page id"""
    # None default avoids the shared-mutable-default pitfall; pass [] through
    # so the downstream dict() conversion still works.
    sections = get_confluence_list_by_pageid(page_id, headers_to_split_on or [])
    # Each section is {"content": str, "metadata": {"url": ..., "header": ...}}
    # as produced by split_html_by_headers. The original indexed
    # section["tag_name"] — a key that is never produced — and subscripted the
    # headers_to_split_on pair list with it, both of which raised at runtime.
    # Pass each section's own metadata dict straight through instead.
    return [
        Document(
            cast(str, section["content"]),
            metadata=cast(Dict[str, str], section["metadata"]),
        )
        for section in sections
    ]
