from fastmcp import FastMCP
import requests  # You'll need to install the requests package if you don't have it
import html as html5
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urljoin
import os

# Single FastMCP server instance; the tool functions below register
# themselves on it via the @mcp.tool() decorator.
mcp = FastMCP()

@mcp.tool()
def get_user_manual(url: str) -> str:
    """
    Description:
    This tool fetches and simplifies HTML from the Cangjie user manual, removing scripts, styles, sidebars, menu bars, and class/style attributes. Relative links become absolute. All content links are available in the sidebar, which serves as a comprehensive directory.
    Use this tool when you don't know how to get started with Cangjie programming or need guidance on language features and tools.

    Usage Guidelines for Cangjie User Manual:
    - Start from the release notes (https://docs.cangjie-lang.cn/docs/1.0.0/release_notes.html) where the sidebar lists all sections and subsections.
    - Use the sidebar as a directory to find and access specific chapters directly via their URLs.
    - Determine sections by matching titles in the sidebar.
    - Access detailed guides and appendices using the URLs found in the sidebar.

    Args:
        url (str): The URL to fetch, optimized for Cangjie user manual docs.

    Returns:
        str: Simplified response body, with non-essential elements removed for conciseness.
    """
    try:
        # requests has no default timeout; without one a stalled server
        # would hang this tool forever.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        response.encoding = response.apparent_encoding
        soup = BeautifulSoup(response.text, 'html.parser')
        # Strip scripts, style blocks, and stylesheet links.
        for tag in soup.find_all(['script', 'style']):
            tag.decompose()
        for link in soup.find_all('link', rel='stylesheet'):
            link.decompose()
        sidebar = soup.find(id='sidebar')
        if sidebar:
            if 'release_notes' not in url:
                # Only the release-notes page keeps its sidebar (it acts as
                # the manual's table of contents); drop it everywhere else.
                # BUG FIX: the original decomposed the sidebar and then kept
                # calling .find() on the destroyed tag, which only worked by
                # accident of bs4 internals.
                sidebar.decompose()
            else:
                chapter_list = sidebar.find('ol', class_='chapter')
                if chapter_list:
                    all_items = chapter_list.find_all('li', recursive=False)  # top-level <li> items only
                    keep_items = []
                    i = 0
                    while i < len(all_items):
                        item = all_items[i]
                        title_div = item.find('div')
                        # Keep only the "版本说明" (release notes) and
                        # "开发指南" (developer guide) categories.
                        if title_div and title_div.text.strip() in ['版本说明', '开发指南']:
                            keep_items.append(item)
                            # The next sibling <li> usually carries the
                            # category's subsection <ol>; keep it as well.
                            if i + 1 < len(all_items):
                                next_item = all_items[i + 1]
                                if next_item.find('ol', class_='section'):
                                    keep_items.append(next_item)
                                i += 1  # skip the sibling we just consumed
                        i += 1
                    # Rebuild the chapter list from the kept items only.
                    for child in list(chapter_list.children):
                        if child.name:
                            child.extract()
                    for kept in keep_items:
                        chapter_list.append(kept)
                    # Newlines after links keep the flattened text readable.
                    for a in chapter_list.find_all('a'):
                        a.insert_after('\n')
        # Remove the menu-bar chrome.
        menubar = soup.find(id='menu-bar')
        if menubar:
            menubar.decompose()
        # Drop presentation-only attributes.
        for tag in soup.find_all(True):
            tag.attrs.pop('style', None)
            tag.attrs.pop('class', None)
        # Rewrite relative links and images to absolute URLs.
        for a in soup.find_all('a', href=True):
            a['href'] = urljoin(url, a['href'])
        for img in soup.find_all('img', src=True):
            img['src'] = urljoin(url, img['src'])
        return html5.unescape(str(soup))
    except requests.RequestException:
        return f"Please start from the user manual(https://docs.cangjie-lang.cn/docs/1.0.0/release_notes.html)."


@mcp.tool()
def get_std_lib(url: str) -> str:
    """
    Description:
    This tool fetches and simplifies HTML from the Cangjie std library manual, removing scripts, styles, sidebars, menu bars, and class/style attributes. Relative links become absolute. All sub-URLs can be navigated step-by-step from the starting point @https://docs.cangjie-lang.cn/docs/1.0.0/libs/std/std_module_overview.html.
    Use this tool when you need detailed information about standard library APIs.

    Usage Guidelines for Cangjie Std Library Manual:
    - Start from the std overview(https://docs.cangjie-lang.cn/docs/1.0.0/libs/std/std_module_overview.html) to access package lists.
    - Determine packages by matching descriptions.
    - Access overviews and APIs via patterns derived from the overview page.
    - Use fragments for specific sections.

    Args:
        url (str): The URL to fetch, optimized for Cangjie docs. Use #fragment (e.g., #class-typeinfo) to extract only that section.

    Returns:
        str: Simplified response body, with non-essential elements removed for conciseness; if fragment specified, only that part and siblings until next h2.
    """
    try:
        # requests has no default timeout; bound the request so a stalled
        # server cannot hang this tool indefinitely.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        response.encoding = response.apparent_encoding
        soup = BeautifulSoup(response.text, 'html.parser')
        # Strip scripts, style blocks, and stylesheet links.
        for tag in soup.find_all(['script', 'style']):
            tag.decompose()
        for link in soup.find_all('link', rel='stylesheet'):
            link.decompose()
        # Remove sidebar and menu-bar chrome.
        for element_id in ('sidebar', 'menu-bar'):
            element = soup.find(id=element_id)
            if element:
                element.decompose()
        # Drop presentation-only attributes.
        for tag in soup.find_all(True):
            tag.attrs.pop('style', None)
            tag.attrs.pop('class', None)
        # Rewrite relative links and images to absolute URLs.
        for a in soup.find_all('a', href=True):
            a['href'] = urljoin(url, a['href'])
        for img in soup.find_all('img', src=True):
            img['src'] = urljoin(url, img['src'])

        # If the URL carries a #fragment, return only that element plus its
        # following siblings up to (but not including) the next <h2>.
        fragment = urlparse(url).fragment
        if fragment:
            element = soup.find(id=fragment)
            if element is None:
                return f"Fragment '{fragment}' not found in the page."
            content = [str(element)]
            for sib in element.next_siblings:
                if sib.name == 'h2':
                    break
                if sib.name and sib.text.strip():  # skip empty text nodes
                    content.append(str(sib))
            return html5.unescape(''.join(content))
        return html5.unescape(str(soup))
    except requests.RequestException:
        return f"Please start from the std overview(https://docs.cangjie-lang.cn/docs/1.0.0/libs/std/std_module_overview.html)."
    

@mcp.tool()
def get_stdx_lib(url: str) -> str:
    """
    Description:
    This tool reads and simplifies HTML from the local Cangjie stdx library manual, removing scripts, styles, sidebars, menu bars, and class/style attributes. Relative links become absolute local paths. All sub-paths can be navigated step-by-step from the starting point libs_stdx/index.html.
    Use this tool when you need detailed information about extended library APIs.

    Usage Guidelines for Cangjie Stdx Library Manual:
    - Start from the stdx overview(libs_stdx/index.html) to access package lists.
    - Determine packages by matching descriptions.
    - Access overviews and APIs via patterns derived from the overview page.
    - Use fragments for specific sections.

    Args:
        url (str): The relative path to read, optimized for local stdx docs. Use #fragment (e.g., #class-typeinfo) to extract only that section.

    Returns:
        str: Simplified response body, with non-essential elements removed for conciseness; if fragment specified, only that part and siblings until next h2.
    """
    try:
        root = os.path.realpath(os.path.dirname(os.path.abspath(__file__)))
        parsed_url = urlparse(url)
        fragment = parsed_url.fragment
        rel_path = parsed_url.path.lstrip('/')
        full_path = os.path.realpath(os.path.join(root, rel_path))
        # SECURITY FIX: the path comes from tool input; lstrip('/') alone does
        # not block '../' traversal, so refuse anything that resolves outside
        # the documentation root (e.g. '../../etc/passwd').
        if full_path != root and not full_path.startswith(root + os.sep):
            return "Error accessing local stdx documentation: path escapes documentation root. Please start from the stdx overview(libs_stdx/index.html)."

        with open(full_path, 'r', encoding='utf-8') as f:
            html_content = f.read()

        soup = BeautifulSoup(html_content, 'html.parser')

        # Strip scripts, style blocks, and stylesheet links.
        for tag in soup.find_all(['script', 'style']):
            tag.decompose()
        for link in soup.find_all('link', rel='stylesheet'):
            link.decompose()

        # Remove sidebar and menu-bar chrome.
        for element_id in ('sidebar', 'menu-bar'):
            element = soup.find(id=element_id)
            if element:
                element.decompose()

        # Drop presentation-only attributes.
        for tag in soup.find_all(True):
            tag.attrs.pop('style', None)
            tag.attrs.pop('class', None)

        # Rewrite relative links and images against the requested path.
        for a in soup.find_all('a', href=True):
            a['href'] = urljoin(url, a['href'])
        for img in soup.find_all('img', src=True):
            img['src'] = urljoin(url, img['src'])

        # If a #fragment is given, return only that element plus its
        # following siblings up to (but not including) the next <h2>.
        if fragment:
            element = soup.find(id=fragment)
            if element is None:
                return f"Fragment '{fragment}' not found in the page."
            content = [str(element)]
            for sib in element.next_siblings:
                if sib.name == 'h2':
                    break
                if sib.name and sib.text.strip():  # skip empty text nodes
                    content.append(str(sib))
            return html5.unescape(''.join(content))
        return html5.unescape(str(soup))
    except Exception as e:
        return f"Error accessing local stdx documentation: {str(e)}. Please start from the stdx overview(libs_stdx/index.html)."


if __name__ == "__main__":
    # NOTE(review): printing the working directory looks like leftover debug
    # output — confirm it is intentional before shipping.
    print(os.getcwd())
    # Start the MCP server (blocks until shutdown).
    mcp.run()