import logging
import time
from typing import Any, Dict, Optional
from urllib.parse import urljoin, urlparse

import markdownify
import requests
import validators
from bs4 import BeautifulSoup, FeatureNotFound, Tag
from fastmcp import FastMCP

# 初始化MCP服务
mcp = FastMCP("web_content.mcp")

# 配置项
TIMEOUT = 10  # HTTP请求超时时间(秒)
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
MAX_RETRIES = 3  # 最大重试次数


@mcp.tool()
def ping() -> str:
    """健康检查工具

    返回"pong"表示服务正常运行
    """
    return "pong"


@mcp.tool()
def validate_url(url: str) -> dict:
    """验证网址有效性

    参数:
        url: 待验证的网址

    返回:
        dict: 包含验证结果的字典
            - valid: 布尔值，表示网址是否有效
            - status_code: HTTP状态码(如果valid为True)
            - error: 错误信息(如果valid为False)
            - domain: 域名(如果valid为True)
            - scheme: 协议(如果valid为True)
    """
    result: dict = {
        "valid": False,
        "status_code": None,
        "domain": None,
        "scheme": None,
        "error": None
    }

    # 基本格式验证
    if not validators.url(url):
        result["error"] = "无效的URL格式"
        return result

    # 解析域名和协议
    try:
        parsed = urlparse(url)
        result["domain"] = parsed.netloc
        result["scheme"] = parsed.scheme
    except Exception as e:
        result["error"] = f"URL解析失败: {str(e)}"
        return result

    # 尝试访问URL，HEAD失败自动降级GET
    try:
        headers = {"User-Agent": USER_AGENT}
        try:
            response = requests.head(url, headers=headers, timeout=TIMEOUT, allow_redirects=True)
            status_code = response.status_code
        except requests.RequestException:
            # HEAD失败降级为GET
            response = requests.get(url, headers=headers, timeout=TIMEOUT, allow_redirects=True, stream=True)
            status_code = response.status_code

        result["status_code"] = status_code
        result["valid"] = 200 <= status_code < 400

        if not result["valid"]:
            result["error"] = f"HTTP请求失败，状态码: {status_code}"

    except requests.exceptions.SSLError as e:
        result["error"] = f"SSL证书错误: {str(e)}"
    except requests.exceptions.Timeout:
        result["error"] = f"请求超时({TIMEOUT}秒)"
    except requests.exceptions.ConnectionError:
        result["error"] = "连接错误，可能是网络问题或服务器不可用"
    except requests.exceptions.RequestException as e:
        result["error"] = f"请求异常: {str(e)}"
    except Exception as e:
        result["error"] = f"未知错误: {str(e)}"

    return result


def _make_soup(html: str) -> BeautifulSoup:
    """Parse HTML, preferring the fast lxml parser but falling back to the
    stdlib html.parser when lxml is not installed."""
    try:
        return BeautifulSoup(html, "lxml")
    except FeatureNotFound:
        return BeautifulSoup(html, "html.parser")


def _extract_page_metadata(soup: BeautifulSoup) -> tuple:
    """Return (title, description) for a parsed page.

    Falls back to the placeholders "无标题" / "无描述" when the page has no
    <title> text or no description-style <meta> tag.
    """
    title = soup.title.string.strip() if soup.title and soup.title.string else "无标题"

    desc = None
    for meta in soup.find_all("meta"):
        if not isinstance(meta, Tag):
            continue
        # str() guards against bs4 returning a list for multi-valued attrs.
        name = str(meta.get("name", "")).lower()
        prop = str(meta.get("property", "")).lower()
        content = meta.get("content")
        if content and (name == "description" or prop in ("og:description", "twitter:description")):
            desc = str(content).strip()
            break
    return title, desc if desc else "无描述"


def _extract_page_text(soup: BeautifulSoup, raw_html: str, max_text_len: int = 10000) -> tuple:
    """Return (plain_text, markdown), each truncated to max_text_len chars.

    Note: removes <script>/<style> nodes from the soup in place.
    """
    for node in soup(["script", "style"]):
        node.decompose()
    text = soup.get_text(separator="\n\n", strip=True)
    if len(text) > max_text_len:
        text = text[:max_text_len] + "..."

    markdown = markdownify.markdownify(raw_html, heading_style="ATX")
    if len(markdown) > max_text_len:
        markdown = markdown[:max_text_len] + "..."
    return text, markdown


def _extract_page_images(soup: BeautifulSoup, base_url: str) -> list:
    """Return a de-duplicated list of absolute image URLs found on the page."""
    images = set()
    for img in soup.find_all("img"):
        if isinstance(img, Tag):
            src = img.get("src")
            if src:
                images.add(urljoin(base_url, str(src)))
    return list(images)


@mcp.tool()
def get_web_content(url: str, extract_text: bool = True, extract_metadata: bool = True, extract_images: bool = False) -> dict:
    """Fetch a web page and extract its content.

    Args:
        url: Page URL.
        extract_text: Whether to extract plain-text and Markdown content.
        extract_metadata: Whether to extract metadata (title, description).
        extract_images: Whether to collect image URLs.

    Returns:
        dict:
            - success: whether the content was fetched
            - url: the requested URL
            - title / description: metadata (when extract_metadata is True)
            - text / markdown: page content (when extract_text is True)
            - images: list of image URLs (when extract_images is True)
            - error: error message (when success is False)
            - status_code: HTTP status code
    """
    result: dict = {
        "success": False,
        "url": url,
        "status_code": None,
        "title": None,
        "description": None,
        "text": None,
        "markdown": None,
        "images": [],
        "error": None
    }

    # Validate the URL first.
    # NOTE(review): this calls the decorated tool directly, while
    # generate_web_intro calls sibling tools via `.raw_call` — confirm which
    # calling form the installed fastmcp version supports and use it
    # consistently across the file.
    validation = validate_url(url)
    if not validation.get("valid", False):
        result["error"] = f"URL无效: {validation.get('error', '未知错误')}"
        return result

    try:
        headers = {"User-Agent": USER_AGENT}
        response = requests.get(url, headers=headers, timeout=TIMEOUT)
        result["status_code"] = response.status_code

        if 200 <= response.status_code < 400:
            # Let requests sniff the encoding from the body before decoding.
            response.encoding = response.apparent_encoding
            soup = _make_soup(response.text)

            if extract_metadata:
                result["title"], result["description"] = _extract_page_metadata(soup)

            # Text extraction mutates the soup (drops script/style nodes),
            # so it must run after metadata extraction.
            if extract_text:
                result["text"], result["markdown"] = _extract_page_text(soup, response.text)

            if extract_images:
                result["images"] = _extract_page_images(soup, url)

            result["success"] = True
        else:
            result["error"] = f"HTTP错误状态码: {response.status_code}"

    except requests.exceptions.Timeout:
        result["error"] = f"请求超时({TIMEOUT}秒)"
    except requests.exceptions.ConnectionError:
        result["error"] = "连接错误，可能是网络问题或服务器不可用"
    except requests.exceptions.RequestException as e:
        result["error"] = f"请求异常: {str(e)}"
    except Exception as e:
        result["error"] = f"未知错误: {str(e)}"

    return result


@mcp.tool()
def generate_web_intro(url: str, intro_length: int = 200, retry_count: int = 0) -> dict:
    """Generate a short introduction for a web page.

    Args:
        url: Page URL.
        intro_length: Maximum length of the introduction, in characters.
        retry_count: Current retry attempt (internal use).

    Returns:
        dict:
            - success: whether an introduction was generated
            - intro: the introduction text
            - error: error message (when success is False)
            - url: the requested URL
    """
    result: dict = {
        "success": False,
        "url": url,
        "intro": None,
        "error": None
    }

    # Fetch page content via the sibling tool.
    content_result = get_web_content.raw_call(url, extract_text=True, extract_metadata=True)

    if not content_result.get("success", False):
        # Retry with a short pause before giving up.
        if retry_count < MAX_RETRIES:
            time.sleep(1)
            return generate_web_intro.raw_call(url, intro_length, retry_count + 1)
        result["error"] = f"获取网页内容失败: {content_result.get('error', '未知错误')}"
        return result

    try:
        title = content_result.get("title") or "无标题"
        description = content_result.get("description", "")
        text = content_result.get("text", "")

        # get_web_content reports the placeholder "无描述" when the page has
        # no meta description; treat it as "missing" so the body-text
        # fallback below can actually be reached.
        if description == "无描述":
            description = ""

        intro = f"{title} - "

        # Remaining character budget (never negative).
        remaining_length = max(intro_length - len(intro), 0)

        if description:
            intro += description[:remaining_length].strip()
        elif text:
            intro += text[:remaining_length].strip()
        else:
            intro += "无法获取网页详细信息"

        # Enforce the length budget including the "..." suffix, preferring a
        # word-boundary cut when the clipped text contains spaces.
        if len(intro) > intro_length:
            clipped = intro[: max(intro_length - 3, 0)]
            intro = clipped.rsplit(' ', 1)[0] + '...'

        result["intro"] = intro
        result["success"] = True

    except Exception as e:
        result["error"] = f"生成介绍时出错: {str(e)}"

    return result


if __name__ == "__main__":
    import os

    def run_server():
        """Launch the MCP service; transport is chosen via the MCP_MODE env var."""
        transports = {
            "sse": {"transport": "sse", "port": 8000, "path": "/sse"},
            "streamable-http": {"transport": "streamable-http", "port": 8000, "path": "/mcp"},
        }
        selected = transports.get(os.getenv("MCP_MODE", "").lower())
        if selected is None:
            mcp.run()  # default: STDIO transport
        else:
            mcp.run(**selected)

    run_server()