#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MCP 高级工具示例

本文件展示了如何创建高级的 MCP 工具，包括：
- 异步工具
- 网络请求工具
- 数据库操作工具
- 文件处理工具
- 图像处理工具
- 缓存和性能优化
"""

from mcp.server.fastmcp import FastMCP
from typing import List, Dict, Optional, Union, Any
import asyncio
import aiohttp
import sqlite3
import json
import os
import hashlib
import time
from functools import lru_cache, wraps
from datetime import datetime, timedelta
import tempfile
import zipfile
import csv
import io

# Create the MCP server instance; tools below register against it via @mcp.tool()
mcp = FastMCP("Advanced Tools Server")

# ============================================================================
# 缓存装饰器
# ============================================================================

def timed_cache(seconds: int):
    """Decorator that memoizes a function's result with time-based expiry.

    Results are keyed by an MD5 digest of the stringified arguments. A hit
    within the expiry window is returned as a string prefixed with "[缓存]"
    (note: this stringifies non-string results on cache hits). Expired
    entries are pruned opportunistically after each fresh computation.

    Args:
        seconds: How long a cached entry remains valid.

    Returns:
        The decorating function.
    """
    def decorator(func):
        store: dict = {}

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Key on the full call signature; hash to keep keys bounded-size.
            raw_key = str(args) + str(sorted(kwargs.items()))
            digest = hashlib.md5(raw_key.encode()).hexdigest()

            entry = store.get(digest)
            if entry is not None:
                value, stored_at = entry
                if time.time() - stored_at < seconds:
                    return f"[缓存] {value}"

            # Miss (or expired): compute and record with a fresh timestamp.
            value = func(*args, **kwargs)
            store[digest] = (value, time.time())

            # Opportunistic cleanup of stale entries.
            now = time.time()
            stale = [k for k, (_, ts) in store.items() if now - ts >= seconds]
            for k in stale:
                del store[k]

            return value

        return wrapper
    return decorator

# ============================================================================
# 异步网络工具
# ============================================================================

@mcp.tool()
async def fetch_url_content(url: str, timeout: int = 10, headers: Optional[str] = None) -> str:
    """Fetch a web page asynchronously and summarize the response.

    Args:
        url: Target URL.
        timeout: Total request timeout in seconds.
        headers: Optional request headers encoded as a JSON object string.

    Returns:
        A human-readable summary (status, content type, length, headers,
        first 500 characters of the body), or an error message.
    """
    try:
        # Headers arrive as a JSON string; an invalid string is reported below.
        parsed_headers = json.loads(headers) if headers else {}

        client_timeout = aiohttp.ClientTimeout(total=timeout)
        async with aiohttp.ClientSession(timeout=client_timeout) as session:
            async with session.get(url, headers=parsed_headers) as response:
                body = await response.text()

                summary = [
                    f"URL: {url}",
                    f"状态码: {response.status}",
                    f"内容类型: {response.headers.get('content-type', '未知')}",
                    f"内容长度: {len(body)} 字符",
                    f"响应头: {dict(response.headers)}",
                    "",
                    f"内容预览 (前500字符):\n{body[:500]}...",
                ]
                return "\n".join(summary)
    except aiohttp.ClientError as e:
        return f"网络请求失败: {str(e)}"
    except json.JSONDecodeError:
        return "请求头格式错误，请提供有效的JSON格式"
    except Exception as e:
        return f"获取内容失败: {str(e)}"

@mcp.tool()
async def batch_url_checker(urls: List[str], timeout: int = 5) -> str:
    """Check the reachability of several URLs concurrently.

    Args:
        urls: URLs to probe.
        timeout: Per-request timeout in seconds.

    Returns:
        A per-URL status report plus a reachable/total summary.
    """
    async def probe(session: aiohttp.ClientSession, target: str) -> Dict[str, Any]:
        # HEAD keeps the probe lightweight — no body is downloaded.
        try:
            async with session.head(target, timeout=aiohttp.ClientTimeout(total=timeout)) as response:
                return {
                    "url": target,
                    "status": response.status,
                    "accessible": True,
                    "error": None,
                }
        except Exception as exc:
            return {
                "url": target,
                "status": None,
                "accessible": False,
                "error": str(exc),
            }

    try:
        async with aiohttp.ClientSession() as session:
            outcomes = await asyncio.gather(*(probe(session, u) for u in urls))

        report = f"批量URL检查结果 (共 {len(urls)} 个):\n\n"
        reachable = 0
        for outcome in outcomes:
            icon = "✓" if outcome["accessible"] else "✗"
            code = outcome["status"] or "N/A"

            report += f"{icon} {outcome['url']} - 状态码: {code}\n"
            if outcome["error"]:
                report += f"   错误: {outcome['error']}\n"

            if outcome["accessible"]:
                reachable += 1

        report += f"\n总结: {reachable}/{len(urls)} 个URL可访问"
        return report

    except Exception as exc:
        return f"批量检查失败: {str(exc)}"

@mcp.tool()
async def api_request(url: str, method: str = "GET", data: Optional[str] = None, 
                     headers: Optional[str] = None, timeout: int = 10) -> str:
    """Perform a generic HTTP API request.

    Args:
        url: API endpoint URL.
        method: HTTP method (GET, POST, PUT, DELETE, ...); case-insensitive.
        data: Optional request body as a JSON string (sent as the JSON body).
        headers: Optional request headers as a JSON object string.
        timeout: Total request timeout in seconds.

    Returns:
        A summary of the request and the (pretty-printed, truncated) response,
        or an error message.
    """
    try:
        # Both optional inputs are JSON strings; bad JSON is reported below.
        parsed_headers = json.loads(headers) if headers else {}
        payload = json.loads(data) if data else None

        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session:
            async with session.request(
                method.upper(),
                url,
                json=payload,
                headers=parsed_headers
            ) as response:
                raw_body = await response.text()

                # Pretty-print JSON responses; fall back to the raw text.
                try:
                    pretty = json.dumps(json.loads(raw_body), indent=2, ensure_ascii=False)
                except json.JSONDecodeError:
                    pretty = raw_body

                summary = f"API请求结果:\n"
                summary += f"URL: {url}\n"
                summary += f"方法: {method.upper()}\n"
                summary += f"状态码: {response.status}\n"
                summary += f"响应头: {dict(response.headers)}\n\n"
                summary += f"响应内容:\n{pretty[:1000]}..."

                return summary

    except json.JSONDecodeError as e:
        return f"JSON解析错误: {str(e)}"
    except aiohttp.ClientError as e:
        return f"请求失败: {str(e)}"
    except Exception as e:
        return f"API请求失败: {str(e)}"

# ============================================================================
# 数据库操作工具
# ============================================================================

@mcp.tool()
def create_sqlite_db(db_path: str, tables: str) -> str:
    """Create a SQLite database file and the requested tables.

    Args:
        db_path: Database file path (created or opened by sqlite3).
        tables: JSON object mapping table names to their CREATE TABLE SQL.

    Returns:
        A success summary listing created tables, or an error message.
    """
    try:
        # Table definitions arrive as a JSON string.
        table_definitions = json.loads(tables)

        conn = sqlite3.connect(db_path)
        try:
            cursor = conn.cursor()

            created_tables = []
            for table_name, sql in table_definitions.items():
                cursor.execute(sql)
                created_tables.append(table_name)

            conn.commit()
        finally:
            # Fix: always release the connection — the original leaked it
            # whenever one of the CREATE statements raised.
            conn.close()

        return f"数据库创建成功: {db_path}\n创建的表: {', '.join(created_tables)}"

    except json.JSONDecodeError:
        return "表定义格式错误，请提供有效的JSON格式"
    except sqlite3.Error as e:
        return f"数据库操作失败: {str(e)}"
    except Exception as e:
        return f"创建数据库失败: {str(e)}"

@mcp.tool()
def execute_sql_query(db_path: str, query: str, params: Optional[str] = None) -> str:
    """Execute a SQL statement against an existing SQLite database.

    SELECT statements are rendered as a text table (first 50 rows); any other
    statement is committed and the affected row count is reported.

    Args:
        db_path: Path to an existing SQLite database file.
        query: SQL statement to execute.
        params: Optional bound parameters as a JSON array string.

    Returns:
        Query results or an execution summary, or an error message.

    Note:
        This tool runs arbitrary SQL by design; callers must only pass
        trusted statements.
    """
    try:
        if not os.path.exists(db_path):
            return f"数据库文件不存在: {db_path}"

        # Bound parameters arrive as a JSON array string.
        query_params = json.loads(params) if params else []

        conn = sqlite3.connect(db_path)
        try:
            conn.row_factory = sqlite3.Row  # rows support access by column name
            cursor = conn.cursor()

            cursor.execute(query, query_params)

            if query.strip().upper().startswith('SELECT'):
                rows = cursor.fetchall()

                if not rows:
                    result = "查询结果为空"
                else:
                    headers = list(rows[0].keys())
                    result = f"查询结果 (共 {len(rows)} 行):\n\n"

                    # Header row followed by a separator of matching width.
                    header_line = " | ".join(headers)
                    result += header_line + "\n"
                    result += "-" * len(header_line) + "\n"

                    # Cap rendered rows to keep the output readable.
                    for row in rows[:50]:
                        result += " | ".join(str(row[h]) for h in headers) + "\n"

                    if len(rows) > 50:
                        result += f"\n... (仅显示前50行，总共 {len(rows)} 行)"
            else:
                conn.commit()
                result = f"SQL执行成功，影响行数: {cursor.rowcount}"
        finally:
            # Fix: always release the connection — the original leaked it
            # whenever execute() raised.
            conn.close()

        return result

    except json.JSONDecodeError:
        return "参数格式错误，请提供有效的JSON数组格式"
    except sqlite3.Error as e:
        return f"SQL执行失败: {str(e)}"
    except Exception as e:
        return f"数据库操作失败: {str(e)}"

@mcp.tool()
def import_csv_to_db(db_path: str, table_name: str, csv_data: str, 
                    create_table: bool = True) -> str:
    """Import CSV text into a SQLite table.

    Args:
        db_path: SQLite database file path (created if missing).
        table_name: Target table name.
        csv_data: CSV text; the first row is treated as the header.
        create_table: When True, create the table (all TEXT columns) if it
            does not already exist.

    Returns:
        An import summary, or an error message.
    """
    def _quote_ident(name: str) -> str:
        # SQLite identifier quoting: wrap in double quotes and double any
        # embedded quote. Identifiers cannot be bound as parameters, so this
        # is the fix for table/column names breaking (or injecting into) the
        # generated SQL.
        return '"' + name.replace('"', '""') + '"'

    try:
        rows = list(csv.DictReader(io.StringIO(csv_data)))

        if not rows:
            return "CSV数据为空"

        columns = list(rows[0].keys())
        quoted_table = _quote_ident(table_name)
        quoted_columns = [_quote_ident(col) for col in columns]

        conn = sqlite3.connect(db_path)
        try:
            cursor = conn.cursor()

            if create_table:
                # CSV carries no type information, so every column is TEXT.
                columns_def = ", ".join(f"{qc} TEXT" for qc in quoted_columns)
                cursor.execute(
                    f"CREATE TABLE IF NOT EXISTS {quoted_table} ({columns_def})"
                )

            placeholders = ", ".join("?" for _ in columns)
            insert_sql = (
                f"INSERT INTO {quoted_table} ({', '.join(quoted_columns)}) "
                f"VALUES ({placeholders})"
            )
            # executemany runs the insert loop in C instead of Python.
            cursor.executemany(
                insert_sql, ([row[col] for col in columns] for row in rows)
            )
            inserted_count = len(rows)

            conn.commit()
        finally:
            # Fix: always release the connection — the original leaked it on
            # any insert/create error.
            conn.close()

        result = f"CSV导入成功:\n"
        result += f"数据库: {db_path}\n"
        result += f"表名: {table_name}\n"
        result += f"导入行数: {inserted_count}\n"
        result += f"列: {', '.join(columns)}"

        return result

    except csv.Error as e:
        return f"CSV解析失败: {str(e)}"
    except sqlite3.Error as e:
        return f"数据库操作失败: {str(e)}"
    except Exception as e:
        return f"导入失败: {str(e)}"

# ============================================================================
# 文件处理工具
# ============================================================================

@mcp.tool()
def create_zip_archive(files: List[str], archive_path: str) -> str:
    """Create a deflate-compressed ZIP archive from files and directories.

    Args:
        files: Paths to include; files are stored by basename, directories
            are walked recursively (paths that don't exist are skipped).
        archive_path: Output path for the archive.

    Returns:
        A summary with counts, sizes and compression ratio, or an error
        message.
    """
    try:
        archived: List[str] = []
        original_bytes = 0

        with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
            for entry in files:
                if not os.path.exists(entry):
                    # Missing paths are silently skipped.
                    continue
                if os.path.isfile(entry):
                    # Single file: stored under its basename.
                    zipf.write(entry, os.path.basename(entry))
                    archived.append(entry)
                    original_bytes += os.path.getsize(entry)
                elif os.path.isdir(entry):
                    # Directory: walk it, keeping paths relative to its parent.
                    base = os.path.dirname(entry)
                    for root, _dirs, names in os.walk(entry):
                        for name in names:
                            full = os.path.join(root, name)
                            zipf.write(full, os.path.relpath(full, base))
                            archived.append(full)
                            original_bytes += os.path.getsize(full)

        archive_bytes = os.path.getsize(archive_path)
        ratio = (1 - archive_bytes / original_bytes) * 100 if original_bytes > 0 else 0

        report = f"ZIP压缩包创建成功:\n"
        report += f"压缩包路径: {archive_path}\n"
        report += f"包含文件数: {len(archived)}\n"
        report += f"原始大小: {original_bytes} 字节\n"
        report += f"压缩后大小: {archive_bytes} 字节\n"
        report += f"压缩率: {ratio:.1f}%\n\n"
        report += "包含的文件:\n"
        for path in archived[:20]:  # cap the listing
            report += f"  - {path}\n"

        if len(archived) > 20:
            report += f"  ... (还有 {len(archived) - 20} 个文件)"

        return report

    except Exception as e:
        return f"创建压缩包失败: {str(e)}"

@mcp.tool()
def extract_zip_archive(archive_path: str, extract_to: str) -> str:
    """Extract a ZIP archive into a target directory.

    Args:
        archive_path: Path to the ZIP file.
        extract_to: Destination directory (created if missing).

    Returns:
        A summary of the extracted files and their total size, or an error
        message.
    """
    try:
        if not os.path.exists(archive_path):
            return f"压缩包不存在: {archive_path}"

        os.makedirs(extract_to, exist_ok=True)

        with zipfile.ZipFile(archive_path, 'r') as zipf:
            members = zipf.namelist()
            zipf.extractall(extract_to)

            # Sum the on-disk size of everything that extracted as a file.
            extracted_bytes = 0
            for member in members:
                target = os.path.join(extract_to, member)
                if os.path.isfile(target):
                    extracted_bytes += os.path.getsize(target)

        report = f"ZIP解压成功:\n"
        report += f"压缩包: {archive_path}\n"
        report += f"解压到: {extract_to}\n"
        report += f"解压文件数: {len(members)}\n"
        report += f"解压后大小: {extracted_bytes} 字节\n\n"
        report += "解压的文件:\n"

        for member in members[:20]:  # cap the listing
            report += f"  - {member}\n"

        if len(members) > 20:
            report += f"  ... (还有 {len(members) - 20} 个文件)"

        return report

    except zipfile.BadZipFile:
        return f"无效的ZIP文件: {archive_path}"
    except Exception as e:
        return f"解压失败: {str(e)}"

@mcp.tool()
def file_search(directory: str, pattern: str, content_search: Optional[str] = None,
               max_results: int = 50) -> str:
    """Search a directory tree for files by name pattern and, optionally, content.

    Args:
        directory: Root directory to search (walked recursively).
        pattern: Filename pattern (shell-style wildcards, e.g. "*.py").
        content_search: Optional keyword; when given, only files whose text
            contains it (case-insensitive) are reported.
        max_results: Maximum number of files to return.

    Returns:
        A formatted report of matching files, or an error message.
    """
    try:
        import fnmatch
        
        if not os.path.exists(directory):
            return f"目录不存在: {directory}"
        
        matches = []
        
        for root, dirs, files in os.walk(directory):
            for file in files:
                # Filename match via shell-style wildcards
                if fnmatch.fnmatch(file, pattern):
                    file_path = os.path.join(root, file)
                    match_info = {
                        "path": file_path,
                        "name": file,
                        "size": os.path.getsize(file_path),
                        "modified": datetime.fromtimestamp(os.path.getmtime(file_path)),
                        "content_match": False
                    }
                    
                    # Optional content search (case-insensitive substring)
                    if content_search:
                        try:
                            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                                content = f.read()
                                if content_search.lower() in content.lower():
                                    match_info["content_match"] = True
                                    # Collect the matching lines for the report
                                    lines = content.split('\n')
                                    matching_lines = [
                                        (i+1, line.strip()) for i, line in enumerate(lines)
                                        if content_search.lower() in line.lower()
                                    ]
                                    match_info["matching_lines"] = matching_lines[:5]  # at most 5 lines
                        except:
                            pass  # deliberately ignore unreadable files (binary, permissions, ...)
                    
                    # When a content keyword is given, only keep content matches
                    if not content_search or match_info["content_match"]:
                        matches.append(match_info)
                    
                    if len(matches) >= max_results:
                        break
            
            # Mirror the inner break to stop the directory walk as well
            if len(matches) >= max_results:
                break
        
        if not matches:
            search_desc = f"文件名模式 '{pattern}'"
            if content_search:
                search_desc += f" 且内容包含 '{content_search}'"
            return f"未找到匹配 {search_desc} 的文件"
        
        result = f"文件搜索结果 (目录: {directory}):\n"
        result += f"搜索模式: {pattern}\n"
        if content_search:
            result += f"内容关键词: {content_search}\n"
        result += f"找到 {len(matches)} 个文件\n\n"
        
        for i, match in enumerate(matches, 1):
            result += f"{i}. {match['name']}\n"
            result += f"   路径: {match['path']}\n"
            result += f"   大小: {match['size']} 字节\n"
            result += f"   修改时间: {match['modified'].strftime('%Y-%m-%d %H:%M:%S')}\n"
            
            if content_search and match["content_match"] and "matching_lines" in match:
                result += "   匹配行:\n"
                for line_num, line_content in match["matching_lines"]:
                    result += f"     {line_num}: {line_content[:100]}...\n"
            
            result += "\n"
        
        return result
        
    except Exception as e:
        return f"搜索失败: {str(e)}"

# ============================================================================
# 性能监控工具
# ============================================================================

@mcp.tool()
@timed_cache(60)  # serve a cached snapshot for up to 60 seconds
def system_performance() -> str:
    """Report a snapshot of CPU, memory, disk and network usage via psutil.

    Returns:
        Formatted performance figures, or a hint to install psutil.
    """
    try:
        import psutil

        # Gather all metrics up front (CPU uses a 1-second sampling window).
        cpu_usage = psutil.cpu_percent(interval=1)
        cores = psutil.cpu_count()
        mem = psutil.virtual_memory()
        disk = psutil.disk_usage('/')
        net = psutil.net_io_counters()

        gib = 1024**3
        mib = 1024**2

        report = "系统性能监控:\n\n"

        report += f"CPU:\n"
        report += f"  使用率: {cpu_usage}%\n"
        report += f"  核心数: {cores}\n\n"

        report += f"内存:\n"
        report += f"  总量: {mem.total // gib} GB\n"
        report += f"  已用: {mem.used // gib} GB ({mem.percent}%)\n"
        report += f"  可用: {mem.available // gib} GB\n\n"

        report += f"磁盘:\n"
        report += f"  总量: {disk.total // gib} GB\n"
        report += f"  已用: {disk.used // gib} GB ({disk.used/disk.total*100:.1f}%)\n"
        report += f"  可用: {disk.free // gib} GB\n\n"

        report += f"网络 (累计):\n"
        report += f"  发送: {net.bytes_sent // mib} MB\n"
        report += f"  接收: {net.bytes_recv // mib} MB\n"

        return report

    except ImportError:
        return "需要安装 psutil 库: pip install psutil"
    except Exception as e:
        return f"获取性能信息失败: {str(e)}"

@mcp.tool()
def benchmark_tool(operation: str, iterations: int = 1000) -> str:
    """Run a micro-benchmark for a category of operations.

    Args:
        operation: One of "cpu", "memory", "disk" or "string".
            (Fix: the old docstring advertised "network", which was never
            implemented, and omitted "string", which is.)
        iterations: Number of iterations; must be positive.

    Returns:
        Formatted timing statistics, or an error message.
    """
    try:
        # Fix: the original divided by `iterations` and `duration` unguarded —
        # iterations<=0 or a 0.0 timer reading produced ZeroDivisionError.
        if iterations <= 0:
            return "迭代次数必须为正整数"

        # perf_counter is monotonic and higher-resolution than time.time().
        start_time = time.perf_counter()

        if operation == "cpu":
            # CPU-bound: repeated arithmetic.
            total = 0
            for _ in range(iterations):
                total += sum(j * j for j in range(100))

        elif operation == "memory":
            # Allocation-bound: build many small lists.
            data = []
            for _ in range(iterations):
                data.append([j for j in range(100)])

        elif operation == "disk":
            # I/O-bound: write/flush/read a temp file.
            with tempfile.NamedTemporaryFile(mode='w+', delete=True) as f:
                for i in range(iterations):
                    f.write(f"测试数据 {i}\n")
                    f.flush()
                f.seek(0)
                content = f.read()

        elif operation == "string":
            # String-processing bound: chained transforms.
            text = "Hello, World! " * 100
            for _ in range(iterations):
                transformed = text.upper().lower().replace("Hello", "Hi")

        else:
            return f"不支持的测试操作: {operation}"

        duration = time.perf_counter() - start_time
        # Guard against a zero reading from very fast runs / coarse timers.
        safe_duration = duration if duration > 0 else 1e-9

        result = f"基准测试结果:\n"
        result += f"操作类型: {operation}\n"
        result += f"迭代次数: {iterations}\n"
        result += f"总耗时: {duration:.4f} 秒\n"
        result += f"平均耗时: {safe_duration/iterations*1000:.4f} 毫秒/次\n"
        result += f"操作频率: {iterations/safe_duration:.2f} 次/秒\n"

        return result

    except Exception as e:
        return f"基准测试失败: {str(e)}"

# ============================================================================
# 启动服务
# ============================================================================

if __name__ == "__main__":
    # Serve the registered tools over the stdio transport (for MCP clients)
    mcp.run(transport="stdio")