"""外部工具集成服务"""

import asyncio
import time
from typing import Dict, List, Any, Optional, Tuple
import aiohttp
import requests
from bs4 import BeautifulSoup
from github import Github
from urllib.parse import urljoin, quote
import re

from config.settings import config
from core.base_tools import BaseExternalTool, SearchResult, ExternalToolCache
from core.mcp import MCPGitHubTool


class CairoDocsSearchTool(BaseExternalTool):
    """Search tool for the official Cairo / Starknet documentation.

    Searches a curated list of documentation pages (with a DuckDuckGo
    fallback), scores each page's relevance to the query, and caches
    results per query.
    """
    
    def __init__(self, cache: ExternalToolCache):
        super().__init__(cache)
        # Base URLs from config; kept for callers/extension — the searches
        # below use the curated page lists instead.
        self.docs_urls = [
            config.external_tools.cairo_docs_base,
            config.external_tools.starknet_docs_base
        ]
    
    async def search(self, query: str) -> List[SearchResult]:
        """Search the official docs for *query*.

        Cairo and Starknet docs are searched in parallel under a shared
        overall timeout; results are sorted by relevance, truncated to
        ``self.max_results`` and cached per query.
        """
        print(f"🔍 [Cairo文档搜索] 开始搜索: '{query}'")
        
        # Serve from cache when possible
        cached_results = self.cache.get("cairo_docs", query)
        if cached_results:
            print(f"📋 [Cairo文档搜索] 从缓存获取结果: {len(cached_results)} 条")
            return cached_results
        
        results = []
        search_start_time = time.time()
        
        try:
            # Use a shorter timeout to avoid long waits
            timeout = min(self.timeout, 8)  # at most 8 seconds
            print(f"⏱️ [Cairo文档搜索] 设置超时时间: {timeout}秒")
            
            async with aiohttp.ClientSession(
                timeout=aiohttp.ClientTimeout(total=timeout, connect=3)
            ) as session:
                # Search Cairo and Starknet docs in parallel, each search
                # bounded by its own (half) timeout
                print(f"🚀 [Cairo文档搜索] 启动并行搜索 (Cairo + Starknet)")
                search_tasks = [
                    self._search_cairo_docs_with_timeout(session, query, timeout//2),
                    self._search_starknet_docs_with_timeout(session, query, timeout//2)
                ]
                
                try:
                    # Wait for both searches, but never longer than the
                    # overall timeout
                    search_results = await asyncio.wait_for(
                        asyncio.gather(*search_tasks, return_exceptions=True),
                        timeout=timeout
                    )
                    
                    # Collect the per-source results
                    cairo_results = 0
                    starknet_results = 0
                    for i, result in enumerate(search_results):
                        if isinstance(result, list):
                            results.extend(result)
                            if i == 0:  # Cairo search results
                                cairo_results = len(result)
                            else:  # Starknet search results
                                starknet_results = len(result)
                        elif isinstance(result, Exception):
                            search_type = "Cairo" if i == 0 else "Starknet"
                            print(f"❌ [Cairo文档搜索] {search_type}搜索失败: {result}")
                    
                    print(f"📊 [Cairo文档搜索] 搜索结果统计 - Cairo: {cairo_results}条, Starknet: {starknet_results}条")
                            
                except asyncio.TimeoutError:
                    print(f"⏰ [Cairo文档搜索] 整体搜索超时 ({timeout}秒)")
                except Exception as e:
                    print(f"❌ [Cairo文档搜索] 搜索过程出错: {e}")
        
        except Exception as e:
            print(f"❌ [Cairo文档搜索] 初始化搜索失败: {e}")
        
        # Sort by relevance and truncate
        original_count = len(results)
        results.sort(key=lambda x: x.relevance_score, reverse=True)
        results = results[:self.max_results]
        
        search_duration = time.time() - search_start_time
        print(f"✅ [Cairo文档搜索] 搜索完成 - 耗时: {search_duration:.2f}秒, 原始结果: {original_count}条, 最终返回: {len(results)}条")
        
        if results:
            print(f"🏆 [Cairo文档搜索] 最佳结果: '{results[0].title}' (相关性: {results[0].relevance_score:.3f})")
        
        # Cache the results
        self.cache.set("cairo_docs", query, results)
        print(f"💾 [Cairo文档搜索] 结果已缓存")
        
        return results
    
    async def _search_cairo_docs_with_timeout(self, session: aiohttp.ClientSession, query: str, timeout: int) -> List[SearchResult]:
        """Run the Cairo docs search with its own timeout; return [] on failure."""
        try:
            return await asyncio.wait_for(
                self._search_cairo_docs(session, query), 
                timeout=timeout
            )
        except asyncio.TimeoutError:
            print(f"Cairo docs search timeout after {timeout}s")
            return []
        except Exception as e:
            print(f"Cairo docs search error: {e}")
            return []
    
    async def _search_starknet_docs_with_timeout(self, session: aiohttp.ClientSession, query: str, timeout: int) -> List[SearchResult]:
        """Run the Starknet docs search with its own timeout; return [] on failure."""
        try:
            return await asyncio.wait_for(
                self._search_starknet_docs(session, query), 
                timeout=timeout
            )
        except asyncio.TimeoutError:
            print(f"Starknet docs search timeout after {timeout}s")
            return []
        except Exception as e:
            print(f"Starknet docs search error: {e}")
            return []
    
    async def _search_cairo_docs(self, session: aiohttp.ClientSession, query: str) -> List[SearchResult]:
        """Search the Cairo Book pages; fall back to web search when sparse."""
        print(f"📖 [Cairo文档] 开始搜索Cairo官方文档")
        results = []
        
        # Curated Cairo Book pages covering common topics
        cairo_pages = [
            ("https://book.cairo-lang.org/ch01-00-getting-started.html", "Getting Started"),
            ("https://book.cairo-lang.org/ch02-00-common-programming-concepts.html", "Programming Concepts"),
            ("https://book.cairo-lang.org/ch03-00-common-collections.html", "Collections"),
            ("https://book.cairo-lang.org/ch04-00-understanding-ownership.html", "Ownership"),
            ("https://book.cairo-lang.org/ch05-00-using-structs-to-structure-related-data.html", "Structs"),
            ("https://book.cairo-lang.org/ch06-00-enums-and-pattern-matching.html", "Enums"),
            ("https://book.cairo-lang.org/ch07-00-managing-cairo-projects-with-packages-crates-and-modules.html", "Modules"),
            ("https://book.cairo-lang.org/ch08-00-generic-types-and-traits.html", "Generics and Traits"),
            ("https://book.cairo-lang.org/ch09-00-error-handling.html", "Error Handling"),
            ("https://book.cairo-lang.org/ch10-00-testing-cairo-programs.html", "Testing"),
            ("https://book.cairo-lang.org/ch11-00-advanced-features.html", "Advanced Features"),
            ("https://book.cairo-lang.org/ch12-00-a-simple-guessing-game.html", "Examples"),
        ]
        
        try:
            # First pass: the curated pages
            print(f"📋 [Cairo文档] 搜索预定义页面 (前4个页面)")
            predefined_results = 0
            for url, title in cairo_pages[:4]:  # limit how many pages we fetch
                try:
                    print(f"🔗 [Cairo文档] 正在获取: {title}")
                    content = await self._fetch_page_content(session, url, query)
                    if content:
                        relevance = self._calculate_relevance(content, query)
                        print(f"📊 [Cairo文档] {title} - 相关性: {relevance:.3f}")
                        if relevance > 0.1:
                            results.append(SearchResult(
                                title=f"Cairo Book - {title}",
                                content=content,
                                url=url,
                                source="cairo_docs",
                                relevance_score=relevance,
                                metadata={"doc_type": "cairo", "search_method": "predefined"}
                            ))
                            predefined_results += 1
                            print(f"✅ [Cairo文档] 添加结果: {title}")
                        else:
                            print(f"⚠️ [Cairo文档] 相关性过低，跳过: {title}")
                    else:
                        print(f"❌ [Cairo文档] 无法获取内容: {title}")
                except Exception as e:
                    print(f"❌ [Cairo文档] 获取页面失败 {url}: {e}")
                    continue
            
            print(f"📈 [Cairo文档] 预定义页面搜索完成，找到 {predefined_results} 个相关结果")
            
            # Fall back to a site-restricted web search when we found too little
            if len(results) < 2:
                print(f"🔄 [Cairo文档] 结果不足，启动备选搜索")
                try:
                    fallback_results = await self._google_search_fallback(session, query, "book.cairo-lang.org")
                    if fallback_results:
                        results.extend(fallback_results)
                        print(f"✅ [Cairo文档] 备选搜索找到 {len(fallback_results)} 个结果")
                    else:
                        print(f"⚠️ [Cairo文档] 备选搜索未找到结果")
                except Exception as e:
                    print(f"❌ [Cairo文档] 备选搜索失败: {e}")
        
        except Exception as e:
            print(f"❌ [Cairo文档] 搜索过程出错: {e}")
        
        print(f"🏁 [Cairo文档] 搜索完成，共找到 {len(results)} 个结果")
        return results
    
    async def _search_starknet_docs(self, session: aiohttp.ClientSession, query: str) -> List[SearchResult]:
        """Search the Starknet docs pages; fall back to web search when sparse."""
        print(f"🌐 [Starknet文档] 开始搜索Starknet官方文档")
        results = []
        
        # Curated Starknet documentation pages covering common topics
        starknet_pages = [
            ("https://docs.starknet.io/documentation/getting_started/", "Getting Started"),
            ("https://docs.starknet.io/documentation/develop/", "Development Guide"),
            ("https://docs.starknet.io/documentation/architecture_and_concepts/", "Architecture & Concepts"),
            ("https://docs.starknet.io/documentation/tools/", "Tools & Resources"),
            ("https://docs.starknet.io/documentation/quick_start/", "Quick Start"),
            ("https://docs.starknet.io/documentation/develop/smart_contracts/", "Smart Contracts"),
            ("https://docs.starknet.io/documentation/develop/cairo/", "Cairo Development"),
            ("https://docs.starknet.io/documentation/architecture_and_concepts/accounts/", "Account Abstraction"),
        ]
        
        try:
            # First pass: the curated pages
            print(f"📋 [Starknet文档] 搜索预定义页面 (前4个页面)")
            predefined_results = 0
            for url, title in starknet_pages[:4]:  # limit how many pages we fetch
                try:
                    print(f"🔗 [Starknet文档] 正在获取: {title}")
                    content = await self._fetch_page_content(session, url, query)
                    if content:
                        relevance = self._calculate_relevance(content, query)
                        print(f"📊 [Starknet文档] {title} - 相关性: {relevance:.3f}")
                        if relevance > 0.1:
                            results.append(SearchResult(
                                title=f"Starknet Docs - {title}",
                                content=content,
                                url=url,
                                source="starknet_docs",
                                relevance_score=relevance,
                                metadata={"doc_type": "starknet", "search_method": "predefined"}
                            ))
                            predefined_results += 1
                            print(f"✅ [Starknet文档] 添加结果: {title}")
                        else:
                            print(f"⚠️ [Starknet文档] 相关性过低，跳过: {title}")
                    else:
                        print(f"❌ [Starknet文档] 无法获取内容: {title}")
                except Exception as e:
                    print(f"❌ [Starknet文档] 获取页面失败 {url}: {e}")
                    continue
            
            print(f"📈 [Starknet文档] 预定义页面搜索完成，找到 {predefined_results} 个相关结果")
            
            # Fall back to a site-restricted web search when we found too little
            if len(results) < 2:
                print(f"🔄 [Starknet文档] 结果不足，启动备选搜索")
                try:
                    fallback_results = await self._google_search_fallback(session, query, "docs.starknet.io")
                    if fallback_results:
                        results.extend(fallback_results)
                        print(f"✅ [Starknet文档] 备选搜索找到 {len(fallback_results)} 个结果")
                    else:
                        print(f"⚠️ [Starknet文档] 备选搜索未找到结果")
                except Exception as e:
                    print(f"❌ [Starknet文档] 备选搜索失败: {e}")
        
        except Exception as e:
            print(f"❌ [Starknet文档] 搜索过程出错: {e}")
        
        print(f"🏁 [Starknet文档] 搜索完成，共找到 {len(results)} 个结果")
        return results
    
    async def _google_search_fallback(self, session: aiohttp.ClientSession, query: str, site: str) -> List[SearchResult]:
        """Fallback site-restricted search (despite the name, uses DuckDuckGo HTML)."""
        results = []
        
        # Candidate queries, simplest first (only the first is tried below)
        search_queries = [
            f"{query} site:{site}",
            f"cairo {query} site:{site}",
            f"starknet {query} site:{site}"
        ]
        
        for search_query in search_queries[:1]:  # only try the first query
            try:
                # Use DuckDuckGo's HTML endpoint as the fallback search engine
                search_url = f"https://duckduckgo.com/html/?q={quote(search_query)}"
                
                async with session.get(
                    search_url, 
                    headers={
                        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
                        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                        'Accept-Language': 'en-US,en;q=0.5',
                        'Accept-Encoding': 'gzip, deflate',
                        'Connection': 'keep-alive',
                    },
                    timeout=aiohttp.ClientTimeout(total=5)
                ) as response:
                    if response.status == 200:
                        html = await response.text()
                        soup = BeautifulSoup(html, 'html.parser')
                        
                        # Parse the DuckDuckGo result entries
                        for result_div in soup.find_all('div', class_='result')[:3]:
                            link_elem = result_div.find('a', class_='result__a')
                            if link_elem and link_elem.get('href'):
                                url = link_elem['href']
                                if site in url:
                                    # link_elem is the title anchor itself, so
                                    # use it directly instead of re-querying.
                                    title = link_elem.get_text()
                                    
                                    # Fetch the linked page's content
                                    content = await self._fetch_page_content(session, url, query)
                                    
                                    if content:
                                        source = "cairo_docs" if "cairo" in site else "starknet_docs"
                                        results.append(SearchResult(
                                            title=title,
                                            content=content,
                                            url=url,
                                            source=source,
                                            relevance_score=self._calculate_relevance(content, query),
                                            metadata={"doc_type": site.split('.')[0], "search_method": "duckduckgo"}
                                        ))
                        
                        if results:  # stop as soon as we have results
                            break
                            
            except Exception as e:
                print(f"DuckDuckGo search error for {search_query}: {e}")
                continue
        
        return results
    
    async def _fetch_page_content(self, session: aiohttp.ClientSession, url: str, query: str) -> str:
        """Fetch *url* and return query-relevant text, or "" on any failure."""
        try:
            # Keep the per-page timeout short
            timeout = aiohttp.ClientTimeout(total=3, connect=1)
            async with session.get(
                url, 
                headers={
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                    'Accept-Language': 'en-US,en;q=0.5',
                    'Connection': 'keep-alive',
                },
                timeout=timeout
            ) as response:
                if response.status == 200:
                    # Skip oversized pages to bound parsing work
                    content_length = response.headers.get('content-length')
                    if content_length and int(content_length) > 1024 * 1024:  # 1MB cap
                        print(f"Page too large, skipping: {url}")
                        return ""
                    
                    html = await response.text()
                    soup = BeautifulSoup(html, 'html.parser')
                    
                    # Strip script/style and page chrome
                    for script in soup(["script", "style", "nav", "footer", "header"]):
                        script.decompose()
                    
                    # Prefer the main content area when one can be found
                    main_content = None
                    for selector in ['main', 'article', '.content', '#content', '.main-content']:
                        main_content = soup.select_one(selector)
                        if main_content:
                            break
                    
                    # Use the main content area if found, otherwise the whole page
                    if main_content:
                        content = main_content.get_text()
                    else:
                        content = soup.get_text()
                    
                    content = self._clean_text(content)
                    
                    # Keep only the paragraphs related to the query
                    relevant_content = self._extract_relevant_content(content, query)
                    return relevant_content
                else:
                    print(f"HTTP {response.status} for {url}")
                    
        except asyncio.TimeoutError:
            print(f"Timeout fetching content from {url}")
        except Exception as e:
            print(f"Error fetching page content: {e}")
        
        return ""
    
    def _extract_relevant_content(self, content: str, query: str) -> str:
        """Return the top query-matching paragraphs of *content* (up to 3)."""
        if not content:
            return ""
        
        # Split into paragraphs
        paragraphs = content.split('\n')
        relevant_paragraphs = []
        
        query_words = query.lower().split()
        
        for paragraph in paragraphs:
            if len(paragraph.strip()) < 20:  # skip very short paragraphs
                continue
            
            paragraph_lower = paragraph.lower()
            score = sum(1 for word in query_words if word in paragraph_lower)
            
            if score > 0:
                relevant_paragraphs.append((paragraph.strip(), score))
        
        # Sort by match count and keep the best few paragraphs
        relevant_paragraphs.sort(key=lambda x: x[1], reverse=True)
        
        result = ""
        for paragraph, _ in relevant_paragraphs[:3]:
            result += paragraph + "\n\n"
        
        return self._clean_text(result)
    
    def _calculate_relevance(self, content: str, query: str) -> float:
        """Score how relevant *content* is to *query*; returns a float in [0, 1]."""
        if not content or not query:
            return 0.0
        
        content_lower = content.lower()
        query_lower = query.lower()
        
        # Tokenize the query, dropping very short words
        query_words = [word.strip() for word in query_lower.split() if len(word.strip()) > 2]
        if not query_words:
            return 0.0
        
        # Count the different kinds of matches
        exact_matches = 0
        partial_matches = 0
        total_score = 0.0
        
        for word in query_words:
            # Whole-word match (highest weight). A plain substring test here
            # would also cover every partial match and leave the elif branch
            # below unreachable, so exact matches require word boundaries.
            if re.search(rf"\b{re.escape(word)}\b", content_lower):
                exact_matches += 1
                total_score += 1.0
                
                # Bonus when the word appears early (title/intro area)
                if content_lower.find(word) < 200:  # within the first 200 chars
                    total_score += 0.5
            
            # Partial match: the word only occurs inside a longer token
            elif word in content_lower:
                partial_matches += 1
                total_score += 0.3
        
        # Final relevance score, normalized by query length
        base_score = total_score / len(query_words)
        
        # Reward multiple exact keyword matches
        if exact_matches > 1:
            base_score *= (1 + 0.1 * (exact_matches - 1))
        
        # Weight by content length (penalize very short or very long content)
        content_length = len(content)
        if 100 <= content_length <= 5000:
            length_bonus = 1.0
        elif content_length < 100:
            length_bonus = 0.5
        else:
            length_bonus = 0.8
        
        return min(base_score * length_bonus, 1.0)


class GitHubSearchTool(BaseExternalTool):
    """GitHub code and issue search tool built on PyGithub.

    NOTE(review): PyGithub calls are synchronous, so the ``async`` methods
    below block the event loop while talking to the GitHub API — confirm
    whether this runs concurrently with other tasks before relying on it.
    """
    
    def __init__(self, cache: ExternalToolCache):
        super().__init__(cache)
        # Client is None when no token is configured; search() then no-ops.
        self.github = Github(config.external_tools.github_token) if config.external_tools.github_token else None
        self.repos = config.external_tools.github_repos
    
    async def search(self, query: str) -> List[SearchResult]:
        """Search GitHub code (and issues, for error-like queries).

        Results are cached per query, sorted by relevance, and truncated
        to ``self.max_results``. Returns [] when no token is configured.
        """
        print(f"🐙 [GitHub搜索] 开始搜索: '{query}'")
        
        # Serve from cache when possible
        cached_results = self.cache.get("github", query)
        if cached_results:
            print(f"📋 [GitHub搜索] 从缓存获取结果: {len(cached_results)} 条")
            return cached_results
        
        results = []
        search_start_time = time.time()
        
        if not self.github:
            print("❌ [GitHub搜索] GitHub token未配置，跳过GitHub搜索")
            return results
        
        print(f"🔑 [GitHub搜索] GitHub API已配置，开始搜索")
        
        try:
            # Search code first
            print(f"💻 [GitHub搜索] 开始搜索代码...")
            code_results = await self._search_code(query)
            if code_results:
                results.extend(code_results)
                print(f"✅ [GitHub搜索] 代码搜索完成，找到 {len(code_results)} 个结果")
            else:
                print(f"⚠️ [GitHub搜索] 代码搜索未找到结果")
            
            # Search issues too, but only for error-related queries
            error_keywords = ['error', 'bug', 'issue', 'problem', 'exception', 'fail']
            if any(word in query.lower() for word in error_keywords):
                print(f"🐛 [GitHub搜索] 检测到错误相关查询，开始搜索Issues...")
                issue_results = await self._search_issues(query)
                if issue_results:
                    results.extend(issue_results)
                    print(f"✅ [GitHub搜索] Issues搜索完成，找到 {len(issue_results)} 个结果")
                else:
                    print(f"⚠️ [GitHub搜索] Issues搜索未找到结果")
            else:
                print(f"ℹ️ [GitHub搜索] 非错误相关查询，跳过Issues搜索")
        
        except Exception as e:
            print(f"❌ [GitHub搜索] 搜索过程出错: {e}")
        
        # Sort by relevance and truncate
        original_count = len(results)
        results.sort(key=lambda x: x.relevance_score, reverse=True)
        results = results[:self.max_results]
        
        search_duration = time.time() - search_start_time
        print(f"✅ [GitHub搜索] 搜索完成 - 耗时: {search_duration:.2f}秒, 原始结果: {original_count}条, 最终返回: {len(results)}条")
        
        if results:
            print(f"🏆 [GitHub搜索] 最佳结果: '{results[0].title}' (相关性: {results[0].relevance_score:.3f})")
        
        # Cache the results
        self.cache.set("github", query, results)
        print(f"💾 [GitHub搜索] 结果已缓存")
        
        return results
    
    async def _search_code(self, query: str) -> List[SearchResult]:
        """Search Cairo code in the configured repos; returns [] on failure."""
        results = []
        
        try:
            if not self.github:
                print("GitHub token not configured, skipping code search")
                return results
                
            # Build the search query
            search_query = f"{query} language:cairo"
            
            # Search within the configured repositories
            for repo_name in self.repos[:2]:  # limit how many repos are searched
                try:
                    # Verify the repository exists first (failure is caught below)
                    repo = self.github.get_repo(repo_name)
                    print(f"Searching in repo: {repo_name}")
                    
                    contents = self.github.search_code(f"{search_query} repo:{repo_name}")
                    
                    for content in list(contents)[:3]:  # at most 3 results per repo
                        try:
                            file_content = content.decoded_content.decode('utf-8')
                            relevant_lines = self._extract_relevant_code(file_content, query)
                            
                            if relevant_lines:
                                results.append(SearchResult(
                                    title=f"{content.name} - {repo_name}",
                                    content=relevant_lines,
                                    url=content.html_url,
                                    source="github_code",
                                    relevance_score=self._calculate_code_relevance(file_content, query),
                                    metadata={
                                        "repo": repo_name,
                                        "file_path": content.path,
                                        "file_type": "code"
                                    }
                                ))
                        except Exception as e:
                            print(f"Error processing file {content.name}: {e}")
                            continue
                            
                except Exception as e:
                    print(f"Error searching repo {repo_name}: {e}")
                    # On a 404, skip this repo and continue with the others
                    if "404" in str(e):
                        print(f"Repository {repo_name} not found, skipping...")
                    continue
        
        except Exception as e:
            print(f"GitHub code search error: {e}")
        
        return results
    
    async def _search_issues(self, query: str) -> List[SearchResult]:
        """Search issues in the configured repos; returns [] on failure."""
        results = []
        
        try:
            if not self.github:
                print("GitHub token not configured, skipping issues search")
                return results
                
            # Restrict the search to issues
            search_query = f"{query} is:issue"
            
            for repo_name in self.repos[:2]:
                try:
                    issues = self.github.search_issues(f"{search_query} repo:{repo_name}")
                    
                    for issue in list(issues)[:2]:  # at most 2 issues per repo
                        body = issue.body or ""
                        content = f"{issue.title}\n\n{body[:300]}..."
                        
                        results.append(SearchResult(
                            title=f"Issue: {issue.title}",
                            content=self._clean_text(content),
                            url=issue.html_url,
                            source="github_issues",
                            relevance_score=self._calculate_relevance(content, query),
                            metadata={
                                "repo": repo_name,
                                "issue_number": issue.number,
                                "state": issue.state,
                                "file_type": "issue"
                            }
                        ))
                
                except Exception as e:
                    print(f"Error searching issues in {repo_name}: {e}")
                    continue
        
        except Exception as e:
            print(f"GitHub issues search error: {e}")
        
        return results
    
    def _extract_relevant_code(self, code: str, query: str) -> str:
        """Extract query-matching lines from *code*, each with 2 lines of context."""
        lines = code.split('\n')
        relevant_lines = []
        query_words = query.lower().split()
        
        for i, line in enumerate(lines):
            line_lower = line.lower()
            if any(word in line_lower for word in query_words):
                # Include context (2 lines before and after the match)
                start = max(0, i - 2)
                end = min(len(lines), i + 3)
                context = lines[start:end]
                relevant_lines.extend(context)
                relevant_lines.append("---")  # separator between snippets
        
        result = '\n'.join(relevant_lines)
        return self._clean_text(result)
    
    def _calculate_code_relevance(self, code: str, query: str) -> float:
        """Score how relevant *code* is to *query*; returns a float in [0, 1]."""
        if not code:
            return 0.0
        
        code_lower = code.lower()
        query_words = query.lower().split()
        
        score = 0.0
        for word in query_words:
            # Matches on function/struct definitions weigh more
            if f"fn {word}" in code_lower or f"struct {word}" in code_lower:
                score += 0.3
            elif word in code_lower:
                score += 0.1
        
        return min(score, 1.0)


class StackOverflowSearchTool(BaseExternalTool):
    """Stack Overflow search tool backed by the Stack Exchange API."""
    
    def __init__(self, cache: ExternalToolCache):
        super().__init__(cache)
        self.tags = config.external_tools.stackoverflow_tags
        # Stack Exchange API root; request URLs are built from this base.
        self.base_url = "https://api.stackexchange.com/2.3"
    
    async def search(self, query: str) -> List[SearchResult]:
        """Search Stack Overflow questions for *query*.

        Returns up to ``self.max_results`` results sorted by relevance;
        results are cached per query.
        """
        # Serve from cache when possible
        cached_results = self.cache.get("stackoverflow", query)
        if cached_results:
            return cached_results
        
        results = []
        
        try:
            async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=self.timeout)) as session:
                # Search questions
                questions = await self._search_questions(session, query)
                results.extend(questions)
        
        except Exception as e:
            print(f"Stack Overflow search error: {e}")
        
        # Sort by relevance and truncate
        results.sort(key=lambda x: x.relevance_score, reverse=True)
        results = results[:self.max_results]
        
        # Cache the results
        self.cache.set("stackoverflow", query, results)
        
        return results
    
    async def _search_questions(self, session: aiohttp.ClientSession, query: str) -> List[SearchResult]:
        """Query /search/advanced and convert items into SearchResult objects."""
        results = []
        
        try:
            # Build the search parameters
            tags = ";".join(self.tags)
            params = {
                "order": "desc",
                "sort": "relevance",
                "q": query,
                "tagged": tags,
                "site": "stackoverflow",
                "pagesize": self.max_results,
                "filter": "withbody"  # include the question body in responses
            }
            
            # Build the endpoint from base_url instead of a second
            # hard-coded copy of the API root.
            async with session.get(f"{self.base_url}/search/advanced", params=params) as response:
                if response.status == 200:
                    data = await response.json()
                    
                    for item in data.get("items", []):
                        try:
                            title = item.get('title', '')
                            body = item.get('body', '')
                            question_id = item.get('question_id')
                            
                            # Strip HTML tags from the body
                            clean_body = BeautifulSoup(body, 'html.parser').get_text()
                            content = f"{title}\n\n{clean_body[:400]}..."
                            
                            results.append(SearchResult(
                                title=f"SO: {title}",
                                content=self._clean_text(content),
                                url=f"https://stackoverflow.com/questions/{question_id}",
                                source="stackoverflow",
                                relevance_score=self._calculate_so_relevance(item, query),
                                metadata={
                                    "type": "external",
                                    "question_id": question_id,
                                    "score": item.get('score', 0),
                                    "answer_count": item.get('answer_count', 0),
                                    "view_count": item.get('view_count', 0),
                                    "tags": item.get('tags', [])
                                }
                            ))
                        except Exception as e:
                            print(f"Error processing SO question: {e}")
                            continue
                else:
                    print(f"Stack Overflow API error: {response.status}")
        
        except Exception as e:
            print(f"Stack Overflow search error: {e}")
        
        return results
    
    def _calculate_so_relevance(self, item: Dict, query: str) -> float:
        """Score a Stack Overflow item against *query*; returns a float in [0, 1]."""
        score = 0.0
        
        # Question score contributes up to 0.3
        question_score = item.get('score', 0)
        score += min(question_score / 10.0, 0.3)
        
        # Answer count contributes up to 0.2
        answer_count = item.get('answer_count', 0)
        score += min(answer_count / 5.0, 0.2)
        
        # Title matches contribute up to 0.5. Guard against an empty or
        # whitespace-only query, which would otherwise divide by zero.
        title = item.get('title', '').lower()
        query_words = query.lower().split()
        if query_words:
            title_matches = sum(1 for word in query_words if word in title)
            score += (title_matches / len(query_words)) * 0.5
        
        return min(score, 1.0)


class ExternalToolsService:
    """External tools service manager.

    Hybrid architecture mixing traditional (custom) search tools and
    MCP-based tools behind a single registry. Provides parallel search
    across tools plus merging/ranking of the combined results.
    """
    
    def __init__(self):
        # Cache shared by every registered tool.
        self.cache = ExternalToolCache()
        
        # Traditional custom tools.
        self.custom_tools = {
            'cairo_docs': CairoDocsSearchTool(self.cache),
            'stackoverflow': StackOverflowSearchTool(self.cache)
        }
        
        # MCP tools ('github' is added here by _init_mcp_tools on success).
        self.mcp_tools = {}
        self._init_mcp_tools()
        
        # Merged registry of all tools; on a key clash an MCP tool shadows
        # a custom tool of the same name.
        self.tools = {**self.custom_tools, **self.mcp_tools}
    
    def _init_mcp_tools(self):
        """Initialize MCP tools, falling back to traditional tools on failure."""
        try:
            # Prefer the MCP GitHub tool over the traditional GitHub tool.
            self.mcp_tools['github'] = MCPGitHubTool(self.cache)
            print("✅ [外部工具服务] MCP GitHub工具初始化成功")
        except Exception as e:
            print(f"⚠️ [外部工具服务] MCP GitHub工具初始化失败，回退到传统工具: {e}")
            # If MCP tool initialization fails, fall back to the traditional tool.
            self.custom_tools['github'] = GitHubSearchTool(self.cache)
    
    async def parallel_search(self, query: str, tool_names: Optional[List[str]] = None) -> Dict[str, List[SearchResult]]:
        """Search multiple tools concurrently.

        Args:
            query: Search query forwarded to every selected tool.
            tool_names: Subset of registered tool names to use; None means all.

        Returns:
            Mapping of tool name -> list of SearchResult (empty list on
            timeout or failure of that tool).
        """
        print(f"🚀 [外部工具服务] 开始并行搜索: '{query}'")
        search_start_time = time.time()
        
        if tool_names is None:
            tool_names = list(self.tools.keys())
        
        print(f"🔧 [外部工具服务] 请求的工具: {tool_names}")
        
        # Filter down to tools that are actually registered.
        available_tools = {name: tool for name, tool in self.tools.items() if name in tool_names}
        
        if not available_tools:
            print(f"❌ [外部工具服务] 没有可用的工具")
            return {}
        
        print(f"✅ [外部工具服务] 可用工具: {list(available_tools.keys())}")
        
        # Create the concurrent tasks (they start running immediately).
        tasks = []
        for name, tool in available_tools.items():
            print(f"📋 [外部工具服务] 创建任务: {name}")
            task = asyncio.create_task(tool.search(query))
            tasks.append((name, task))
        
        print(f"⏱️ [外部工具服务] 设置超时时间: {config.external_tools.search_timeout}秒")
        
        # Await each task in turn with a per-task timeout.
        # NOTE(review): the timeout applies per awaited task, so the total
        # wall time can exceed search_timeout when several tools are slow —
        # confirm whether a single overall deadline was intended.
        results = {}
        completed_tools = 0
        failed_tools = 0
        
        for name, task in tasks:
            try:
                tool_start_time = time.time()
                tool_results = await asyncio.wait_for(task, timeout=config.external_tools.search_timeout)
                tool_duration = time.time() - tool_start_time
                results[name] = tool_results
                completed_tools += 1
                print(f"✅ [外部工具服务] {name} 搜索完成 - 耗时: {tool_duration:.2f}秒, 结果: {len(tool_results)}条")
            except asyncio.TimeoutError:
                print(f"⏰ [外部工具服务] {name} 搜索超时 ({config.external_tools.search_timeout}秒)")
                results[name] = []
                failed_tools += 1
            except Exception as e:
                print(f"❌ [外部工具服务] {name} 搜索失败: {e}")
                results[name] = []
                failed_tools += 1
        
        total_duration = time.time() - search_start_time
        total_results = sum(len(tool_results) for tool_results in results.values())
        
        print(f"🏁 [外部工具服务] 并行搜索完成 - 总耗时: {total_duration:.2f}秒")
        print(f"📊 [外部工具服务] 统计: 成功 {completed_tools} 个工具, 失败 {failed_tools} 个工具, 总结果 {total_results} 条")
        
        # Log per-tool result statistics.
        for tool_name, tool_results in results.items():
            if tool_results:
                best_score = max(result.relevance_score for result in tool_results)
                print(f"🎯 [外部工具服务] {tool_name}: {len(tool_results)}条结果, 最高相关性: {best_score:.3f}")
            else:
                print(f"⚪ [外部工具服务] {tool_name}: 无结果")
        
        return results
    
    def merge_and_rank_results(self, all_results: Dict[str, List[SearchResult]], 
                              max_total: int = 10) -> List[SearchResult]:
        """Merge results from all tools, weight, deduplicate and rank them.

        Args:
            all_results: Mapping of tool name -> results, as produced by
                parallel_search.
            max_total: Maximum number of results to return.

        Returns:
            Up to max_total SearchResult objects sorted by (weighted)
            relevance_score, deduplicated by URL.
        """
        print(f"🔄 [结果处理] 开始合并和排序结果, 最大返回数量: {max_total}")
        
        merged_results = []
        tool_result_counts = {}
        
        for tool_name, results in all_results.items():
            tool_result_counts[tool_name] = len(results)
            print(f"📥 [结果处理] 处理 {tool_name} 的 {len(results)} 条结果")
            
            for result in results:
                # Adjust the score by a per-source weight.
                # NOTE(review): this mutates result.relevance_score in place,
                # so calling this method twice on the same SearchResult
                # objects compounds the weighting — confirm callers call it once.
                weight = self._get_source_weight(result.source)
                original_score = result.relevance_score
                result.relevance_score *= weight
                print(f"⚖️ [结果处理] {tool_name} 结果权重调整: {original_score:.3f} -> {result.relevance_score:.3f} (权重: {weight})")
                merged_results.append(result)
        
        print(f"📊 [结果处理] 合并前总结果数: {len(merged_results)}")
        
        # Deduplicate by URL (first occurrence wins).
        seen_urls = set()
        unique_results = []
        duplicate_count = 0
        
        for result in merged_results:
            if result.url not in seen_urls:
                seen_urls.add(result.url)
                unique_results.append(result)
            else:
                duplicate_count += 1
                print(f"🔄 [结果处理] 发现重复URL: {result.url}")
        
        print(f"🗑️ [结果处理] 去重完成: 移除 {duplicate_count} 个重复结果, 剩余 {len(unique_results)} 个")
        
        # Sort by relevance (descending) and truncate to max_total.
        unique_results.sort(key=lambda x: x.relevance_score, reverse=True)
        final_results = unique_results[:max_total]
        
        print(f"🏆 [结果处理] 最终结果: {len(final_results)} 条")
        
        # Log details of the final ranking.
        for i, result in enumerate(final_results, 1):
            print(f"🎯 [结果处理] 第{i}名: {result.title[:50]}... (相关性: {result.relevance_score:.3f}, 来源: {result.source})")
        
        return final_results
    
    def _get_source_weight(self, source: str) -> float:
        """Return the ranking weight for a result source (default 0.5)."""
        weights = {
            'cairo_docs': 1.0,      # official docs get the highest weight
            'starknet_docs': 1.0,   # official docs get the highest weight
            'github_code': 0.8,     # code examples weighted fairly high
            'github_issues': 0.6,   # issues weighted medium
            'stackoverflow': 0.7    # SO weighted medium
        }
        return weights.get(source, 0.5)
    
    async def cleanup(self):
        """Release resources, in particular those held by MCP tools."""
        print("🧹 [外部工具服务] 开始清理资源")
        
        # Clean up MCP tools (only those exposing a cleanup coroutine).
        for tool_name, tool in self.mcp_tools.items():
            try:
                if hasattr(tool, 'cleanup'):
                    await tool.cleanup()
                    print(f"✅ [外部工具服务] {tool_name} MCP工具清理完成")
            except Exception as e:
                print(f"❌ [外部工具服务] {tool_name} MCP工具清理失败: {e}")
        
        print("🏁 [外部工具服务] 资源清理完成")
    
    def get_tool_info(self) -> Dict[str, Dict[str, Any]]:
        """Return a summary of registered tool names and the total count."""
        info = {
            'custom_tools': list(self.custom_tools.keys()),
            'mcp_tools': list(self.mcp_tools.keys()),
            'total_tools': len(self.tools)
        }
        return info


# Global singleton instance of the external tools service.
# NOTE(review): constructed at import time, so importing this module
# instantiates all tools (and may print an MCP-fallback warning).
external_tools_service = ExternalToolsService()