"""增强的多模态工具模块 - 用于处理图像、表格和其他多媒体内容识别"""
from typing import List, Dict, Any, Optional, Union, Tuple, Callable, Awaitable
import re
import json
import base64
import requests
import logging
import hashlib
import time
from urllib.parse import urljoin, urlparse
from langchain.tools import BaseTool
from src.config.settings import settings

# Module-level logger; handlers/levels are configured by the application.
logger = logging.getLogger(__name__)

# Simple in-memory cache: maps md5(query) -> {'timestamp': float, 'data': dict}.
# NOTE(review): process-local and unbounded; entries are only evicted lazily
# on lookup after they expire.
_search_cache = {}
_cache_ttl = 3600  # cache entries live for one hour (seconds)

def _get_cache_key(query: str) -> str:
    """生成查询的缓存键"""
    return hashlib.md5(query.encode()).hexdigest()

def _get_from_cache(query: str) -> Optional[Dict[str, Any]]:
    """Return the cached result for *query*, or None if absent or expired.

    Expired entries are evicted lazily on lookup.
    """
    key = _get_cache_key(query)
    entry = _search_cache.get(key)
    if entry is None:
        return None
    if time.time() - entry['timestamp'] < _cache_ttl:
        return entry['data']
    # Entry has outlived the TTL: drop it and report a miss.
    del _search_cache[key]
    return None

def _save_to_cache(query: str, data: Dict[str, Any]):
    """Store *data* in the in-memory cache, stamped with the current time."""
    entry = {'timestamp': time.time(), 'data': data}
    _search_cache[_get_cache_key(query)] = entry

class EnhancedMultimodalProcessorTool(BaseTool):
    """Enhanced multimodal processor tool.

    Extracts images, tables, charts, videos and embedded content from raw
    HTML via regex heuristics, enriches the results with cross-modal
    associations, and (optionally) runs OCR-style analysis on document-like
    images. All extraction is best-effort: failures are logged and partial
    results are returned.
    """

    name: str = "enhanced_multimodal_processor"
    description: str = """
    处理网页内容中的多模态数据，包括：
    - 图像提取、描述和内容分析
    - 表格数据提取、结构化和摘要
    - 图表和可视化内容解析
    - 视频缩略图和元数据提取
    - 嵌入式内容识别
    - 文档图像内容提取
    """

    def _run(self, content: str, url: Optional[str] = None) -> Dict[str, Any]:
        """Extract all enabled multimodal data types from *content*.

        Args:
            content: Raw HTML to analyze.
            url: Optional page URL used to resolve relative image links.

        Returns:
            Dict with list values under the keys "images", "tables",
            "charts", "videos", "embedded_content", "document_images"
            and "ocr_content".
        """
        result = {
            "images": [],
            "tables": [],
            "charts": [],
            "videos": [],
            "embedded_content": [],
            "document_images": [],  # document-image content (OCR'd pages etc.)
            "ocr_content": []       # raw OCR extraction results
        }

        # Multimodal processing is opt-in via settings.
        if not settings.MULTIMODAL_ENABLED:
            return result

        try:
            if settings.MULTIMODAL_IMAGE_PROCESSING:
                result["images"] = self._extract_images(content, base_url=url)

            if settings.MULTIMODAL_TABLE_PROCESSING:
                result["tables"] = self._extract_tables(content)

            if getattr(settings, "MULTIMODAL_CHART_PROCESSING", False):
                result["charts"] = self._extract_charts(content)

            if getattr(settings, "MULTIMODAL_VIDEO_PROCESSING", False):
                result["videos"] = self._extract_videos(content)

            if getattr(settings, "MULTIMODAL_EMBEDDED_PROCESSING", False):
                result["embedded_content"] = self._extract_embedded_content(content)

            # Fix: "document_images" used to be initialized but never
            # populated even though _extract_document_images existed. Wire
            # it in behind an opt-in flag so default behavior is unchanged.
            if getattr(settings, "MULTIMODAL_DOCUMENT_PROCESSING", False):
                result["document_images"] = self._extract_document_images(content, base_url=url)

            # Cross-modal enrichment (related content, image grouping).
            self._enhance_content_analysis(result)

        except Exception as e:
            # Best-effort: log and fall through, returning whatever was
            # extracted before the failure.
            logger.error(f"多模态处理出错: {str(e)}")

        return result

    async def _arun(self, content: str, url: Optional[str] = None) -> Dict[str, Any]:
        """Async entry point; delegates to the synchronous implementation."""
        return self._run(content, url)

    def _extract_images(self, content: str, base_url: Optional[str] = None) -> List[Dict[str, Any]]:
        """Extract image metadata (URL, description, dimensions, context, type).

        Args:
            content: HTML content.
            base_url: Base URL used to resolve relative image paths.

        Returns:
            De-duplicated, decorative-filtered list of image dicts, capped at
            settings.MAX_IMAGES_PER_SEARCH entries.
        """
        images = []

        # <img> tag patterns: the first two capture (src, alt) / (alt, src)
        # pairs, the third captures src only (alt-less tags).
        img_patterns = [
            r'<img[^>]*src=["\']([^"\']*)["\'][^>]*alt=["\']([^"\']*)["\'][^>]*>',
            r'<img[^>]*alt=["\']([^"\']*)["\'][^>]*src=["\']([^"\']*)["\'][^>]*>',
            r'<img[^>]*src=["\']([^"\']*)["\'][^>]*>'
        ]

        for pattern in img_patterns:
            matches = re.findall(pattern, content, re.IGNORECASE)
            for match in matches:
                # Fix: with a single capture group, re.findall yields plain
                # strings, so the old `len(match) == 2` test inspected the
                # URL's character count and `match[0]` truncated the URL to
                # its first character. Distinguish tuple vs. str instead.
                if isinstance(match, tuple):
                    url, alt = match
                    if pattern.find("alt=") < pattern.find("src="):
                        # alt group precedes src group in this pattern: swap.
                        alt, url = match
                else:
                    url = match
                    alt = "无描述"

                # Resolve relative URLs against the page URL.
                if base_url and not self._is_absolute_url(url):
                    url = urljoin(base_url, url)

                # Pull size attributes and surrounding context, if any.
                width, height = self._extract_image_dimensions(content, url)
                context = self._extract_image_context(content, url)

                images.append({
                    "url": url,
                    "description": alt if alt else "无描述",
                    "dimensions": {"width": width, "height": height} if width and height else None,
                    "context": context,
                    "type": self._guess_image_type(url)
                })

        # De-duplicate by URL, keeping the first occurrence.
        unique_images = []
        seen_urls = set()
        for img in images:
            if img["url"] not in seen_urls:
                unique_images.append(img)
                seen_urls.add(img["url"])

        # Drop small icons and decorative imagery.
        filtered_images = self._filter_decorative_images(unique_images)

        # Cap the number of returned images per configuration.
        max_images = getattr(settings, "MAX_IMAGES_PER_SEARCH", 5)
        return filtered_images[:max_images]

    def _is_absolute_url(self, url: str) -> bool:
        """Return True if *url* is absolute (has a network location)."""
        return bool(urlparse(url).netloc)

    def _extract_image_dimensions(self, content: str, image_url: str) -> Tuple[Optional[int], Optional[int]]:
        """Return (width, height) from the img tag's attributes, or (None, None)."""
        escaped_url = re.escape(image_url)
        # Fix: the original pattern placed the URL *inside* a character class
        # (["\'{url}"\'"]), which matched a single character rather than the
        # quoted URL, so dimensions were essentially never found. Delimit the
        # escaped URL with ["\'] on each side instead.
        pattern = (
            rf'<img[^>]*src=["\']{escaped_url}["\'][^>]*'
            rf'(?:width=["\'](\d+)["\'][^>]*height=["\'](\d+)["\']|'
            rf'height=["\'](\d+)["\'][^>]*width=["\'](\d+)["\'])[^>]*>'
        )
        match = re.search(pattern, content, re.IGNORECASE)

        if match:
            groups = match.groups()
            if groups[0] is not None and groups[1] is not None:
                return int(groups[0]), int(groups[1])
            elif groups[2] is not None and groups[3] is not None:
                # height appeared before width; swap back to (width, height).
                return int(groups[3]), int(groups[2])

        # No explicit size attributes found.
        return None, None

    def _extract_image_context(self, content: str, image_url: str) -> Optional[str]:
        """Return nearby context for an image (figcaption or preceding heading)."""
        escaped_url = re.escape(image_url)

        # Fix: same character-class bug as in _extract_image_dimensions —
        # the URL must sit between ["\'] delimiters, not inside a class.
        figure_pattern = (
            rf'<figure[^>]*>.*?<img[^>]*src=["\']{escaped_url}["\']'
            rf'.*?<figcaption[^>]*>(.*?)</figcaption>.*?</figure>'
        )
        figure_match = re.search(figure_pattern, content, re.IGNORECASE | re.DOTALL)
        if figure_match:
            return figure_match.group(1).strip()

        # Fall back to a heading immediately preceding the image.
        nearby_heading_pattern = (
            rf'<h[1-6][^>]*>(.*?)</h[1-6]>\s*(?:<[^>]*>\s*)*'
            rf'<img[^>]*src=["\']{escaped_url}["\']'
        )
        heading_match = re.search(nearby_heading_pattern, content, re.IGNORECASE | re.DOTALL)
        if heading_match:
            return heading_match.group(1).strip()

        return None

    def _guess_image_type(self, url: str) -> str:
        """Heuristically classify an image by its URL extension/keywords."""
        url_lower = url.lower()
        if url_lower.endswith('.jpg') or url_lower.endswith('.jpeg'):
            return "照片"
        elif url_lower.endswith('.png'):
            return "图形"
        elif url_lower.endswith('.gif'):
            return "动图"
        elif url_lower.endswith('.svg'):
            return "矢量图"
        elif url_lower.endswith('.webp'):
            return "WebP图像"
        elif "chart" in url_lower or "graph" in url_lower:
            return "图表"
        elif "icon" in url_lower or "logo" in url_lower:
            return "图标/Logo"
        else:
            return "图像"

    def _filter_decorative_images(self, images: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Filter out small icons and obviously decorative images."""
        filtered = []
        for img in images:
            # Skip tiny icons/logos (both dimensions under 50px).
            if img["type"] == "图标/Logo":
                dimensions = img.get("dimensions")
                if dimensions and dimensions.get("width") and dimensions.get("height"):
                    if dimensions["width"] < 50 and dimensions["height"] < 50:
                        continue

            # Skip URLs that clearly denote decorative assets.
            url_lower = img["url"].lower()
            if any(term in url_lower for term in ["background", "bg-", "decoration", "separator", "divider", "spacer"]):
                continue

            filtered.append(img)

        return filtered

    def _extract_tables(self, content: str) -> List[Dict[str, Any]]:
        """Extract table info (title, structure, summary) from HTML.

        Args:
            content: HTML content.

        Returns:
            List of table dicts, capped at settings.MAX_TABLES_PER_SEARCH.
        """
        tables = []

        # Locate <table> elements.
        table_pattern = r'<table[^>]*>(.*?)</table>'
        table_matches = re.findall(table_pattern, content, re.IGNORECASE | re.DOTALL)

        for i, table_content in enumerate(table_matches):
            # Stop once the configured maximum has been reached.
            max_tables = getattr(settings, "MAX_TABLES_PER_SEARCH", 3)
            if len(tables) >= max_tables:
                break

            # Default title; may be replaced by a nearby heading/caption.
            title = f"表格 {i+1}"

            # Look for a heading or caption adjacent to this exact table.
            title_patterns = [
                rf'<h[1-6][^>]*>\s*([^<]*表格[^<]*)\s*</h[1-6]>\s*<table[^>]*>{re.escape(table_content)}</table>',
                rf'<table[^>]*>{re.escape(table_content)}</table>\s*<caption[^>]*>(.*?)</caption>',
                rf'<caption[^>]*>(.*?)</caption>\s*<table[^>]*>{re.escape(table_content)}</table>'
            ]

            for pattern in title_patterns:
                title_match = re.search(pattern, content, re.IGNORECASE | re.DOTALL)
                if title_match:
                    title = title_match.group(1).strip()
                    break

            # Parse headers/rows and build a short natural-language summary.
            structure = self._extract_table_structure(table_content)
            summary = self._generate_table_summary(structure)

            tables.append({
                "title": title,
                "structure": structure,
                "summary": summary,
                "content_preview": table_content[:200] + "..." if len(table_content) > 200 else table_content,
                "row_count": len(structure["rows"]) if structure and "rows" in structure else 0,
                "column_count": len(structure["headers"]) if structure and "headers" in structure else 0
            })

        return tables

    def _extract_table_structure(self, table_content: str) -> Dict[str, Any]:
        """Parse a table body into {'headers': [...], 'rows': [[...], ...]}."""
        structure = {
            "headers": [],
            "rows": []
        }

        # Header cells (<th>).
        header_pattern = r'<th[^>]*>(.*?)</th>'
        headers = re.findall(header_pattern, table_content, re.IGNORECASE | re.DOTALL)
        structure["headers"] = [self._clean_html(h) for h in headers]

        # Data rows (<tr> containing <td> cells).
        row_pattern = r'<tr[^>]*>(.*?)</tr>'
        rows = re.findall(row_pattern, table_content, re.IGNORECASE | re.DOTALL)

        for row in rows:
            # Skip header rows.
            if '<th' in row:
                continue

            cell_pattern = r'<td[^>]*>(.*?)</td>'
            cells = re.findall(cell_pattern, row, re.IGNORECASE | re.DOTALL)
            structure["rows"].append([self._clean_html(c) for c in cells])

        return structure

    def _generate_table_summary(self, structure: Dict[str, Any]) -> str:
        """Produce a short Chinese-language summary of a parsed table."""
        if not structure or not structure["headers"] or not structure["rows"]:
            return "空表格"

        headers = structure["headers"]
        rows = structure["rows"]

        summary = f"包含{len(headers)}列和{len(rows)}行的表格。"

        # For small tables, include headers and one sample row.
        if len(headers) <= 5 and len(rows) <= 5:
            summary += f" 列标题为: {', '.join(headers)}。"

            if rows:
                first_row = rows[0]
                if len(first_row) == len(headers):
                    sample_data = []
                    for i, header in enumerate(headers):
                        if i < len(first_row):
                            sample_data.append(f"{header}: {first_row[i]}")
                    if sample_data:
                        summary += f" 第一行数据: {'; '.join(sample_data[:3])}。"

        return summary

    def _clean_html(self, html: str) -> str:
        """Strip HTML tags and collapse whitespace."""
        clean = re.sub(r'<[^>]*>', '', html)
        clean = re.sub(r'\s+', ' ', clean).strip()
        return clean

    def _extract_charts(self, content: str) -> List[Dict[str, Any]]:
        """Extract chart info from common chart container markup."""
        charts = []

        # Containers typically used for charts (div/canvas/svg with "chart").
        chart_patterns = [
            r'<div[^>]*class=["\'][^"\']*chart[^"\']*["\'][^>]*>(.*?)</div>',
            r'<canvas[^>]*id=["\'][^"\']*chart[^"\']*["\'][^>]*>(.*?)</canvas>',
            r'<svg[^>]*class=["\'][^"\']*chart[^"\']*["\'][^>]*>(.*?)</svg>'
        ]

        for pattern in chart_patterns:
            matches = re.findall(pattern, content, re.IGNORECASE | re.DOTALL)
            for i, match in enumerate(matches):
                # Default numbered title; may be replaced by an inner heading.
                title = f"图表 {len(charts) + 1}"

                title_pattern = r'<h[1-6][^>]*>(.*?)</h[1-6]>'
                title_match = re.search(title_pattern, match, re.IGNORECASE)
                if title_match:
                    title = title_match.group(1).strip()

                # Guess chart type from keywords in the container markup.
                chart_type = "未知类型"
                if "pie" in match.lower() or "饼图" in match:
                    chart_type = "饼图"
                elif "bar" in match.lower() or "柱状图" in match:
                    chart_type = "柱状图"
                elif "line" in match.lower() or "折线图" in match:
                    chart_type = "折线图"
                elif "scatter" in match.lower() or "散点图" in match:
                    chart_type = "散点图"

                charts.append({
                    "title": title,
                    "type": chart_type,
                    "content_preview": match[:100] + "..." if len(match) > 100 else match
                })

        # Cap returned charts per configuration.
        max_charts = getattr(settings, "MAX_CHARTS_PER_SEARCH", 3)
        return charts[:max_charts]

    def _extract_videos(self, content: str) -> List[Dict[str, Any]]:
        """Extract video info from <video> tags and known-platform iframes."""
        videos = []

        video_patterns = [
            r'<video[^>]*src=["\']([^"\']*)["\'][^>]*>(.*?)</video>',
            r'<iframe[^>]*src=["\']([^"\']*(?:youtube|youku|vimeo|bilibili)[^"\']*)["\'][^>]*></iframe>'
        ]

        for pattern in video_patterns:
            matches = re.findall(pattern, content, re.IGNORECASE | re.DOTALL)
            for match in matches:
                url = match[0] if isinstance(match, tuple) else match

                # Use a heading immediately preceding the element as a title.
                title = "视频"
                title_pattern = r'<h[1-6][^>]*>(.*?)</h[1-6]>\s*(?:<[^>]*>\s*)*<(?:video|iframe)[^>]*src=["\']' + re.escape(url) + '["\']'
                title_match = re.search(title_pattern, content, re.IGNORECASE | re.DOTALL)
                if title_match:
                    title = title_match.group(1).strip()

                # Identify the hosting platform from the URL.
                platform = "未知平台"
                if "youtube" in url.lower():
                    platform = "YouTube"
                elif "youku" in url.lower():
                    platform = "优酷"
                elif "bilibili" in url.lower():
                    platform = "哔哩哔哩"
                elif "vimeo" in url.lower():
                    platform = "Vimeo"

                videos.append({
                    "title": title,
                    "url": url,
                    "platform": platform
                })

        # Cap returned videos per configuration.
        max_videos = getattr(settings, "MAX_VIDEOS_PER_SEARCH", 2)
        return videos[:max_videos]

    def _extract_embedded_content(self, content: str) -> List[Dict[str, Any]]:
        """Extract embedded non-video content (documents, maps, widgets)."""
        embedded = []

        video_platforms = ("youtube", "youku", "vimeo", "bilibili")

        embed_patterns = [
            r'<embed[^>]*src=["\']([^"\']*)["\'][^>]*>',
            r'<object[^>]*data=["\']([^"\']*)["\'][^>]*>(.*?)</object>',
            # Fix: the original negative lookahead sat after a greedy [^"\']*
            # and could always be satisfied by backtracking, so video-platform
            # iframes were not excluded. Match all iframes here and filter
            # the video platforms in code below.
            r'<iframe[^>]*src=["\']([^"\']*)["\'][^>]*></iframe>'
        ]

        for pattern in embed_patterns:
            matches = re.findall(pattern, content, re.IGNORECASE | re.DOTALL)
            for match in matches:
                url = match[0] if isinstance(match, tuple) else match

                # Video platforms are handled by _extract_videos instead.
                if any(platform in url.lower() for platform in video_platforms):
                    continue

                # Classify by URL keywords/extensions.
                content_type = "未知类型"
                if ".pdf" in url.lower():
                    content_type = "PDF文档"
                elif ".doc" in url.lower() or ".docx" in url.lower():
                    content_type = "Word文档"
                elif ".xls" in url.lower() or ".xlsx" in url.lower():
                    content_type = "Excel表格"
                elif ".ppt" in url.lower() or ".pptx" in url.lower():
                    content_type = "PowerPoint演示文稿"
                elif "map" in url.lower():
                    content_type = "地图"
                elif "widget" in url.lower():
                    content_type = "小部件"

                embedded.append({
                    "url": url,
                    "type": content_type
                })

        # Cap returned embedded items per configuration.
        max_embedded = getattr(settings, "MAX_EMBEDDED_PER_SEARCH", 3)
        return embedded[:max_embedded]

    def _enhance_content_analysis(self, result: Dict[str, List[Dict[str, Any]]]) -> None:
        """Enrich extracted content with cross-modal associations (in place)."""
        # Nothing to correlate if no substantive multimodal content exists.
        if not result["images"] and not result["tables"] and not result["charts"]:
            return

        # Link related images/tables.
        self._find_related_content(result)

        # Classify and group images by type.
        if result["images"]:
            self._classify_and_group_images(result["images"])

    def _find_related_content(self, result: Dict[str, List[Dict[str, Any]]]) -> None:
        """Cross-link images and tables whose text descriptions overlap."""
        for image in result.get("images", []):
            for table in result.get("tables", []):
                # Associate when the image's description/context shares
                # keywords with the table's title.
                if (self._has_similarity(image.get("description", ""), table.get("title", "")) or
                    self._has_similarity(image.get("context", ""), table.get("title", ""))):
                    image["related_table"] = table.get("title")
                    table["related_image"] = image.get("url")

    def _has_similarity(self, text1: str, text2: str) -> bool:
        """Return True if two texts share at least two keywords (case-insensitive)."""
        if not text1 or not text2:
            return False

        # Simple keyword-overlap heuristic.
        words1 = set(re.findall(r'\w+', text1.lower()))
        words2 = set(re.findall(r'\w+', text2.lower()))

        common_words = words1.intersection(words2)
        return len(common_words) >= 2

    def _classify_and_group_images(self, images: List[Dict[str, Any]]) -> None:
        """Annotate each image with its type group and group size (in place)."""
        # Bucket images by guessed type.
        type_groups = {}
        for image in images:
            img_type = image.get("type", "未知")
            if img_type not in type_groups:
                type_groups[img_type] = []
            type_groups[img_type].append(image)

        # Attach group membership info to every image.
        for image in images:
            img_type = image.get("type", "未知")
            if img_type in type_groups:
                image["group_info"] = {
                    "type": img_type,
                    "count": len(type_groups[img_type])
                }

    def _extract_document_images(self, content: str, base_url: Optional[str] = None) -> List[Dict[str, Any]]:
        """Extract document-like images and run OCR-style analysis on them."""
        document_images = []

        # Candidate images (scanned pages, document screenshots, ...).
        images = self._extract_images(content, base_url)

        for img in images:
            # Heuristic check for document-like imagery.
            if self._is_document_image(img):
                # OCR the document image (currently a stub — see below).
                ocr_result = self._perform_ocr_on_image(img["url"])

                document_images.append({
                    "url": img["url"],
                    "description": img["description"],
                    "ocr_text": ocr_result.get("text", ""),
                    "ocr_confidence": ocr_result.get("confidence", 0),
                    "layout_analysis": ocr_result.get("layout", {}),
                    "tables": ocr_result.get("tables", [])
                })

        return document_images

    def _is_document_image(self, image_info: Dict[str, Any]) -> bool:
        """Heuristically decide whether an image looks like a document page."""
        img_type = image_info.get("type", "")
        dimensions = image_info.get("dimensions", {})

        # Document images are typically large with a page-like aspect ratio.
        if img_type in ["照片", "图像"] and dimensions:
            width = dimensions.get("width", 0)
            height = dimensions.get("height", 0)
            if width > 300 and height > 300:
                aspect_ratio = width / height
                if 0.5 < aspect_ratio < 2.0:  # roughly page-shaped
                    return True

        # URL keywords are also a strong hint.
        url = image_info.get("url", "").lower()
        document_keywords = ["document", "scan", "pdf", "page", "report", "invoice"]
        if any(keyword in url for keyword in document_keywords):
            return True

        return False

    def _perform_ocr_on_image(self, image_url: str) -> Dict[str, Any]:
        """Run OCR on an image.

        NOTE(review): stub — a real OCR backend (Tesseract, Google Vision,
        etc.) should be integrated here; this returns fixed placeholder data.
        """
        return {
            "text": "OCR处理结果占位符",
            "confidence": 0.95,
            "layout": {
                "paragraphs": 3,
                "lines": 15,
                "words": 150
            },
            "tables": []  # tables recovered from the image
        }

    def _extract_charts_with_analysis(self, content: str) -> List[Dict[str, Any]]:
        """Extract charts and augment each with (stubbed) analysis fields."""
        charts = self._extract_charts(content)

        for chart in charts:
            chart.update({
                "data_points": self._extract_chart_data_points(chart),
                "trend_analysis": self._analyze_chart_trend(chart),
                "key_insights": self._generate_chart_insights(chart)
            })

        return charts

    def _extract_chart_data_points(self, chart_info: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Extract data points from a chart.

        NOTE(review): stub — returns fixed placeholder points until a real
        chart-analysis service is integrated.
        """
        return [
            {"x": 1, "y": 10, "label": "数据点1"},
            {"x": 2, "y": 25, "label": "数据点2"},
            {"x": 3, "y": 15, "label": "数据点3"}
        ]

    def _analyze_chart_trend(self, chart_info: Dict[str, Any]) -> str:
        """Analyze a chart's trend.

        NOTE(review): stub — returns a fixed placeholder result.
        """
        return "上升趋势"

    def _generate_chart_insights(self, chart_info: Dict[str, Any]) -> List[str]:
        """Generate chart insights.

        NOTE(review): stub — returns fixed placeholder insights.
        """
        return [
            "数据显示持续增长",
            "最高值出现在第三季度",
            "整体趋势积极"
        ]

class MultimodalProcessorTool(EnhancedMultimodalProcessorTool):
    """Backward-compatible multimodal processor tool.

    Identical to EnhancedMultimodalProcessorTool except for the tool name.
    The previous `__init__` override only delegated to `super().__init__`,
    so it has been removed (the inherited constructor behaves identically).
    """
    name: str = "multimodal_processor"

def get_multimodal_processor() -> MultimodalProcessorTool:
    """Factory: build a fresh backward-compatible multimodal processor."""
    tool = MultimodalProcessorTool()
    return tool

def get_enhanced_multimodal_processor() -> EnhancedMultimodalProcessorTool:
    """Factory: build a fresh enhanced multimodal processor."""
    tool = EnhancedMultimodalProcessorTool()
    return tool

class MultimodalSearchTool:
    """Search tool that augments text search results with multimodal content.

    Wraps the project's text search tool, runs the multimodal processor over
    the results, and caches the combined output in the module-level cache.
    Degrades gracefully: on processor failure it returns plain search
    results; on total failure it returns an error string.
    """

    def __init__(self, processor: Optional[EnhancedMultimodalProcessorTool] = None):
        """Initialize with a search backend and a multimodal processor.

        Args:
            processor: Optional processor override; a default
                backward-compatible processor is used when omitted.
        """
        # Imported lazily to avoid a circular import at module load time.
        from .search_tool import get_search_tool
        self.search_tool = get_search_tool()
        self.multimodal_processor = processor if processor else get_multimodal_processor()

    def invoke(self, query: str) -> Dict[str, Any]:
        """Run a search for *query* and attach extracted images/tables.

        Returns:
            Dict with keys "text_results", "images" and "tables". Never
            raises: failures fall back to progressively simpler results.
        """
        try:
            # Serve a cached combined result if one exists and has the
            # expected shape (guards against stale/partial cache entries).
            cached_result = _get_from_cache(query)
            if cached_result and isinstance(cached_result, dict) and "images" in cached_result and "tables" in cached_result:
                return cached_result

            # Text search first.
            search_results = self.search_tool.invoke(query)

            # Then multimodal extraction over the search output.
            try:
                multimodal_data = self.multimodal_processor.invoke(search_results)
            except Exception as e:
                # Processor failure is non-fatal: keep the text results.
                logger.warning(f"多模态处理失败，使用基本搜索结果: {str(e)}")
                multimodal_data = {"images": [], "tables": []}

            result = {
                "text_results": search_results,
                "images": multimodal_data.get("images", []),
                "tables": multimodal_data.get("tables", [])
            }

            # Cache the combined result for subsequent identical queries.
            _save_to_cache(query, result)

            return result
        except Exception as e:
            logger.error(f"多模态搜索执行失败: {str(e)}")
            # Degrade to a plain text search.
            try:
                basic_result = self.search_tool.invoke(query)
                return {
                    "text_results": basic_result,
                    "images": [],
                    "tables": []
                }
            except Exception as inner_e:
                # Fix: the fallback failure used to be silently discarded;
                # log it so total outages are diagnosable.
                logger.error(f"降级搜索也失败: {str(inner_e)}")
                return {
                    "text_results": f"搜索完全失败: {str(e)}",
                    "images": [],
                    "tables": []
                }