"""
完整内容识别模块 v2.3
采用全图识别+智能裁剪策略，彻底解决遗漏问题
"""

import base64
import json
import requests
from typing import Dict, List, Any, Optional, Tuple
from PIL import Image, ImageDraw, ImageFilter, ImageStat
import logging
import asyncio
import time
from pathlib import Path
import subprocess
import math
from modules.manga_panel_splitter import MangaPanelSplitter

class CompleteContentRecognition:
    """完整内容识别器 - 全图识别+智能裁剪"""
    
    def __init__(self, config: Dict[str, Any]):
        """初始化识别器"""
        self.config = config
        self.logger = logging.getLogger('image_cropping_system')
        
        # Ollama 配置
        self.ollama_config = config.get('ollama', {})
        self.base_url = self.ollama_config.get('base_url', 'http://localhost:6399')
        self.default_model = self.ollama_config.get('model', 'qwen2.5vl:7b')
        self.timeout = self.ollama_config.get('timeout', 120)
        
        # 处理配置
        self.confidence_threshold = config.get('processing', {}).get('confidence_threshold', 0.85)
        self.max_retries = config.get('processing', {}).get('max_retries', 3)
        
        # 全图识别配置
        self.full_image_analysis = config.get('complete_recognition', {}).get('full_image_analysis', True)
        self.intelligent_cropping = config.get('complete_recognition', {}).get('intelligent_cropping', True)
        self.content_padding = config.get('complete_recognition', {}).get('content_padding', 0.02)
        
        # 漫画内容过滤配置
        self.manga_mode = config.get('complete_recognition', {}).get('manga_mode', True)
        self.black_threshold = config.get('complete_recognition', {}).get('black_threshold', 30)
        self.gap_threshold = config.get('complete_recognition', {}).get('gap_threshold', 50)
        self.min_content_size = config.get('complete_recognition', {}).get('min_content_size', 100)
        
        # 当前使用的模型
        self.current_model = None
        
        # 初始化漫画分镜分割器
        self.manga_splitter = MangaPanelSplitter(config)
    
    def get_current_model(self) -> str:
        """获取当前使用的模型"""
        if not self.current_model:
            self.current_model = self.default_model
        return self.current_model
    
    async def recognize_content(self, files: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """识别图片内容区域（全图识别+智能裁剪）"""
        results = []
        
        for file_info in files:
            try:
                self.logger.info(f"开始全图识别+智能裁剪: {file_info['processed_name']}")
                
                # 步骤1：全图AI识别，获取所有内容区域
                full_content_regions = await self._full_image_content_analysis(file_info['processed_path'])
                
                # 步骤2：基于识别结果进行智能裁剪
                if full_content_regions:
                    final_bbox = await self._intelligent_crop_based_on_content(
                        file_info['processed_path'], 
                        full_content_regions,
                        file_info['width'],
                        file_info['height']
                    )
                else:
                    # 如果识别失败，使用全图
                    final_bbox = [0, 0, file_info['width'], file_info['height']]
                    self.logger.warning(f"全图识别失败，使用全图: {file_info['processed_name']}")
                
                if final_bbox:
                    self.logger.info(f"智能裁剪最终边界: {final_bbox[:4]}")
                else:
                    final_bbox = [0, 0, file_info['width'], file_info['height']]
                    self.logger.warning(f"智能裁剪失败，使用全图: {file_info['processed_name']}")
                
                result = {
                    **file_info,
                    'content_bbox': final_bbox,
                    'confidence': final_bbox[4] if len(final_bbox) > 4 else 1.0,
                    'recognition_status': 'success',
                    'recognition_method': 'full_image_intelligent_crop'
                }
                
                results.append(result)
                
                # 保存可视化结果
                if self.config.get('output', {}).get('save_bbox_visualization', True):
                    await self._save_full_analysis_visualization(file_info, final_bbox, full_content_regions)
                
            except Exception as e:
                self.logger.error(f"全图识别+智能裁剪失败 {file_info['processed_name']}: {e}")
                results.append({
                    **file_info,
                    'content_bbox': [0, 0, file_info['width'], file_info['height']],
                    'confidence': 0.0,
                    'recognition_status': 'error',
                    'error': str(e),
                    'recognition_method': 'fallback_full_image'
                })
        
        return results
    
    async def _full_image_content_analysis(self, image_path: str) -> Optional[List[Dict[str, Any]]]:
        """Run a full-image analysis pass and return the detected regions.

        Sends the whole image to the vision model with a prompt that asks
        for every visible content area, then returns the model's
        'content_regions' list (dicts with 'bbox', 'content_type',
        'importance', 'description'). Returns None when the call fails or
        the response lacks a 'content_regions' key.
        """
        try:
            self.logger.info("开始全图内容分析...")
            
            # Prompt for whole-image analysis. NOTE: this is a runtime string
            # sent to the model — it must stay in the language the model is
            # prompted in; do not translate it.
            full_image_prompt = """
请对这张完整的图片进行全面的内容分析。要求：

1. 识别图片中所有的内容区域，包括：
   - 主要内容（文字、图像、图标等）
   - 次要内容（装饰元素、背景图案等）
   - 边缘内容（可能被忽略的内容）
   - 淡色或半透明内容

2. 标记所有内容区域的边界框
3. 给每个区域一个内容类型标签
4. 评估每个区域的重要性

请返回详细的分析结果：
{
  "content_regions": [
    {
      "bbox": [x1, y1, x2, y2],
      "content_type": "text/image/icon/decoration/other",
      "importance": "high/medium/low",
      "description": "区域内容描述"
    }
  ],
  "overall_confidence": 0.95,
  "analysis_summary": "整体分析摘要"
}

重要原则：
- 不要遗漏任何可见内容
- 宁可包含空白区域，也不能遗漏内容
- 特别注意图片边缘和角落
- 识别所有可能的内容元素
"""
            
            analysis_result = await self._call_ollama_for_full_analysis(image_path, full_image_prompt)
            
            # Only the region list is consumed downstream; the model's
            # overall_confidence / analysis_summary fields are discarded here.
            if analysis_result and 'content_regions' in analysis_result:
                self.logger.info(f"全图分析识别到 {len(analysis_result['content_regions'])} 个内容区域")
                return analysis_result['content_regions']
            
            return None
            
        except Exception as e:
            self.logger.error(f"全图内容分析失败: {e}")
            return None
    
    async def _intelligent_crop_based_on_content(self, image_path: str, 
                                               content_regions: List[Dict[str, Any]], 
                                               image_width: int, 
                                               image_height: int) -> List[float]:
        """基于内容识别结果进行智能裁剪"""
        try:
            self.logger.info("开始基于内容的智能裁剪...")
            
            # 步骤1：合并所有内容区域
            merged_bbox = self._merge_content_regions(content_regions, image_width, image_height)
            
            # 步骤2：漫画内容智能过滤（新增）
            if self.manga_mode:
                filtered_bbox = await self._filter_manga_boundaries(image_path, merged_bbox)
            else:
                filtered_bbox = merged_bbox
            
            # 步骤3：应用内容填充
            padded_bbox = self._apply_content_padding(filtered_bbox, image_width, image_height)
            
            # 步骤4：验证裁剪结果
            validated_bbox = self._validate_crop_result(image_path, padded_bbox, content_regions)
            
            # 步骤5：最终安全检查
            final_bbox = self._final_safety_check(validated_bbox, image_width, image_height)
            
            self.logger.info(f"智能裁剪完成: {final_bbox[:4]}")
            return final_bbox
            
        except Exception as e:
            self.logger.error(f"智能裁剪失败: {e}")
            return [0, 0, image_width, image_height, 0.8]
    
    def _merge_content_regions(self, content_regions: List[Dict[str, Any]], 
                             image_width: int, image_height: int) -> List[float]:
        """合并所有内容区域为一个边界框"""
        try:
            if not content_regions:
                return [0, 0, image_width, image_height]
            
            # 初始化边界
            min_x = image_width
            min_y = image_height
            max_x = 0
            max_y = 0
            
            # 遍历所有内容区域
            for region in content_regions:
                bbox = region.get('bbox', [0, 0, 0, 0])
                importance = region.get('importance', 'medium')
                
                # 根据重要性调整权重
                weight = self._get_importance_weight(importance)
                
                x1, y1, x2, y2 = bbox
                
                # 应用权重扩展边界（高重要性内容扩展更多）
                expansion_x = int((x2 - x1) * weight * 0.1)
                expansion_y = int((y2 - y1) * weight * 0.1)
                
                min_x = min(min_x, max(0, x1 - expansion_x))
                min_y = min(min_y, max(0, y1 - expansion_y))
                max_x = max(max_x, min(image_width, x2 + expansion_x))
                max_y = max(max_y, min(image_height, y2 + expansion_y))
            
            merged_bbox = [min_x, min_y, max_x, max_y, 0.9]
            
            self.logger.info(f"内容区域合并结果: {merged_bbox[:4]}")
            return merged_bbox
            
        except Exception as e:
            self.logger.error(f"内容区域合并失败: {e}")
            return [0, 0, image_width, image_height, 0.8]
    
    def _get_importance_weight(self, importance: str) -> float:
        """获取重要性权重"""
        weights = {
            'high': 1.0,
            'medium': 0.7,
            'low': 0.4
        }
        return weights.get(importance, 0.7)
    
    def _apply_content_padding(self, bbox: List[float], image_width: int, image_height: int) -> List[float]:
        """应用内容填充"""
        try:
            x1, y1, x2, y2, confidence = bbox
            
            # 计算填充边距
            padding_x = int(image_width * self.content_padding)
            padding_y = int(image_height * self.content_padding)
            
            # 应用填充
            padded_x1 = max(0, x1 - padding_x)
            padded_y1 = max(0, y1 - padding_y)
            padded_x2 = min(image_width, x2 + padding_x)
            padded_y2 = min(image_height, y2 + padding_y)
            
            padded_bbox = [padded_x1, padded_y1, padded_x2, padded_y2, confidence]
            
            self.logger.info(f"内容填充应用: {bbox[:4]} -> {padded_bbox[:4]}")
            return padded_bbox
            
        except Exception as e:
            self.logger.error(f"内容填充失败: {e}")
            return bbox
    
    def _validate_crop_result(self, image_path: str, bbox: List[float], 
                            content_regions: List[Dict[str, Any]]) -> List[float]:
        """验证裁剪结果是否包含所有内容"""
        try:
            x1, y1, x2, y2, confidence = bbox
            
            # 检查是否所有内容区域都在裁剪框内
            missing_regions = []
            for region in content_regions:
                region_bbox = region.get('bbox', [0, 0, 0, 0])
                rx1, ry1, rx2, ry2 = region_bbox
                
                # 如果内容区域不完全在裁剪框内
                if not (x1 <= rx1 and y1 <= ry1 and x2 >= rx2 and y2 >= ry2):
                    missing_regions.append(region)
            
            if missing_regions:
                self.logger.warning(f"发现 {len(missing_regions)} 个区域在裁剪框外，调整边界...")
                
                # 扩展边界以包含遗漏的区域
                for region in missing_regions:
                    rx1, ry1, rx2, ry2 = region.get('bbox', [0, 0, 0, 0])
                    x1 = min(x1, rx1)
                    y1 = min(y1, ry1)
                    x2 = max(x2, rx2)
                    y2 = max(y2, ry2)
                
                validated_bbox = [x1, y1, x2, y2, confidence * 0.9]  # 略微降低置信度
                self.logger.info(f"验证后调整边界: {validated_bbox[:4]}")
                return validated_bbox
            
            return bbox
            
        except Exception as e:
            self.logger.error(f"裁剪结果验证失败: {e}")
            return bbox
    
    def _final_safety_check(self, bbox: List[float], image_width: int, image_height: int) -> List[float]:
        """最终安全检查"""
        try:
            x1, y1, x2, y2, confidence = bbox
            
            # 确保边界在图像范围内
            safe_x1 = max(0, min(x1, image_width - 1))
            safe_y1 = max(0, min(y1, image_height - 1))
            safe_x2 = max(safe_x1 + 1, min(x2, image_width))
            safe_y2 = max(safe_y1 + 1, min(y2, image_height))
            
            # 确保最小尺寸
            min_size = 50
            if safe_x2 - safe_x1 < min_size:
                center_x = (safe_x1 + safe_x2) / 2
                safe_x1 = max(0, center_x - min_size // 2)
                safe_x2 = min(image_width, center_x + min_size // 2)
            
            if safe_y2 - safe_y1 < min_size:
                center_y = (safe_y1 + safe_y2) / 2
                safe_y1 = max(0, center_y - min_size // 2)
                safe_y2 = min(image_height, center_y + min_size // 2)
            
            final_bbox = [int(safe_x1), int(safe_y1), int(safe_x2), int(safe_y2), confidence]
            
            self.logger.info(f"最终安全检查: {final_bbox[:4]}")
            return final_bbox
            
        except Exception as e:
            self.logger.error(f"最终安全检查失败: {e}")
            return [0, 0, image_width, image_height, 0.7]
    
    async def _filter_manga_boundaries(self, image_path: str, bbox: List[float]) -> List[float]:
        """漫画边界智能过滤 - 移除分镜间隔和纯黑边界"""
        try:
            self.logger.info("开始漫画边界智能过滤...")
            
            with Image.open(image_path) as img:
                x1, y1, x2, y2, confidence = bbox
                
                # 转换为灰度图便于分析
                gray_img = img.convert('L')
                
                # 步骤1：检测并移除顶部黑色/空白边界
                new_y1 = await self._detect_top_content_boundary(gray_img, y1, y2)
                
                # 步骤2：检测并移除底部黑色/空白边界
                new_y2 = await self._detect_bottom_content_boundary(gray_img, new_y1, y2)
                
                # 步骤3：检测并移除左侧黑色/空白边界
                new_x1 = await self._detect_left_content_boundary(gray_img, x1, x2, new_y1, new_y2)
                
                # 步骤4：检测并移除右侧黑色/空白边界
                new_x2 = await self._detect_right_content_boundary(gray_img, new_x1, x2, new_y1, new_y2)
                
                # 步骤5：检测并移除中间的水平分镜间隔
                filtered_y_regions = await self._filter_horizontal_gaps(gray_img, new_x1, new_x2, new_y1, new_y2)
                
                # 步骤6：检测并移除中间的垂直分镜间隔
                filtered_x_regions = await self._filter_vertical_gaps(gray_img, new_y1, new_y2, new_x1, new_x2)
                
                # 如果检测到多个内容区域，选择最大的一个
                if len(filtered_y_regions) > 1:
                    # 选择最大的连续内容区域
                    largest_region = max(filtered_y_regions, key=lambda region: region[1] - region[0])
                    new_y1, new_y2 = largest_region
                    self.logger.info(f"检测到水平分镜间隔，选择最大内容区域: {new_y1}-{new_y2}")
                
                if len(filtered_x_regions) > 1:
                    # 选择最大的连续内容区域
                    largest_region = max(filtered_x_regions, key=lambda region: region[1] - region[0])
                    new_x1, new_x2 = largest_region
                    self.logger.info(f"检测到垂直分镜间隔，选择最大内容区域: {new_x1}-{new_x2}")
                
                filtered_bbox = [new_x1, new_y1, new_x2, new_y2, confidence]
                
                # 记录过滤结果
                original_area = (x2 - x1) * (y2 - y1)
                filtered_area = (new_x2 - new_x1) * (new_y2 - new_y1)
                reduction_ratio = (original_area - filtered_area) / original_area
                
                self.logger.info(f"漫画边界过滤完成: {bbox[:4]} -> {filtered_bbox[:4]}")
                self.logger.info(f"过滤掉空白区域比例: {reduction_ratio:.2%}")
                
                return filtered_bbox
                
        except Exception as e:
            self.logger.error(f"漫画边界过滤失败: {e}")
            return bbox
    
    async def _detect_top_content_boundary(self, gray_img: Image.Image, 
                                        start_y: int, end_y: int) -> int:
        """检测顶部内容边界"""
        try:
            width, height = gray_img.size
            
            # 从上往下扫描，找到第一个有内容的行
            for y in range(start_y, min(end_y, start_y + 200)):  # 最多检查200像素
                # 检查这一行是否有内容
                row_pixels = list(gray_img.crop((0, y, width, y + 1)).getdata())
                
                # 计算这一行的平均亮度
                avg_brightness = sum(row_pixels) / len(row_pixels)
                
                # 如果平均亮度低于黑色阈值，说明是黑色边界
                if avg_brightness < self.black_threshold:
                    continue
                
                # 检查是否有足够多的非黑色像素
                non_black_count = sum(1 for pixel in row_pixels if pixel > self.black_threshold)
                non_black_ratio = non_black_count / len(row_pixels)
                
                # 如果非黑色像素比例超过阈值，认为找到内容边界
                if non_black_ratio > 0.1:  # 至少10%的像素不是黑色
                    self.logger.info(f"检测到顶部内容边界: y={y}")
                    return y
            
            # 如果没找到，返回原始位置
            return start_y
            
        except Exception as e:
            self.logger.error(f"检测顶部边界失败: {e}")
            return start_y
    
    async def _detect_bottom_content_boundary(self, gray_img: Image.Image, 
                                           start_y: int, end_y: int) -> int:
        """检测底部内容边界"""
        try:
            width, height = gray_img.size
            
            # 从下往上扫描，找到最后一个有内容的行
            for y in range(end_y - 1, max(start_y, end_y - 200), -1):  # 最多检查200像素
                # 检查这一行是否有内容
                row_pixels = list(gray_img.crop((0, y, width, y + 1)).getdata())
                
                # 计算这一行的平均亮度
                avg_brightness = sum(row_pixels) / len(row_pixels)
                
                # 如果平均亮度低于黑色阈值，说明是黑色边界
                if avg_brightness < self.black_threshold:
                    continue
                
                # 检查是否有足够多的非黑色像素
                non_black_count = sum(1 for pixel in row_pixels if pixel > self.black_threshold)
                non_black_ratio = non_black_count / len(row_pixels)
                
                # 如果非黑色像素比例超过阈值，认为找到内容边界
                if non_black_ratio > 0.1:  # 至少10%的像素不是黑色
                    self.logger.info(f"检测到底部内容边界: y={y + 1}")
                    return y + 1
            
            # 如果没找到，返回原始位置
            return end_y
            
        except Exception as e:
            self.logger.error(f"检测底部边界失败: {e}")
            return end_y
    
    async def _detect_left_content_boundary(self, gray_img: Image.Image, 
                                          start_x: int, end_x: int, 
                                          start_y: int, end_y: int) -> int:
        """检测左侧内容边界"""
        try:
            # 从左往右扫描，找到第一个有内容的列
            for x in range(start_x, min(end_x, start_x + 200)):  # 最多检查200像素
                # 检查这一列在内容区域内是否有内容
                column_pixels = list(gray_img.crop((x, start_y, x + 1, end_y)).getdata())
                
                # 计算这一列的平均亮度
                avg_brightness = sum(column_pixels) / len(column_pixels)
                
                # 如果平均亮度低于黑色阈值，说明是黑色边界
                if avg_brightness < self.black_threshold:
                    continue
                
                # 检查是否有足够多的非黑色像素
                non_black_count = sum(1 for pixel in column_pixels if pixel > self.black_threshold)
                non_black_ratio = non_black_count / len(column_pixels)
                
                # 如果非黑色像素比例超过阈值，认为找到内容边界
                if non_black_ratio > 0.1:  # 至少10%的像素不是黑色
                    self.logger.info(f"检测到左侧内容边界: x={x}")
                    return x
            
            # 如果没找到，返回原始位置
            return start_x
            
        except Exception as e:
            self.logger.error(f"检测左侧边界失败: {e}")
            return start_x
    
    async def _detect_right_content_boundary(self, gray_img: Image.Image, 
                                           start_x: int, end_x: int,
                                           start_y: int, end_y: int) -> int:
        """检测右侧内容边界"""
        try:
            # 从右往左扫描，找到最后一个有内容的列
            for x in range(end_x - 1, max(start_x, end_x - 200), -1):  # 最多检查200像素
                # 检查这一列在内容区域内是否有内容
                column_pixels = list(gray_img.crop((x, start_y, x + 1, end_y)).getdata())
                
                # 计算这一列的平均亮度
                avg_brightness = sum(column_pixels) / len(column_pixels)
                
                # 如果平均亮度低于黑色阈值，说明是黑色边界
                if avg_brightness < self.black_threshold:
                    continue
                
                # 检查是否有足够多的非黑色像素
                non_black_count = sum(1 for pixel in column_pixels if pixel > self.black_threshold)
                non_black_ratio = non_black_count / len(column_pixels)
                
                # 如果非黑色像素比例超过阈值，认为找到内容边界
                if non_black_ratio > 0.1:  # 至少10%的像素不是黑色
                    self.logger.info(f"检测到右侧内容边界: x={x + 1}")
                    return x + 1
            
            # 如果没找到，返回原始位置
            return end_x
            
        except Exception as e:
            self.logger.error(f"检测右侧边界失败: {e}")
            return end_x
    
    async def _filter_horizontal_gaps(self, gray_img: Image.Image, 
                                    x1: int, x2: int, y1: int, y2: int) -> List[Tuple[int, int]]:
        """Split [y1, y2] into content bands separated by dark horizontal gutters.

        Scans every row inside the x1..x2 column range; a row whose mean
        brightness falls below self.black_threshold is treated as panel
        gutter. Bands shorter than self.min_content_size are dropped.
        Returns a list of (start_y, end_y) bands; falls back to
        [(y1, y2)] when no band survives or on any error.
        """
        try:
            content_regions = []
            current_region_start = y1
            in_gap = False
            
            # Row-by-row scan: classify each row as content or gutter.
            for y in range(y1, y2 + 1):
                # One-row strip restricted to the x range under inspection.
                # NOTE(review): y2 may equal the image height, in which case
                # this crop reaches one row past the image — PIL pads with
                # black there; confirm that is the intended gutter behavior.
                row_pixels = list(gray_img.crop((x1, y, x2, y + 1)).getdata())
                
                # Mean brightness of the row.
                avg_brightness = sum(row_pixels) / len(row_pixels)
                
                # A near-black row is assumed to be a panel gutter.
                is_gap = avg_brightness < self.black_threshold
                
                if is_gap and not in_gap:
                    # Content -> gutter transition: close the current band.
                    in_gap = True
                    if y - current_region_start >= self.min_content_size:
                        # Keep the band only if it is tall enough.
                        content_regions.append((current_region_start, y - 1))
                
                elif not is_gap and in_gap:
                    # Gutter -> content transition: a new band starts here.
                    in_gap = False
                    current_region_start = y
            
            # Close the trailing band (unless the scan ended inside a gutter).
            if not in_gap and y2 - current_region_start >= self.min_content_size:
                content_regions.append((current_region_start, y2))
            
            # No band survived: treat the whole span as one region.
            if not content_regions:
                content_regions = [(y1, y2)]
            
            self.logger.info(f"水平分镜间隔检测完成，发现 {len(content_regions)} 个内容区域")
            return content_regions
            
        except Exception as e:
            self.logger.error(f"过滤水平分镜间隔失败: {e}")
            return [(y1, y2)]
    
    async def _filter_vertical_gaps(self, gray_img: Image.Image, 
                                  y1: int, y2: int, x1: int, x2: int) -> List[Tuple[int, int]]:
        """Split [x1, x2] into content bands separated by dark vertical gutters.

        Column-wise mirror of _filter_horizontal_gaps: scans every column
        inside the y1..y2 row range; a column whose mean brightness falls
        below self.black_threshold is treated as panel gutter. Bands
        narrower than self.min_content_size are dropped. Returns a list of
        (start_x, end_x) bands; falls back to [(x1, x2)] when no band
        survives or on any error.
        """
        try:
            content_regions = []
            current_region_start = x1
            in_gap = False
            
            # Column-by-column scan: classify each column as content or gutter.
            for x in range(x1, x2 + 1):
                # One-column strip restricted to the y range under inspection.
                column_pixels = list(gray_img.crop((x, y1, x + 1, y2)).getdata())
                
                # Mean brightness of the column.
                avg_brightness = sum(column_pixels) / len(column_pixels)
                
                # A near-black column is assumed to be a panel gutter.
                is_gap = avg_brightness < self.black_threshold
                
                if is_gap and not in_gap:
                    # Content -> gutter transition: close the current band.
                    in_gap = True
                    if x - current_region_start >= self.min_content_size:
                        # Keep the band only if it is wide enough.
                        content_regions.append((current_region_start, x - 1))
                
                elif not is_gap and in_gap:
                    # Gutter -> content transition: a new band starts here.
                    in_gap = False
                    current_region_start = x
            
            # Close the trailing band (unless the scan ended inside a gutter).
            if not in_gap and x2 - current_region_start >= self.min_content_size:
                content_regions.append((current_region_start, x2))
            
            # No band survived: treat the whole span as one region.
            if not content_regions:
                content_regions = [(x1, x2)]
            
            self.logger.info(f"垂直分镜间隔检测完成，发现 {len(content_regions)} 个内容区域")
            return content_regions
            
        except Exception as e:
            self.logger.error(f"过滤垂直分镜间隔失败: {e}")
            return [(x1, x2)]
    
    async def _call_ollama_for_full_analysis(self, image_path: str, prompt: str) -> Optional[Dict[str, Any]]:
        """调用Ollama API进行全图分析"""
        try:
            # 编码图片
            with open(image_path, 'rb') as f:
                image_data = f.read()
            base64_image = base64.b64encode(image_data).decode('utf-8')
            
            # 准备请求数据
            payload = {
                "model": self.get_current_model(),
                "prompt": prompt,
                "images": [base64_image],
                "stream": False,
                "options": {
                    "temperature": 0.1,
                    "top_p": 0.9,
                    "max_tokens": 2048
                }
            }
            
            # 发送请求
            response = requests.post(
                f"{self.base_url}/api/generate",
                json=payload,
                timeout=self.timeout
            )
            
            if response.status_code == 200:
                result = response.json()
                content = result.get('response', '')
                
                # 尝试解析JSON响应
                try:
                    # 提取JSON部分
                    start_idx = content.find('{')
                    end_idx = content.rfind('}') + 1
                    
                    if start_idx != -1 and end_idx > start_idx:
                        json_str = content[start_idx:end_idx]
                        analysis_result = json.loads(json_str)
                        return analysis_result
                    else:
                        self.logger.warning("无法从响应中提取JSON")
                        return None
                        
                except json.JSONDecodeError as e:
                    self.logger.error(f"JSON解析失败: {e}")
                    self.logger.debug(f"原始响应: {content}")
                    return None
            else:
                self.logger.error(f"Ollama API请求失败: {response.status_code}")
                return None
                
        except Exception as e:
            self.logger.error(f"调用Ollama API失败: {e}")
            return None
    
    async def _save_full_analysis_visualization(self, file_info: Dict[str, Any], 
                                             final_bbox: List[float],
                                             content_regions: List[Dict[str, Any]]):
        """保存全图分析可视化结果"""
        try:
            if not self.config.get('output', {}).get('save_bbox_visualization', True):
                return
            
            with Image.open(file_info['processed_path']) as img:
                draw = ImageDraw.Draw(img)
                
                # 绘制所有内容区域
                colors = {
                    'text': 'red',
                    'image': 'blue', 
                    'icon': 'green',
                    'decoration': 'yellow',
                    'other': 'purple'
                }
                
                for region in content_regions:
                    bbox = region.get('bbox', [0, 0, 0, 0])
                    content_type = region.get('content_type', 'other')
                    importance = region.get('importance', 'medium')
                    
                    x1, y1, x2, y2 = bbox
                    
                    # 根据重要性设置线条宽度
                    width = 3 if importance == 'high' else 2 if importance == 'medium' else 1
                    
                    # 绘制内容区域边界
                    draw.rectangle([x1, y1, x2, y2], 
                                 outline=colors.get(content_type, 'gray'), 
                                 width=width)
                
                # 绘制最终裁剪框（粗线）
                fx1, fy1, fx2, fy2 = final_bbox[:4]
                draw.rectangle([fx1, fy1, fx2, fy2], 
                             outline='white', width=4)
                
                # 保存可视化结果
                vis_filename = f"full_analysis_{file_info['processed_name']}"
                vis_path = Path(self.config['output']['output_dir']) / 'visualizations' / vis_filename
                
                vis_path.parent.mkdir(parents=True, exist_ok=True)
                img.save(vis_path)
                
                self.logger.info(f"全图分析可视化已保存: {vis_path}")
                
        except Exception as e:
            self.logger.error(f"保存全图分析可视化失败: {e}")