# backend/medical_analysis.py
"""
医学影像分析服务模块
从demo4.py提取的核心功能，适配为API服务
"""
import os
import base64
import json
import time
import requests
import re
from datetime import datetime
from PIL import Image
from typing import Dict, Any, Optional, List
from fastapi import HTTPException
import dashscope
import io
import traceback
import torch

# PyTorch 2.6+ weights_only compatibility patch.
# Newer PyTorch defaults torch.load(weights_only=True), which breaks legacy
# checkpoints that pickle arbitrary objects; this shim restores the old default.
# SECURITY NOTE(review): weights_only=False re-enables full pickle loading,
# which can execute arbitrary code — only load trusted checkpoint files.
original_torch_load = torch.load
def patched_torch_load(*args, **kwargs):
    # Only inject the flag when the caller did not choose one explicitly.
    if 'weights_only' not in kwargs:
        kwargs['weights_only'] = False
    return original_torch_load(*args, **kwargs)
torch.load = patched_torch_load

# DashScope SDK configuration.
# SECURITY FIX(review): the API key used to be hard-coded only. It is now read
# from the DASHSCOPE_API_KEY environment variable; the old literal remains as a
# backward-compatible fallback and should be rotated and removed from source.
dashscope.api_key = os.environ.get("DASHSCOPE_API_KEY", "sk-f3d1ba248f4e47c3877fc410683a8875")
dashscope.base_http_api_url = 'https://dashscope.aliyuncs.com/api/v1'

# Runtime configuration flags.
TEST_MODE = False  # When True, canned mock results replace real API calls.
ENABLE_VERBOSE_LOGGING = False  # Gates log_verbose() output.
MAX_HISTORY_LENGTH = 5  # Max Q/A turns forwarded to the remote model.

def log_verbose(message: str):
    """Print *message* only when ENABLE_VERBOSE_LOGGING is enabled."""
    if ENABLE_VERBOSE_LOGGING:
        print(message)

# Remote model-inference service configuration (overridable via environment).
MODEL_INFERENCE_SERVICE_URL = os.environ.get("MODEL_INFERENCE_SERVICE_URL", "http://localhost:8010")
MODEL_INFERENCE_API_TOKEN = os.environ.get("MODEL_INFERENCE_API_TOKEN", None)
USE_REMOTE_INFERENCE = os.environ.get("USE_REMOTE_INFERENCE", "true").lower() == "true"

class MedicalImageAnalyzer:
    """Medical-image analysis service backed by Qwen-VL with an optional remote XrayGLM inference backend."""

    def __init__(self):
        """Initialize with placeholder patient demographics."""
        # Default patient record; fields are overwritten via update_patient_info().
        self.patient_info = dict(
            name="未填写",
            age="未知",
            gender="未填写",
            history="无",
        )
    
    
    def _call_remote_inference(self, input_text: str, image_path: str, 
                               history: List = None, temperature: float = 0.7, 
                               top_p: float = 0.7, stream: bool = False) -> str:
        """
        Call the remote model-inference service.

        Args:
            input_text: The user's question.
            image_path: Path of the image on disk (as known to the remote service).
            history: Conversation history in API format
                ([{'role': ..., 'content': ...}, ...]).
            temperature: Sampling temperature.
            top_p: Nucleus-sampling parameter.
            stream: Whether to hit the streaming endpoint. Note: even then,
                all chunks are collected and the full text is returned.

        Returns:
            The generated text.

        Raises:
            Exception: When the remote service is unreachable or reports failure.
        """
        try:
            # Convert history from API format to XrayGLM format: a list of
            # [question, answer] pairs. System messages are dropped, and a
            # trailing user question without an answer is ignored.
            xrayglm_history = []
            if history:
                temp_question = None
                for msg in history:
                    role = msg.get('role', '').lower()
                    content = msg.get('content', '')
                    
                    if role == 'system':
                        continue
                        
                    if role in ['user', 'human']:
                        temp_question = content
                    elif role in ['assistant', 'ai']:
                        if temp_question:
                            xrayglm_history.append([temp_question, content])
                            temp_question = None
            
            # Keep only the most recent turns to bound the prompt size.
            if len(xrayglm_history) > MAX_HISTORY_LENGTH:
                xrayglm_history = xrayglm_history[-MAX_HISTORY_LENGTH:]
            
            # Build the request URL and payload.
            url = f"{MODEL_INFERENCE_SERVICE_URL}/generate"
            if stream:
                url = f"{MODEL_INFERENCE_SERVICE_URL}/generate_stream"
            
            payload = {
                "input_text": input_text,
                "image_path": image_path,
                "history": xrayglm_history,
                "temperature": temperature,
                "top_p": top_p,
                "top_k": 30,
                "max_length": 512,
                "repetition_penalty": 1.2
            }
            
            headers = {"Content-Type": "application/json"}
            if MODEL_INFERENCE_API_TOKEN:
                headers["Authorization"] = f"Bearer {MODEL_INFERENCE_API_TOKEN}"
            
            if stream:
                # Streaming request: drain the chunked body into one string.
                response = requests.post(url, json=payload, headers=headers, stream=True, timeout=300)
                response.raise_for_status()
                
                # Collect every chunk before returning.
                full_text = ""
                for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
                    if chunk:
                        full_text += chunk
                return full_text
            else:
                # Non-streaming request: expect a JSON envelope with
                # 'success' and either 'text' or 'error'.
                response = requests.post(url, json=payload, headers=headers, timeout=300)
                response.raise_for_status()
                result = response.json()
                
                if result.get("success", False):
                    return result.get("text", "")
                else:
                    error_msg = result.get("error", "Unknown error")
                    raise Exception(f"Remote inference failed: {error_msg}")
                    
        except requests.exceptions.RequestException as e:
            # Network-level failure: wrap so callers can fall back to the API path.
            print(f"❌ 远程推理服务调用失败: {str(e)}")
            raise Exception(f"Failed to call remote inference service: {str(e)}")
        except Exception as e:
            print(f"❌ 远程推理处理失败: {str(e)}")
            raise
    
    def preprocess_medical_image(self, image_path: str, target_size: int = 512) -> str:
        """Preprocess a medical image for analysis.

        Applies the EXIF orientation tag, rescales so the longest side equals
        ``target_size``, converts to RGB, and saves a ``*_processed.*`` copy
        next to the original.

        Args:
            image_path: Path to the source image.
            target_size: Length (px) of the longest side after scaling.

        Returns:
            Path of the processed copy.

        Raises:
            HTTPException: 400 when the image cannot be read or processed.
        """
        try:
            image = Image.open(image_path)
            
            # Honour the EXIF orientation tag (0x0112) so rotated photos
            # are upright before analysis.
            if hasattr(image, '_getexif'):
                exif = image._getexif()
                if exif:
                    orientation = exif.get(0x0112, 1)
                    if orientation == 3:
                        image = image.rotate(180, expand=True)
                    elif orientation == 6:
                        image = image.rotate(270, expand=True)
                    elif orientation == 8:
                        image = image.rotate(90, expand=True)
            
            # Scale so the longest side equals target_size (this also
            # upscales smaller images, matching the original behaviour).
            width, height = image.size
            scale = target_size / max(width, height)
            new_size = (int(width*scale), int(height*scale))
            # Resampling enum for consistency with preprocess_image_for_api
            # (bare Image.LANCZOS is deprecated in recent Pillow).
            image = image.resize(new_size, Image.Resampling.LANCZOS)
            
            # Ensure RGB mode so JPEG/standard saving works.
            if image.mode != 'RGB':
                image = image.convert('RGB')
            
            # BUGFIX: the previous `image_path.replace('.', '_processed.')`
            # mangled EVERY dot in the path (dotted directories, multi-dot
            # names) and, for extension-less paths, silently overwrote the
            # original file. Build the path from the real extension instead.
            base, ext = os.path.splitext(image_path)
            processed_path = f"{base}_processed{ext}"
            image.save(processed_path, quality=95)
            return processed_path
            
        except Exception as e:
            raise HTTPException(status_code=400, detail=f"图像预处理失败: {str(e)}")
    
    def preprocess_image_for_api(self, image_path: str) -> str:
        """
        预处理图像以加速API调用
        - 缩放到1024x1024（保持宽高比）
        - 转换为JPEG格式（quality=90）
        - 返回base64编码字符串
        """
        try:
            print("⚡ 开始图像预处理...")
            image = Image.open(image_path)
            
            # 1. 缩放到1024x1024（保持宽高比）
            max_size = 1024
            width, height = image.size
            
            if width > max_size or height > max_size:
                scale = min(max_size / width, max_size / height)
                new_width = int(width * scale)
                new_height = int(height * scale)
                image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
                print(f"   ✅ 图像缩放: {width}x{height} → {new_width}x{new_height}")
            else:
                print(f"   ℹ️  图像尺寸({width}x{height})已小于1024，无需缩放")
            
            # 2. 转换为RGB（确保兼容JPEG）
            if image.mode != 'RGB':
                image = image.convert('RGB')
                print(f"   ✅ 图像模式转换为RGB")
            
            # 3. 转换为JPEG并压缩（quality=90）
            from io import BytesIO
            buffered = BytesIO()
            image.save(buffered, format="JPEG", quality=90, optimize=True)
            buffered.seek(0)
            
            # 获取压缩后的大小
            original_size = os.path.getsize(image_path) / 1024  # KB
            compressed_size = len(buffered.getvalue()) / 1024  # KB
            compression_ratio = (1 - compressed_size / original_size) * 100 if original_size > 0 else 0
            
            print(f"   ✅ 图像压缩: {original_size:.1f}KB → {compressed_size:.1f}KB (压缩{compression_ratio:.1f}%)")
            
            # 4. Base64编码
            image_data = base64.b64encode(buffered.getvalue()).decode('utf-8')
            print("   ✅ 图像预处理完成！")
            
            return image_data
            
        except Exception as e:
            print(f"   ⚠️  图像预处理失败，使用原图: {str(e)}")
            # 降级处理：使用原图
            with open(image_path, "rb") as f:
                return base64.b64encode(f.read()).decode('utf-8')
    
    def _build_image_data_uri(self, image_path: str) -> str:
        """Build a data:image/jpeg;base64,... URI in the form Qwen expects."""
        encoded = self.preprocess_image_for_api(image_path)
        # Strip any pre-existing data-URI prefix so we never double-wrap.
        if ',' in encoded:
            payload = encoded.split(',', 1)[-1]
        else:
            payload = encoded
        if payload.startswith('data:'):
            if ',' in payload:
                payload = payload.split(',', 1)[-1]
            else:
                payload = payload.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '')
        image_data_uri = f"data:image/jpeg;base64,{payload}"
        print(f"   ✅ Data URI构建完成: {image_data_uri[:60]}...")
        return image_data_uri
    
    def _build_qwen_messages(self, prompt: str, image_data_uri: str) -> List[Dict[str, Any]]:
        """Wrap the prompt and image into Qwen's multimodal message format."""
        content = [{"image": image_data_uri}, {"text": prompt}]
        return [{"role": "user", "content": content}]
    
    def _extract_qwen_text(self, content: Any) -> str:
        """Normalize a Qwen response content payload into plain text."""
        if isinstance(content, str):
            return content
        if isinstance(content, list):
            if not content:
                return ""
            head = content[0]
            return head['text'] if isinstance(head, dict) and 'text' in head else str(head)
        return "" if content is None else str(content)
    
    def _mock_stream_response(self, prompt: str):
        """Yield the mock analysis result in small chunks to imitate streaming."""
        canned = self.get_mock_analysis_result(prompt)

        def chunks():
            step = 20
            position = 0
            while position < len(canned):
                yield canned[position:position + step]
                time.sleep(0.05)
                position += step
        return chunks()
    
    def call_qwen_api(self, image_path: str, prompt: str, stream: bool = False):
        """Unified entry point for Qwen multimodal calls.

        Returns a chunk generator when ``stream`` is True, otherwise the full
        response text. Failures are reported as text (or a one-shot error
        generator), never raised to the caller.
        """
        if TEST_MODE:
            return self._mock_stream_response(prompt) if stream else self.get_mock_analysis_result(prompt)
        try:
            image_data_uri = self._build_image_data_uri(image_path)
            messages = self._build_qwen_messages(prompt, image_data_uri)
            if stream:
                return self._call_qwen_api_stream(messages, prompt, image_path=image_path)
            return self._call_qwen_api_normal(messages, prompt, image_path=image_path)
        except Exception as e:
            print(f"❌ Qwen API 调用准备失败: {e}")
            import traceback
            traceback.print_exc()
            if stream:
                # BUGFIX: render the error text eagerly. The old code yielded
                # f"...{str(e)}" lazily inside the generator, but Python
                # unbinds the `except ... as e` name when the handler exits,
                # so consuming the generator raised NameError instead of
                # yielding the message.
                error_text = f"❌ 生成失败: {str(e)}"

                def error_generator():
                    yield error_text
                return error_generator()
            return f"❌ Qwen API 调用失败: {str(e)}"
    
    def _call_qwen_api_normal(self, messages: List[Dict[str, Any]], prompt: str, image_path: Optional[str] = None) -> str:
        """Standard non-streaming Qwen request over pre-built messages.

        Sends a multimodal-generation request via raw HTTP (not the SDK) and
        returns the extracted text. Every failure mode — HTTP errors,
        timeouts, parse errors — is returned as a human-readable error string
        rather than raised, so callers can display it directly.
        """
        try:
            if TEST_MODE:
                print("使用测试模式，返回模拟分析结果")
                return self.get_mock_analysis_result(prompt)

            print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
            print("🚀 开始调用通义千问API（非流式模式）")
            if image_path:
                print(f"   图像路径: {image_path}")
            print(f"   提示词: {prompt[:100]}...")

            # Request body for the DashScope multimodal-generation endpoint.
            payload = {
                "model": "qwen-vl-plus",
                "input": {"messages": messages},
                "parameters": {
                    "temperature": 0.5,
                    "top_p": 0.9,
                    "max_tokens": 1500,
                    "incremental_output": False
                }
            }

            qwen_api_key = dashscope.api_key
            qwen_api_url = f"{dashscope.base_http_api_url}/services/aigc/multimodal-generation/generation"
            headers = {
                "Authorization": f"Bearer {qwen_api_key}",
                "Content-Type": "application/json"
            }

            log_verbose(f"📡 发送非流式API请求到: {qwen_api_url}")
            response = requests.post(qwen_api_url, headers=headers, json=payload, timeout=60)
            log_verbose(f"API响应状态码: {response.status_code}")

            if response.status_code == 200:
                result = response.json()
                print("✅ API调用成功")
                log_verbose(f"完整API响应: {json.dumps(result, ensure_ascii=False, indent=2)}")

                # Expected shape: output.choices[0].message.content —
                # either a list of {'text': ...} parts or a plain string.
                content = result["output"]["choices"][0]["message"]["content"]
                extracted = self._extract_qwen_text(content)
                if not extracted.strip():
                    print("⚠️ API返回了空内容")
                    return "API返回了空内容，请检查图像是否有效或稍后重试"
                return extracted

            # Non-200 status: return a descriptive error string, do not raise.
            error_msg = f"""
            API请求失败！
            状态码：{response.status_code}
            错误信息：{response.text}
            可能原因：
            1. API密钥无效或过期
            2. 账户未开通多模态服务
            3. 请求参数不符合要求
            4. 账号余额不足
            """
            print(f"❌ API调用失败: {error_msg}")
            return error_msg

        except requests.exceptions.Timeout:
            error_msg = "\n\n❌ 【超时错误】\nAPI调用超时（60秒）\n请检查:\n- 网络连接是否正常\n- 图像文件是否过大\n"
            print(f"❌ {error_msg}")
            return error_msg

        except requests.exceptions.ConnectionError as conn_err:
            error_msg = f"\n\n❌ 【网络连接错误】\n无法连接到通义千问API\n详情: {str(conn_err)}\n"
            print(f"❌ {error_msg}")
            return error_msg

        except Exception as e:
            import traceback
            tb_str = traceback.format_exc()
            error_msg = f"""

❌ 【系统致命错误】非流式API调用失败

错误类型: {type(e).__name__}
错误详情: {str(e)}

可能原因:
1. 请求构建失败
2. 图像预处理异常
3. API响应解析失败

完整堆栈:
{tb_str}
"""
            print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
            print(f"❌ 非流式API致命异常:\n{error_msg}")
            print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
            return error_msg
    
    def _call_qwen_api_stream(self, messages: List[Dict[str, Any]], prompt: str, image_path: Optional[str] = None):
        """Streaming Qwen call over pre-built messages; yields text deltas.

        Uses the DashScope SDK in stream mode. Errors are yielded as text
        chunks rather than raised, so the consumer can surface them inline.
        """
        try:
            if TEST_MODE:
                print("使用测试模式，返回模拟分析结果（流式）")
                mock_result = self.get_mock_analysis_result(prompt)
                chunk_size = 20
                for i in range(0, len(mock_result), chunk_size):
                    yield mock_result[i:i+chunk_size]
                    time.sleep(0.05)
                return

            print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
            print("🚀 开始调用通义千问API（流式模式）")
            if image_path:
                print(f"   图像路径: {image_path}")
            print(f"   提示词: {prompt[:100]}...")

            print("📦 使用预构建的消息调用 DashScope SDK...")
            responses = dashscope.MultiModalConversation.call(
                model='qwen-vl-plus',
                messages=messages,
                stream=True
            )

            if not responses:
                error_msg = "\n\n❌ 【API调用失败】\nSDK返回了空响应\n"
                print(f"❌ {error_msg}")
                yield error_msg
                return

            print("✅ 流式API连接建立成功，开始接收数据...")
            full_content = ""
            has_content = False

            try:
                for response in responses:
                    if response.status_code != 200:
                        error_code = getattr(response, 'code', 'Unknown')
                        error_message = getattr(response, 'message', '未知错误')
                        error_msg = f"\n\n❌ 【API错误】\n状态码: {response.status_code}\n代码: {error_code}\n消息: {error_message}\n"
                        print(f"❌ Qwen API错误: {error_msg}")
                        yield error_msg
                        return

                    output = getattr(response, 'output', None)
                    if not output or 'choices' not in output:
                        continue

                    choices = output['choices']
                    if not choices:
                        continue

                    message = choices[0].get('message', {})
                    content = message.get('content', '')
                    chunk_text = self._extract_qwen_text(content)

                    # Each event appears to carry the cumulative text so far;
                    # we yield only the new suffix past what was already sent.
                    # NOTE(review): this assumes non-incremental SDK output —
                    # if the SDK ever returns true deltas, shorter chunks are
                    # silently dropped here; confirm against SDK settings.
                    if chunk_text:
                        if len(chunk_text) > len(full_content):
                            delta = chunk_text[len(full_content):]
                            full_content = chunk_text
                            has_content = True
                            yield delta
                            log_verbose(f"   📝 增量: {delta[:50]}...")

                if not has_content:
                    error_msg = "\n\n❌ 【警告】未收到任何有效响应内容\n可能原因：\n- API返回了空响应\n- 图像格式不被支持\n- 请求参数有误\n"
                    print(f"⚠️  {error_msg}")
                    yield error_msg

            except Exception as inner_e:
                # Mid-stream failure: report it as a final chunk.
                error_msg = f"\n\n❌ 【系统错误】流式处理失败\n错误: {str(inner_e)}\n"
                print(f"❌ 流式循环异常: {error_msg}")
                import traceback
                traceback.print_exc()
                yield error_msg

        except Exception as e:
            import traceback
            tb_str = traceback.format_exc()
            error_msg = f"""

❌ 【系统致命错误】流式API调用失败

错误类型: {type(e).__name__}
错误详情: {str(e)}

可能原因:
1. 请求构建失败
2. 图像预处理异常
3. DashScope SDK错误
4. 网络连接问题

完整堆栈:
{tb_str}
"""
            print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
            print(f"❌ 流式API致命异常:\n{error_msg}")
            print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
            yield error_msg
    
    def get_mock_analysis_result(self, custom_prompt: Optional[str] = None) -> str:
        """Return a canned structured analysis used in TEST_MODE.

        A more specific canned report is returned when the prompt contains
        the phrase "请分析这张图片"; otherwise a generic placeholder template.
        """
        if custom_prompt and "请分析这张图片" in custom_prompt:
            return """## 影像表现
1. 主要异常：
- 部位：图像中心区域
- 形态：规则圆形结构
- 密度/信号特征：均匀分布

2. 重要阴性发现
- 无明显占位性病变
- 无异常钙化灶
- 无积液征象

3. 对比既往变化
- 无法对比既往影像

## 初步诊断
1. 最可能诊断（按概率排序）
- 正常影像表现（90%）
- 良性病变（8%）
- 需进一步检查（2%）

2. 鉴别诊断及排除依据
- 排除恶性肿瘤：无恶性征象
- 排除炎症：无炎症表现

## 建议
1. 推荐影像检查
- 建议定期复查
- 如有症状可考虑增强检查

2. 实验室检查
- 常规血液检查
- 肿瘤标志物检查

3. 临床处理方案
- 定期随访观察
- 如有不适及时就医

4. 随访计划
- 建议6个月后复查
- 如有异常及时就诊"""
        else:
            return """## 影像表现
1. 主要异常：
- 部位：待确定
- 形态：需进一步分析
- 密度/信号特征：需详细评估

2. 重要阴性发现
- 无明显急性病变
- 无严重结构异常

3. 对比既往变化
- 无法对比既往影像

## 初步诊断
1. 最可能诊断（按概率排序）
- 需进一步检查确认
- 建议结合临床病史

2. 鉴别诊断及排除依据
- 需排除器质性病变
- 建议完善相关检查

## 建议
1. 推荐影像检查
- 建议完善相关影像检查
- 必要时行增强扫描

2. 实验室检查
- 完善相关实验室检查
- 结合临床症状分析

3. 临床处理方案
- 建议专科会诊
- 根据检查结果制定治疗方案

4. 随访计划
- 建议定期复查
- 密切观察病情变化"""
    
    def sanitize_response(self, response: str) -> str:
        """Clean and normalize a model response into display-ready markdown.

        Unwraps ``[{'text': ...}]`` payloads, strips stray JSON markup,
        normalizes headings and numbering, escapes braces, collapses blank
        lines, and truncates the result to 2500 characters.
        """
        text = str(response)
        
        # Unwrap responses shaped like "[{'text': '...'}]" via literal_eval.
        try:
            if text.startswith('[') and text.endswith(']'):
                import ast
                parsed = ast.literal_eval(text)
                if isinstance(parsed, list) and len(parsed) > 0:
                    if isinstance(parsed[0], dict) and 'text' in parsed[0]:
                        text = parsed[0]['text']
        except Exception:
            # BUGFIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; malformed payloads simply fall
            # through to the regex-based cleanup below.
            pass
        
        # Regex fallback for JSON-ish wrappers that literal_eval missed.
        text = re.sub(r'^\[\{\{?[\'"]text[\'"]:\s*[\'"]', '', text)  # leading [{'text': '
        text = re.sub(r'[\'"][,\s]*\}\}\]$', '', text)  # trailing '}}]
        text = re.sub(r'^\[\{[\'"]text[\'"]:\s*[\'"]', '', text)  # simplified leading form
        text = re.sub(r'[\'"][,\s]*\}\]$', '', text)  # simplified trailing form
        
        # Strip noise characters and formatting artifacts.
        text = re.sub(r'\*{5,}', '', text)
        text = re.sub(r'\bv[nm]\b', '', text)
        text = re.sub(r'\u3000', ' ', text)
        text = text.replace('\\n', '\n')
        
        # Structural normalization: 【x】 → "## x" heading, fix numbering.
        text = re.sub(r'【(.*?)】', r'\n## \1\n', text)
        text = re.sub(r'\*{3,}(\d+\.)', r'\n\1', text)
        text = re.sub(r'(\d+\.)([^\n])', r'\1 \2', text)
        
        # Escape braces (downstream templating) and neutralize data URIs.
        text = text.replace("{", "{{").replace("}", "}}")
        text = text.replace("data:image/", "data-image-")
        
        # Drop empty lines and trim each remaining one.
        text = '\n'.join([line.strip() for line in text.split('\n') if line.strip()])
        
        return text[:2500]
    
    def format_diagnosis(self, text: str) -> str:
        """Second structuring pass: bulletize numbered items and normalize heading spacing."""
        transforms = (
            (r'(\d+\.\s*)([^\n]+)', r'- \2\n'),
            (r'\n##', r'\n\n##'),
            (r'\n{3,}', r'\n\n'),
        )
        for pattern, replacement in transforms:
            text = re.sub(pattern, replacement, text)
        return text
    
    def generate_text_with_local_model(self, input_text: str, image_path: str, 
                                       history: List = None, temperature: float = 0.7, 
                                       top_p: float = 0.7) -> str:
        """Generate a follow-up answer, preferring the remote XrayGLM service.

        Falls back to the Qwen API (with the conversation history folded
        into the prompt) when the remote service fails or is disabled.
        """
        try:
            if USE_REMOTE_INFERENCE:
                try:
                    print("🔄 使用远程模型推理服务...")
                    return self._call_remote_inference(
                        input_text, image_path, history, temperature, top_p, stream=False
                    )
                except Exception as e:
                    # Remote failure is non-fatal: fall through to the API path.
                    print(f"⚠️ 远程推理服务调用失败: {str(e)}")
                    print("🔄 回退到API模式...")
            
            conversation_prompt = self._build_conversation_prompt(input_text, history)
            return self.call_qwen_api(image_path, conversation_prompt, stream=False)
        except Exception as e:
            # Last-resort fallback. NOTE(review): if the API path itself
            # raised above, this retries the exact same call once more —
            # confirm whether that retry is intended.
            print(f"❌ 推理失败: {str(e)}")
            import traceback
            traceback.print_exc()
            print("📌 回退到API模式")
            conversation_prompt = self._build_conversation_prompt(input_text, history)
            return self.call_qwen_api(image_path, conversation_prompt, stream=False)
    
    def generate_text_with_local_model_stream(self, input_text: str, image_path: str, 
                                              history: List = None, temperature: float = 0.7, 
                                              top_p: float = 0.7):
        """Streaming variant: prefer the remote XrayGLM service, fall back to Qwen.

        Yields text chunks. Note the remote branch fetches the complete text
        first and then re-yields it character by character (pseudo-streaming).
        """
        try:
            if USE_REMOTE_INFERENCE:
                try:
                    print("🔄 使用远程模型推理服务（流式）...")
                    full_text = self._call_remote_inference(
                        input_text, image_path, history, temperature, top_p, stream=True
                    )
                    for char in full_text:
                        yield char
                    return
                except Exception as e:
                    # Non-fatal: fall through to the Qwen streaming path.
                    print(f"⚠️ 远程推理服务调用失败: {str(e)}")
                    print("🔄 回退到API流式模式...")
            
            conversation_prompt = self._build_conversation_prompt(input_text, history)
            for chunk in self.call_qwen_api(image_path, conversation_prompt, stream=True):
                yield chunk
        except Exception as e:
            print(f"❌ [流式推理] 失败: {str(e)}")
            import traceback
            traceback.print_exc()
            yield f"❌ 生成失败: {str(e)}"
    
    def analyze_medical_image(self, image_path: str, custom_prompt: str = None) -> Dict[str, Any]:
        """Main medical-image analysis entry point (non-streaming).

        Preprocesses the image, queries the Qwen API with either the
        caller's prompt or the standard radiology template, and returns a
        dict with ``success``/``analysis_result``/``timestamp``/
        ``patient_info`` (or ``success: False`` plus ``error`` on failure).
        """
        try:
            processed_image = self.preprocess_medical_image(image_path)
            try:
                # Build the analysis prompt.
                if custom_prompt:
                    diagnosis_prompt = custom_prompt
                else:
                    diagnosis_prompt = """作为资深放射科医生，请严格按以下结构化格式分析：

## 影像表现
1. 主要异常：
- 部位：[明确解剖位置]
- 形态：[具体形态描述]
- 密度/信号特征：[量化指标]
2. 重要阴性发现
[列出重要阴性发现]
3. 对比既往变化
[对比结论/无法对比说明]

## 初步诊断
1. 最可能诊断（按概率排序）
[按概率降序排列，每个诊断单独一行]
2. 鉴别诊断及排除依据
[列出需排除的疾病及依据]

## 建议
1. 推荐影像检查
[具体检查项目]
2. 实验室检查
[检验项目]
3. 临床处理方案
[治疗方案]
4. 随访计划
[复查时间及方式]"""
                
                # Run the analysis and clean up the output.
                raw_result = self.call_qwen_api(processed_image, diagnosis_prompt, stream=False)
                clean_result = self.format_diagnosis(self.sanitize_response(raw_result))
                
                return {
                    "success": True,
                    "analysis_result": clean_result,
                    "timestamp": datetime.now().isoformat(),
                    "patient_info": self.patient_info.copy()
                }
            finally:
                # BUGFIX: remove the preprocessed temp file even when the API
                # call raises; previously it leaked on every failure.
                if os.path.exists(processed_image):
                    os.remove(processed_image)
            
        except Exception as e:
            return {
                "success": False,
                "error": str(e),
                "timestamp": datetime.now().isoformat()
            }
    
    def analyze_medical_image_complete(self, image_path: str, input_text: str = None, 
                                     temperature: float = 0.7, top_p: float = 0.7) -> Dict[str, Any]:
        """Complete medical-image analysis (demo4.py-derived) — non-streaming.

        When ``input_text`` is exactly "AI初步诊断" the standard structured
        radiology template is used; otherwise the caller's text (or a generic
        fallback prompt). ``temperature``/``top_p`` are accepted for interface
        compatibility but unused on this API path.
        """
        try:
            processed_image = self.preprocess_medical_image(image_path)
            try:
                # Build the analysis prompt.
                if input_text and input_text.strip() == "AI初步诊断":
                    # Standard diagnosis template from demo4.py.
                    diagnosis_prompt = """作为资深放射科医生，请严格按以下结构化格式分析：

## 影像表现
1. 主要异常：
- 部位：[明确解剖位置]
- 形态：[具体形态描述]
- 密度/信号特征：[量化指标]
2. 重要阴性发现
[列出重要阴性发现]
3. 对比既往变化
[对比结论/无法对比说明]

## 初步诊断
1. 最可能诊断（按概率排序）
[按概率降序排列，每个诊断单独一行]
2. 鉴别诊断及排除依据
[列出需排除的疾病及依据]

## 建议
1. 推荐影像检查
[具体检查项目]
2. 实验室检查
[检验项目]
3. 临床处理方案
[治疗方案]
4. 随访计划
[复查时间及方式]"""
                else:
                    # Caller-supplied prompt (or a generic fallback).
                    diagnosis_prompt = input_text or "请分析这张医学影像"
                
                raw_result = self.call_qwen_api(processed_image, diagnosis_prompt, stream=False)
                clean_result = self.format_diagnosis(self.sanitize_response(raw_result))
                
                return {
                    "success": True,
                    "analysis_result": clean_result,
                    "timestamp": datetime.now().isoformat(),
                    "patient_info": self.patient_info.copy()
                }
            finally:
                # BUGFIX: remove the preprocessed temp file even when the API
                # call raises; previously it leaked on every failure.
                if os.path.exists(processed_image):
                    os.remove(processed_image)
            
        except Exception as e:
            return {
                "success": False,
                "error": str(e),
                "timestamp": datetime.now().isoformat()
            }
    
    def analyze_medical_image_complete_stream(self, image_path: str, input_text: str = None, 
                                     temperature: float = 0.7, top_p: float = 0.7):
        """
        Complete medical-image analysis — streaming version (task-2 speedup).

        Yields the analysis text chunk by chunk for immediate display.
        The preprocessed temp image is always removed, even when the
        upstream stream fails midway. Errors are yielded as a final chunk.
        """
        print(f"🔍 [流式分析] 进入函数，图像: {image_path}")
        print(f"   输入文本: {input_text}")
        try:
            processed_image = self.preprocess_medical_image(image_path)
            print(f"   ✅ 图像预处理完成: {processed_image}")
            try:
                # Build the analysis prompt.
                if input_text and input_text.strip() == "AI初步诊断":
                    # Standard diagnosis template from demo4.py.
                    diagnosis_prompt = """作为资深放射科医生，请严格按以下结构化格式分析：

## 影像表现
1. 主要异常：
- 部位：[明确解剖位置]
- 形态：[具体形态描述]
- 密度/信号特征：[量化指标]
2. 重要阴性发现
[列出重要阴性发现]
3. 对比既往变化
[对比结论/无法对比说明]

## 初步诊断
1. 最可能诊断（按概率排序）
[按概率降序排列，每个诊断单独一行]
2. 鉴别诊断及排除依据
[列出需排除的疾病及依据]

## 建议
1. 推荐影像检查
[具体检查项目]
2. 实验室检查
[检验项目]
3. 临床处理方案
[治疗方案]
4. 随访计划
[复查时间及方式]"""
                else:
                    # Caller-supplied prompt (or a generic fallback).
                    diagnosis_prompt = input_text or "请分析这张医学影像"
                
                # Stream chunks straight through to the caller. (The old code
                # also accumulated them into an unused `full_result` string.)
                for chunk in self.call_qwen_api(processed_image, diagnosis_prompt, stream=True):
                    yield chunk
            finally:
                # BUGFIX: cleanup now runs even if streaming raises midway;
                # previously the temp file leaked on failure.
                if os.path.exists(processed_image):
                    os.remove(processed_image)
            
        except Exception as e:
            yield f"\n\n❌ 分析失败：{str(e)}"
    
    def continue_medical_conversation(self, image_path: str, input_text: str, 
                                     history: List = None, temperature: float = 0.7, 
                                     top_p: float = 0.7) -> Dict[str, Any]:
        """Continue a follow-up Q&A about an already-uploaded image.

        Returns a dict with ``success``/``answer``/``timestamp``/
        ``model_type`` on success, or ``success: False`` plus ``error``.

        Raises:
            HTTPException: 404 when the image file no longer exists.
        """
        try:
            if not os.path.exists(image_path):
                raise HTTPException(status_code=404, detail="图像文件不存在")
            
            if USE_REMOTE_INFERENCE:
                print("使用远程XrayGLM推理服务进行后续对话")
            else:
                print("使用通义千问API进行后续对话")
            
            answer = self.generate_text_with_local_model(
                input_text, image_path, history, temperature, top_p
            )
            
            # Format the reply for display.
            formatted_answer = self.format_diagnosis(self.sanitize_response(answer))
            model_type = "remote" if USE_REMOTE_INFERENCE else "api"
            
            return {
                "success": True,
                "answer": formatted_answer,
                "timestamp": datetime.now().isoformat(),
                "model_type": model_type
            }
            
        except HTTPException:
            # BUGFIX: the broad handler below used to swallow the 404 raised
            # above and return a success=False dict, hiding the HTTP status
            # from the FastAPI layer. Re-raise so it propagates as intended.
            raise
        except Exception as e:
            return {
                "success": False,
                "error": str(e),
                "timestamp": datetime.now().isoformat()
            }
    
    def _build_conversation_prompt(self, current_question: str, history: List = None) -> str:
        """Prepend prior Q/A turns to the current question so the model answers in context."""
        if not history:
            return current_question

        parts = ["以下是之前的对话记录：\n\n"]
        for entry in history:
            speaker = entry.get('role', '')
            text = entry.get('content', '')
            if speaker == 'user':
                parts.append(f"问：{text}\n")
            elif speaker == 'assistant':
                parts.append(f"答：{text}\n\n")

        parts.append(f"\n现在的问题：{current_question}\n\n")
        parts.append("请基于之前的诊断结果，针对性地回答当前问题。不要重新分析整张图片，只需回答当前问题。")
        return "".join(parts)
    
    def update_patient_info(self, name: str = None, age: str = None, 
                           gender: str = None, history: str = None) -> Dict[str, Any]:
        """Overwrite each patient field whose argument is truthy; return the updated record."""
        updates = {"name": name, "age": age, "gender": gender, "history": history}
        for field, value in updates.items():
            if value:
                self.patient_info[field] = value

        return {
            "success": True,
            "message": "患者信息已更新",
            "patient_info": self.patient_info.copy()
        }
    
    def generate_medical_report(self, analysis_history: List[Dict[str, Any]]) -> str:
        """Render a markdown report from stored patient info plus prior analyses."""
        try:
            sections = ["# 医学影像诊疗报告\n\n", "## 患者基本信息\n"]
            sections.append(f"- **姓名**: {self.patient_info['name']}\n")
            sections.append(f"- **年龄**: {self.patient_info['age']}\n")
            sections.append(f"- **性别**: {self.patient_info['gender']}\n")
            sections.append(f"- **病史**: {self.patient_info['history']}\n\n")

            if analysis_history:
                sections.append("## 分析历史\n")
                for idx, record in enumerate(analysis_history, 1):
                    sections.append(f"### 第{idx}次分析\n")
                    sections.append(f"**时间**: {record.get('timestamp', '未知')}\n")
                    sections.append(f"**结果**: {record.get('result', '无结果')}\n\n")

            return "".join(sections)

        except Exception as e:
            return f"报告生成失败: {str(e)}"
    
def generate_medical_report_from_chat(self, chat_history: List[Dict[str, Any]], patient_info: Dict[str, Any]) -> str:
    """Build a structured markdown report from a chat transcript (demo4.py port).

    Uses the most recent message with role 'AI' as the main analysis section.
    ``self`` is accepted (and unused) because this was written as a method;
    see the class binding below.
    """
    try:
        report = "# 医学影像诊疗报告\n\n"
        
        # Patient demographics section.
        report += "## 患者基本信息\n"
        report += f"- **姓名**: {patient_info.get('name', '未填写')}\n"
        report += f"- **年龄**: {patient_info.get('age', '未知')}\n"
        report += f"- **性别**: {patient_info.get('gender', '未填写')}\n"
        report += f"- **病史**: {patient_info.get('history', '无')}\n\n"
        
        # Main analysis: the latest AI message, if any.
        ai_messages = [msg for msg in chat_history if msg.get('role') == 'AI']
        if ai_messages:
            latest_ai_message = ai_messages[-1]
            analysis_content = latest_ai_message.get('content', '无分析结果')
            
            report += "## 影像分析结果\n\n"
            report += analysis_content + "\n\n"
        else:
            report += "## 影像分析结果\n\n"
            report += "暂无分析结果\n\n"
        
        # Report metadata footer.
        report += "---\n"
        report += f"**报告生成时间**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
        report += f"**生成系统**: Med-DistillFL医学影像分析系统\n"
        
        return report

    except Exception as e:
        return f"报告生成异常，错误信息：{str(e)}\n原始聊天历史：{chat_history}"


# BUGFIX: this function takes `self` but was defined at module level, so
# MedicalImageAnalyzer instances could never call it as a method. Bind it to
# the class here; it remains callable as a free function for compatibility.
MedicalImageAnalyzer.generate_medical_report_from_chat = generate_medical_report_from_chat

# Global analyzer instance shared by the API layer (module-import side effect).
medical_analyzer = MedicalImageAnalyzer()