from paddleocr import PaddleOCR
from openai import OpenAI
import cv2
import numpy as np
import json
import os
import base64
import re
import time
from typing import List, Dict, Any
from prompts import PromptTemplate

class MedicalReportAnalyzer:
    """Analyze medical report images via OCR plus a multimodal LLM.

    Pipeline: run PaddleOCR on the image, merge the detected text fragments
    into visual rows, build a report-type-specific prompt, then send the
    prompt together with the image (base64 data URL) to a Qwen vision model
    through the DashScope OpenAI-compatible endpoint and parse the JSON it
    returns.
    """

    def __init__(self, api_key: str = None, report_type: str = 'lab'):
        """Initialize the analyzer.

        Args:
            api_key: DashScope API key; falls back to the DASHSCOPE_API_KEY
                environment variable when omitted.
            report_type: 'lab' for laboratory-test reports, 'exam' for
                examination reports (selects the prompt template).
        """
        self.api_key = api_key or os.getenv('DASHSCOPE_API_KEY')
        self.report_type = report_type
        self.ocr = PaddleOCR(use_angle_cls=True, lang="ch", show_log=False)
        self.client = OpenAI(
            api_key=self.api_key,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
        )

    def _enhance_image(self, image: np.ndarray) -> np.ndarray:
        """Denoise then sharpen a BGR image.

        NOTE(review): not called by the current pipeline; kept for callers
        that pre-process images before OCR.
        """
        # Non-local-means color denoising with OpenCV's recommended defaults.
        img = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
        # 3x3 sharpening kernel (center 9, neighbors -1; weights sum to 1).
        kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
        return cv2.filter2D(img, -1, kernel)

    def _image_to_base64(self, image_path: str) -> str:
        """Return the file's raw bytes as a base64-encoded ASCII string."""
        with open(image_path, "rb") as f:
            return base64.b64encode(f.read()).decode('utf-8')

    @staticmethod
    def _merge_line(line_items: List[dict]) -> dict:
        """Collapse the regions of one visual row into a single record.

        Items are ordered left-to-right by center_x; texts are space-joined,
        corner positions concatenated, and confidence/centers averaged.
        """
        ordered = sorted(line_items, key=lambda item: item['center_x'])
        merged_pos: List[Any] = []
        for item in ordered:
            merged_pos.extend(item['position'])
        count = len(ordered)
        return {
            'text': " ".join(item['text'] for item in ordered),
            'position': merged_pos,
            'confidence': sum(item['confidence'] for item in ordered) / count,
            'center_x': sum(item['center_x'] for item in ordered) / count,
            'center_y': sum(item['center_y'] for item in ordered) / count,
        }

    def _merge_text_by_row(self, text_regions: List[dict], y_threshold: int = 15) -> List[dict]:
        """Group regions whose Y centers are close into merged text rows.

        A region joins the current row when its center_y is within
        y_threshold of the row's FIRST region (not a running average).
        The caller's list is not mutated (the previous implementation
        sorted it in place).

        Args:
            text_regions: Region dicts with 'text', 'confidence',
                'position', 'center_x' and 'center_y' keys.
            y_threshold: Max vertical distance (pixels) for two regions to
                be considered the same row.

        Returns:
            One merged region dict per row, top-to-bottom.
        """
        if not text_regions:
            return []

        # Sort a copy top-to-bottom so rows are discovered in order.
        ordered = sorted(text_regions, key=lambda item: item['center_y'])

        merged_lines: List[dict] = []
        current_line = [ordered[0]]
        base_y = ordered[0]['center_y']

        for region in ordered[1:]:
            if abs(region['center_y'] - base_y) <= y_threshold:
                current_line.append(region)
            else:
                # Row complete: merge it and start a new one.
                merged_lines.append(self._merge_line(current_line))
                current_line = [region]
                base_y = region['center_y']

        # The final row never hits the else-branch above.
        merged_lines.append(self._merge_line(current_line))
        return merged_lines

    def _detect_text(self, image_path: str) -> List[dict]:
        """OCR the image and return row-merged text regions.

        Returns an empty list when OCR finds nothing.
        """
        start_time = time.time()
        result = self.ocr.ocr(image_path, cls=True)
        ocr_time = time.time() - start_time
        print(f"\n✦ OCR识别耗时: {ocr_time:.2f}秒")

        if not result or not result[0]:
            return []

        # Each OCR line is ([4 corner points], (text, confidence)).
        text_regions = []
        for line in result[0]:
            points = np.array(line[0])
            center_x = points[:, 0].mean()
            center_y = points[:, 1].mean()

            text_regions.append({
                'text': line[1][0],
                'confidence': float(line[1][1]),
                'position': line[0],
                'center_x': center_x,
                'center_y': center_y
            })

        # Merge fragments that belong to the same visual row.
        merged_regions = self._merge_text_by_row(text_regions)
        print(f"✦ 原始文本数量: {len(text_regions)}")
        print(f"✦ 合并后行数: {len(merged_regions)}")

        return merged_regions

    def _clean_json_string(self, json_str: str) -> str:
        """Normalize a model response so json.loads can parse it.

        Strips markdown code fences, removes newlines and collapses runs of
        whitespace. NOTE(review): collapsing whitespace also affects string
        values inside the JSON; acceptable here since report fields are
        short labels/values.
        """
        # Drop a possible ```json ... ``` fence around the payload.
        json_str = re.sub(r'```json\s*|\s*```', '', json_str)
        json_str = json_str.replace('\n', '')  # remove raw newlines
        json_str = re.sub(r'\s+', ' ', json_str)  # normalize whitespace
        return json_str

    def analyze_report(self, image_path: str) -> Dict[str, Any]:
        """Full analysis pipeline: OCR → prompt → vision LLM → parsed JSON.

        Args:
            image_path: Path to the report image file.

        Returns:
            The structured report parsed from the model's JSON answer.

        Raises:
            Exception: Wraps any underlying failure (missing file, empty
                OCR result, invalid model JSON) with a summary message;
                the original exception is chained as __cause__.
        """
        try:
            start_total = time.time()
            if not os.path.exists(image_path):
                raise FileNotFoundError(f"找不到图片文件: {image_path}")

            # OCR pass; bail out early if the page is blank/unreadable.
            text_regions = self._detect_text(image_path)
            if not text_regions:
                raise ValueError("未检测到文本内容")

            # Pick the prompt template matching the configured report type.
            prompt = (PromptTemplate.get_lab_report_prompt(text_regions)
                     if self.report_type == 'lab'
                     else PromptTemplate.get_exam_report_prompt(text_regions))

            image_base64 = self._image_to_base64(image_path)
            print(prompt)

            # Send prompt + inline image to the vision model.
            model_start = time.time()
            completion = self.client.chat.completions.create(
                model="qwen2.5-vl-7b-instruct",
                messages=[{
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt},
                        {"type": "image_url",
                         "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"}}
                    ]
                }]
            )
            model_time = time.time() - model_start
            print(f"✦ 大模型分析耗时: {model_time:.2f}秒")

            # Read the answer straight off the SDK response object instead
            # of round-tripping it through model_dump_json()/json.loads.
            if not completion.choices:
                raise Exception("模型未返回有效结果")
            result_text = completion.choices[0].message.content
            cleaned_text = self._clean_json_string(result_text)
            try:
                result = json.loads(cleaned_text)
            except json.JSONDecodeError as e:
                print(f"JSON解析错误: {e}")
                print(f"清理后的文本: {cleaned_text}")
                raise Exception("JSON格式无效") from e
            total_time = time.time() - start_total
            print(f"✦ 总耗时: {total_time:.2f}秒\n")
            return result

        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise Exception(f"报告分析失败: {str(e)}") from e

def analyze_medical_report(image_path: str, api_key: str = None, report_type: str = 'lab') -> Dict[str, Any]:
    """One-shot convenience helper: build an analyzer and run it once.

    Args:
        image_path: Path to the report image file.
        api_key: Optional DashScope API key (the analyzer falls back to the
            DASHSCOPE_API_KEY environment variable).
        report_type: 'lab' for laboratory-test reports, 'exam' for
            examination reports.

    Returns:
        The structured analysis result from MedicalReportAnalyzer.analyze_report.
    """
    return MedicalReportAnalyzer(api_key, report_type).analyze_report(image_path)
