import torch
from transformers import AutoModelForCausalLM
from janus.models import MultiModalityCausalLM, VLChatProcessor
from janus.utils.io import load_pil_images
import json

class MultimodalAnalyzer:
    """Analyze fundus-image regions with a Janus multimodal chat model."""

    def __init__(self, model_path, med_info):
        """
        Initialize the multimodal analyzer.

        Args:
            model_path (str): Path to the pretrained Janus model/processor.
            med_info (dict): Mapping from diagnosis name to its clinical
                description (used to build the analysis prompt).
        """
        self.vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
        self.tokenizer = self.vl_chat_processor.tokenizer
        # bfloat16 + eval(): inference only, no gradients needed.
        self.vl_gpt = (
            AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
            .to(torch.bfloat16)
            .cuda()
            .eval()
        )
        self.med_info = med_info

    def preprocess_regions(self, regions):
        """
        Normalize raw region data into the list format analyze_regions expects.

        Args:
            regions (dict): Output of extract_regions_from_mask, shaped
                {'region_dir': str, 'region_record': list[dict]} where each
                record carries 'save_path' and 'bbox'.

        Returns:
            list: Dicts each holding only 'save_path' and 'bbox'. Records
            missing either key are skipped with a console warning.

        Raises:
            ValueError: If regions is not a dict containing 'region_record'.
        """
        if not isinstance(regions, dict) or 'region_record' not in regions:
            raise ValueError("Regions must be a dictionary with 'region_record' key containing a list of region data.")

        region_records = regions['region_record']

        # Empty record list is valid input: nothing to analyze.
        if not region_records:
            print("No regions found in region_record, returning empty list.")
            return []

        processed_regions = []
        for record in region_records:
            if 'save_path' not in record or 'bbox' not in record:
                print(f"Skipping invalid region record: {record}")
                continue
            processed_regions.append({
                'save_path': record['save_path'],
                'bbox': record['bbox'],
            })

        return processed_regions

    def analyze_regions(self, regions, diagnosis, is_ground_truth=True):
        """
        Analyze image regions and generate lesion descriptions.

        Args:
            regions (dict or list): Raw output of extract_regions_from_mask,
                or an already-preprocessed list of region dicts.
            diagnosis (str): Diagnosis label for the whole image.
            is_ground_truth (bool): Whether the diagnosis is ground truth;
                only affects the prompt wording.

        Returns:
            str: Model answer (expected to be JSON text); "{}" when there
            are no valid regions.
        """
        # Normalize the region input.
        if isinstance(regions, dict):
            processed_regions = self.preprocess_regions(regions)
        else:
            processed_regions = regions

        # No valid regions: return an empty JSON object without touching the model.
        if not processed_regions:
            return json.dumps({})

        # Build the prompt (Chinese by default; prompt_en is the English variant).
        diag_term = "诊断结果为" if is_ground_truth else "可能的诊断结果为"
        diag_term_en = "diagnosis" if is_ground_truth else "possible diagnosis"
        clinical_desc = self.med_info.get(diagnosis, "未知")
        prompt_cn = (
            f"请分析以下眼底照片的局部区域，判断是否存在病灶。整张图片的{diag_term}“{diagnosis}”，"
            f"其临床表现为“{clinical_desc}”。请关注以下几个重点区域，找到可能含有病灶的区域，"
            f"并描述其特征。输出格式为 JSON，例如：\n"
            "{\n  \"region_0\": {\"positive\": true, \"description\": \"微动脉瘤\"},\n"
            "  \"region_1\": {\"positive\": false, \"description\": \"无明显病灶\"}\n}\n"
            "请基于图片内容进行判断，不要完全依赖提供的诊断结果。"
        )
        prompt_en = (
            f"Please analyze the following partial regions of the fundus photo to determine if there are any lesions. "
            f"The {diag_term_en} of the entire image is \"{diagnosis}\", and its clinical manifestations are \"{clinical_desc}\". "
            f"Focus on the following key areas, identify regions that may contain lesions, and describe their features. "
            f"The output should be in JSON format, e.g.:\n"
            "{\n  \"region_0\": {\"positive\": true, \"description\": \"microaneurysms\"},\n"
            "  \"region_1\": {\"positive\": false, \"description\": \"no obvious lesion\"}\n}\n"
            "Please make judgments based on the image content and do not fully rely on the provided diagnosis."
        )
        prompt = prompt_cn  # switch to prompt_en for English output

        # BUG FIX: emit one <image_placeholder> per region image so the
        # processor can pair every image in `images` with a placeholder token.
        # The original used a single placeholder for N images, misaligning
        # the image/text interleaving.
        placeholders = "\n".join("<image_placeholder>" for _ in processed_regions)
        conversation = [
            {
                "role": "User",
                "content": f"{placeholders}\n{prompt}",
                "images": [region['save_path'] for region in processed_regions],
            },
            {"role": "Assistant", "content": ""},
        ]

        # Load images and prepare batched model inputs.
        pil_images = load_pil_images(conversation)
        prepare_inputs = self.vl_chat_processor(
            conversations=conversation, images=pil_images, force_batchify=True
        ).to(self.vl_gpt.device)

        # Fuse text and image embeddings.
        inputs_embeds = self.vl_gpt.prepare_inputs_embeds(**prepare_inputs)

        # Greedy decoding (do_sample=False) for reproducible answers.
        outputs = self.vl_gpt.language_model.generate(
            inputs_embeds=inputs_embeds,
            attention_mask=prepare_inputs.attention_mask,
            pad_token_id=self.tokenizer.eos_token_id,
            bos_token_id=self.tokenizer.bos_token_id,
            eos_token_id=self.tokenizer.eos_token_id,
            max_new_tokens=512,
            do_sample=False,
            use_cache=True,
        )

        answer = self.tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
        return answer

def parse_analysis_result(answer, regions):
    """
    Parse the JSON analysis produced by the multimodal model.

    Args:
        answer (str): Model output expected to contain a JSON object.
            Surrounding text (e.g. markdown ``` fences or prose) is
            tolerated: the substring from the first '{' to the last '}'
            is what gets parsed.
        regions (dict or list): Raw regions dict from
            extract_regions_from_mask, or a preprocessed list of region
            dicts (each with a 'bbox').

    Returns:
        list: One dict per region with 'bbox', 'positive', 'description'.
        When the answer cannot be parsed at all, every region falls back
        to positive=False with an empty description.
    """
    # Accept either the raw dict or an already-flattened list.
    if isinstance(regions, dict):
        processed_regions = regions.get('region_record', [])
    else:
        processed_regions = regions

    # BUG FIX: LLMs frequently wrap JSON in code fences or prose, which made
    # the raw json.loads(answer) fail and collapse everything to defaults.
    # Keep only the outermost {...} span before parsing.
    start = answer.find('{')
    end = answer.rfind('}')
    payload = answer[start:end + 1] if start != -1 and end > start else answer

    try:
        result_dict = json.loads(payload)
    except json.JSONDecodeError:
        print("Failed to parse JSON, returning default results")
        return [{'bbox': r['bbox'], 'positive': False, 'description': ''} for r in processed_regions]

    # Guard against a valid-JSON-but-not-an-object answer (e.g. a bare list).
    if not isinstance(result_dict, dict):
        result_dict = {}

    parsed_results = []
    for i, region in enumerate(processed_regions):
        entry = result_dict.get(f"region_{i}")
        if entry is not None:
            parsed_results.append({
                'bbox': region['bbox'],
                'positive': entry.get('positive', False),
                'description': entry.get('description', ''),
            })
        else:
            # Model produced no verdict for this region.
            parsed_results.append({
                'bbox': region['bbox'],
                'positive': False,
                'description': '未找到分析结果',
            })
    return parsed_results

# Example usage integration with process_predict
def process_predict(model,
                    predict_loader,
                    device,
                    predict_visual_dir,
                    save_visual=True,
                    record_json_save_path='./experiments/region_record.json',
                    original_data_root='../../Dataset/public_dataset',
                    model_path='./pretrained/Janus-Pro-7B',
                    med_info=None):
    """
    Run Grad-CAM visualization, region extraction and multimodal analysis
    for each predicted sample, then dump the region records as JSON.

    Args:
        model: Classification model producing per-class logits for images.
        predict_loader: Data loader yielding (imgs, labels_dict) batches;
            labels_dict must carry 'label', 'image_path', 'crop_info' and
            'original_rel_path'.
        device: Device the classifier runs on.
        predict_visual_dir (str): Directory for Grad-CAM visualizations.
        save_visual (bool): Whether to save visualization images.
        record_json_save_path (str): Output path for the region-record JSON.
        original_data_root (str): Root directory of the original dataset.
        model_path (str): Path to the multimodal (Janus) model.
        med_info (dict | None): Diagnosis -> clinical-description mapping;
            defaults to the built-in Chinese descriptions when None.
    """
    if med_info is None:
        # Default clinical descriptions (Chinese), keyed by diagnosis name.
        med_info = {
            "mild Diabetic Retinopathy": "眼底照片上可能有少量微动脉瘤（小的红色圆点），以及少量的出血（红色或暗红色的点状区域）或渗出（黄色或白色的脂质沉积物）。",
            "moderate Diabetic Retinopathy": "眼底照片上可见较多的微动脉瘤、出血（红色或暗红色的点状或斑状区域）、渗出（黄色或白色的脂质沉积物），以及静脉串珠样改变（静脉局部扩张或扭曲）。",
            "severe Diabetic Retinopathy": "眼底照片上可见广泛的出血（红色或暗红色的点状或斑状区域）、渗出（黄色或白色的脂质沉积物），明显的静脉串珠样改变（静脉局部扩张或扭曲），以及视网膜内微血管异常（异常血管结构）。",
            "proliferative Diabetic Retinopathy": "眼底照片上可见新生血管（红色或粉红色的血管团），可能伴有玻璃体积血（暗红色血块），以及牵拉性视网膜脱离（视网膜抬高或扭曲）。",
            "Glaucoma": "眼底照片上可见视盘凹陷增大，视网膜神经纤维层缺损（视盘周围的白色区域减少）。"
        }

    region_record = {}
    analyzer = MultimodalAnalyzer(model_path=model_path, med_info=med_info)
    # Class index -> diagnosis name. Index 0 never reaches the analyzer
    # because of the `label > 0 and pred > 0` filter below; unmapped
    # positive labels fall back to "未知".
    diagnosis_mapping = {
        1: "mild Diabetic Retinopathy",
        2: "moderate Diabetic Retinopathy",
        3: "severe Diabetic Retinopathy",
        4: "proliferative Diabetic Retinopathy"
    }

    for batch in predict_loader:
        imgs, labels_dict = batch
        labels = labels_dict['label']
        image_paths = labels_dict['image_path']
        crop_infos = labels_dict['crop_info']
        original_rel_paths = labels_dict['original_rel_path']
        imgs = imgs.to(device)
        labels = labels.to(device)

        # Classification forward pass only; Grad-CAM gradients are computed
        # separately per sample below.
        with torch.no_grad():
            logits = model(imgs)
            preds = torch.argmax(logits, dim=1)

        for img, label, pred, img_path, original_rel_path, crop_info in zip(
                imgs, labels, preds, image_paths, original_rel_paths, crop_infos):
            # Only analyze samples where both ground truth and prediction
            # indicate disease (class > 0).
            if label > 0 and pred > 0:
                # Get feature maps and gradients for Grad-CAM.
                feature_maps, gradients = get_feature_maps_and_grads(
                    model, img.unsqueeze(0), label.item())
                cam = compute_gradcam(feature_maps, gradients)
                img_original = cv2.imread(img_path)

                # Generate visualization overlay and binary mask.
                cam_img, mask = generate_visualization(img_original, cam)

                # Save visualization if enabled.
                if save_visual:
                    save_path = os.path.join(
                        predict_visual_dir, 'gradcam', f"pred_{pred.item()}_label_{label.item()}_{os.path.basename(img_path)}")
                    os.makedirs(os.path.dirname(save_path), exist_ok=True)
                    cv2.imwrite(save_path, cam_img)

                # Extract candidate lesion regions from the CAM mask.
                regions = extract_regions_from_mask(mask, os.path.join(
                    original_data_root, original_rel_path), crop_info)

                # Perform multimodal analysis on the extracted regions.
                diagnosis = diagnosis_mapping.get(label.item(), "未知")
                analysis_result = analyzer.analyze_regions(regions, diagnosis, is_ground_truth=True)
                parsed_result = parse_analysis_result(analysis_result, regions)

                # Record regions plus their analysis, keyed by image filename.
                region_record[os.path.basename(img_path)] = {
                    'region_dir': regions['region_dir'],
                    'region_record': regions['region_record'],
                    'analysis': parsed_result
                }

    # BUG FIX: ensure the output directory exists (the default
    # './experiments/' may not have been created yet) and write UTF-8
    # explicitly so the Chinese descriptions serialize on any platform.
    record_dir = os.path.dirname(record_json_save_path)
    if record_dir:
        os.makedirs(record_dir, exist_ok=True)
    with open(record_json_save_path, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps Chinese text human-readable in the file.
        json.dump(region_record, f, indent=4, ensure_ascii=False)