import argparse
import json
import os
import random
import sys
from collections import defaultdict, Counter
from typing import Dict, List, Tuple

# Make the project root (parent of this script's directory) importable so the
# local `utils` package resolves when the script is run directly.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.abspath(os.path.join(current_dir, '..'))
sys.path.append(parent_dir)

from tqdm import tqdm

from utils.med_prompt_constructor import PromptConstructor


def load_annotations(data_path: str, disease_map: Dict[str, List[str]]) -> Tuple[List[dict], Dict[str, set], List[dict]]:
    """Load all annotations, clean them (map English diagnoses to Chinese
    labels) and return the cleaned records.

    :param data_path: root directory of the raw data; either contains an
        ``annotations.json`` directly, or sub-directories each holding one.
    :param disease_map: mapping from lower-case English diagnosis to a list of
        Chinese labels (an empty list marks a deliberately ignored diagnosis).
    :return: a 3-tuple of
        - cleaned annotation records, each with ``diagnosis.text_list`` set to
          the de-duplicated Chinese label list;
        - mapping from the joined label key (``'_'.join(labels)``) to the set
          of absolute image paths carrying exactly that label combination;
        - auxiliary list of ``{"image_path", "cleaned_label"}`` dicts.
    """
    # 1. Load raw annotations: either one annotations.json at the root, or
    #    one annotations.json inside each sub-directory.
    annotations = []
    cleaned_img_label = []  # auxiliary dataset: records image_path + cleaned_label
    root_ann = os.path.join(data_path, 'annotations.json')
    if os.path.exists(root_ann):
        with open(root_ann, encoding='utf-8') as f:
            data = json.load(f)
            for item in data.values():
                # Image paths in the JSON are relative to their directory.
                item['image_path'] = os.path.abspath(
                    os.path.join(data_path, item['image_path']))
                annotations.append(item)
    else:
        for subdir in os.listdir(data_path):
            subdir_path = os.path.join(data_path, subdir)
            if os.path.isdir(subdir_path):
                ann_file = os.path.join(subdir_path, 'annotations.json')
                if os.path.exists(ann_file):
                    with open(ann_file, encoding='utf-8') as f:
                        sub_annotations = json.load(f)
                        for item in sub_annotations.values():
                            item['image_path'] = os.path.abspath(
                                os.path.join(subdir_path, item['image_path']))
                            annotations.append(item)

    # 2. Clean diagnoses: English -> Chinese, drop invalid records.
    cleaned_data = []
    disease_key_to_images = defaultdict(set)
    diagnosis_check_stats = {
        "missing_field": 0,
        "empty_text": 0,
        "un_processed": defaultdict(int),
        "unmapped": defaultdict(int)
    }

    for record in annotations:
        # 2.1 Raw English diagnosis text.
        diagnosis = record.get("diagnosis", {}).get("text", None)
        if diagnosis is None:
            diagnosis_check_stats["missing_field"] += 1
            continue
        if not diagnosis.strip():
            diagnosis_check_stats["empty_text"] += 1
            continue

        # 2.2 Split on commas and on the word "and"; lower-case everything.
        diseases = [d.strip().lower()
                    for d in diagnosis.split(",") if d.strip()]
        if not diseases:
            diagnosis_check_stats["empty_text"] += 1
            continue

        all_diseases = []
        for d in diseases:
            if " and " in d:
                # BUGFIX: split on the delimiter " and " (with surrounding
                # spaces). The previous d.split("and") also broke words that
                # merely contain "and", e.g. "hand eczema" -> "h", "eczema".
                parts = [part.strip()
                         for part in d.split(" and ") if part.strip()]
                all_diseases.extend(parts)
            else:
                all_diseases.append(d)

        # 2.3 English -> Chinese mapping; track unmapped and deliberately
        #     ignored (empty-list) labels for the report below.
        mapped_diseases: List[str] = []
        for eng in all_diseases:
            cn_list = disease_map.get(eng, None)
            if cn_list is None:
                diagnosis_check_stats["unmapped"][eng] += 1
            elif len(cn_list) == 0:
                diagnosis_check_stats['un_processed'][eng] += 1
            else:
                mapped_diseases.extend(cn_list)

        if not mapped_diseases:
            continue

        # De-duplicate while preserving first-seen order.
        unique_mapped = list(dict.fromkeys(mapped_diseases))
        record["diagnosis"]["text_list"] = unique_mapped
        cleaned_data.append(record)
        # disease_key groups images by their exact label combination.
        disease_key = '_'.join(unique_mapped)
        disease_key_to_images[disease_key].add(record["image_path"])
        cleaned_img_label.append({
            "image_path": record["image_path"],
            "cleaned_label": unique_mapped
        })

    # 3. Diagnostics report: dropped records and unmapped labels.
    print(f"[检查报告] 缺失 diagnosis.text 字段：{diagnosis_check_stats['missing_field']} 条")
    print(f"[检查报告] diagnosis.text 为空或无有效疾病：{diagnosis_check_stats['empty_text']} 条")
    if diagnosis_check_stats["unmapped"]:
        print(f"[检查报告] 无法映射的英文标签共 {len(diagnosis_check_stats['unmapped'])} 种（按出现频次排序）：")
        for eng, cnt in sorted(diagnosis_check_stats["unmapped"].items(), key=lambda x: x[1], reverse=True):
            print(f"  - '{eng}'：{cnt} 次")
    if diagnosis_check_stats["un_processed"]:
        print(f"[检查报告] 故意留空处理的共 {len(diagnosis_check_stats['un_processed'])} 种（按出现频次排序）：")
        for eng, cnt in sorted(diagnosis_check_stats["un_processed"].items(), key=lambda x: x[1], reverse=True):
            print(f"  - '{eng}'：{cnt} 次")
    print(f"一共加载有效数据{len(cleaned_data)}条")

    return cleaned_data, disease_key_to_images, cleaned_img_label


def split_train_test(disease_key_to_images, test_ratio=0.1, seed=42):
    """Split the pooled image set into train/test partitions.

    Every disease-key group is guaranteed at least one representative in the
    test set; the test set is then topped up with random images until it
    reaches ``test_ratio`` of the total (minimum one image).

    :param disease_key_to_images: mapping disease_key -> set of image paths
    :param test_ratio: target fraction of images in the test set
    :param seed: RNG seed for reproducible sampling
    :return: (train_images, test_images) as two disjoint sets
    """
    random.seed(seed)
    # Pool every image across all disease keys (images may repeat per key).
    pooled = set()
    for image_group in disease_key_to_images.values():
        pooled.update(image_group)
    shuffled = list(pooled)
    random.shuffle(shuffled)
    target_test_size = max(1, int(len(shuffled) * test_ratio))

    # Guarantee each disease key contributes at least one test image.
    test_set = set()
    for image_group in disease_key_to_images.values():
        if image_group:
            test_set.add(random.choice(list(image_group)))

    # Top up with random leftovers if the ratio is not yet reached.
    shortfall = target_test_size - len(test_set)
    if shortfall > 0:
        candidates = [img for img in shuffled if img not in test_set]
        test_set.update(random.sample(candidates, min(shortfall, len(candidates))))

    # Everything not picked for test goes to train.
    return set(shuffled) - test_set, test_set

def build_multimodal_dataset(data_path, disease_map_path, stage_map_path, save_json_dir, test_ratio=0.05):
    """End-to-end pipeline: load and clean annotations, split train/test by
    image, generate multimodal QA samples, and write all artifacts (support
    dataset, split manifest, train/test sets, dataset_info) to save_json_dir.

    :param data_path: root directory of the raw annotated data
    :param disease_map_path: JSON mapping English diagnoses to Chinese labels
    :param stage_map_path: JSON with "diease_map" and "diease_stage" configs
    :param save_json_dir: output directory for all generated JSON files
    :param test_ratio: fraction of images reserved for the test split
    """

    def _write_json(payload, filename):
        # All outputs share the same UTF-8, pretty-printed JSON format.
        with open(os.path.join(save_json_dir, filename), 'w', encoding='utf-8') as fh:
            json.dump(payload, fh, ensure_ascii=False, indent=2)

    # Load mapping configs.
    with open(disease_map_path, encoding='utf-8') as fh:
        disease_map = json.load(fh)
    with open(stage_map_path, encoding='utf-8') as fh:
        stage_map_data = json.load(fh)

    # Flatten every Chinese label for the prompt constructor's label pool.
    cn_disease_pool = [cn for translations in disease_map.values()
                       for cn in translations]

    prompt_constructor = PromptConstructor(
        cn_disease_pool, stage_map_data["diease_map"], stage_map_data["diease_stage"])

    # 1. Load cleaned annotations plus the disease_key -> images index.
    annotations, disease_key_to_images, cleaned_support_data = load_annotations(data_path, disease_map)

    # 1.1 Persist the auxiliary (image_path, cleaned_label) dataset first.
    os.makedirs(save_json_dir, exist_ok=True)
    _write_json(cleaned_support_data, "cleaned_img_label.json")
    print(f"加载数据完成，共{len(annotations)}条标注数据")

    # 2. Train/test split at the image level.
    train_images, test_images = split_train_test(disease_key_to_images, test_ratio=test_ratio)
    print(f"划分后train images: {len(train_images)}, test images: {len(test_images)}")
    image_to_split = {img: 'train' for img in train_images}
    image_to_split.update({img: 'test' for img in test_images})
    # 2.1 Persist the split manifest.
    _write_json({"train": list(train_images), "test": list(test_images)}, "image_split.json")

    # 3. Generate multimodal QA items and route them to the right split.
    qtype_counter = Counter()
    buckets = {'train': [], 'test': []}
    for record in tqdm(annotations, desc='生成多模态数据'):
        split = image_to_split.get(record["image_path"], 'train')  # unassigned images default to train
        for question, answer, q_type in prompt_constructor.construct_prompts(record["diagnosis"]["text_list"]):
            qtype_counter[q_type] += 1
            buckets[split].append({
                "instruction": "<image>" + question,
                "input": "",
                "output": answer,
                "images": [record["image_path"]]
            })
    multimodal_data_train = buckets['train']
    multimodal_data_test = buckets['test']

    print(f"train数据条数: {len(multimodal_data_train)}, test数据条数: {len(multimodal_data_test)}")

    print("各题型生成数量统计：")
    for q_type, count in qtype_counter.items():
        print(f"{q_type}: {count}")
    print(f"总题目数：{sum(qtype_counter.values())}")

    # 4. Persist train/test sets.
    _write_json(multimodal_data_train, "med_pub_train.json")
    _write_json(multimodal_data_test, "med_pub_test.json")

    # 5. Persist dataset_info.json (column mapping shared by both splits).
    column_spec = {
        "prompt": "instruction",
        "query": "input",
        "response": "output",
        "images": "images"
    }
    _write_json({
        "med_pub_train": {
            "file_name": "med_pub_train.json",
            "columns": dict(column_spec)
        },
        "med_pub_test": {
            "file_name": "med_pub_test.json",
            "columns": dict(column_spec)
        }
    }, "dataset_info.json")
    print("数据划分及保存完毕")


if __name__ == "__main__":
    # CLI entry point: (flag, type, default, help) specs keep the argument
    # definitions in one compact table.
    _ARG_SPECS = [
        ("--data_path", str, "/home/guoyi/Dataset/public_processed", "原始数据主目录"),
        ("--disease_map_path", str, "./configs/public_dataset_aligner.json", "疾病映射json"),
        ("--stage_map_path", str, "./configs/diease_stage.json", "疾病分期映射json"),
        ("--save_json_dir", str, "./experiments/med_pub_data/", "输出json文件路径"),
        ("--test_ratio", float, 0.1, "test集比例"),
    ]
    cli_parser = argparse.ArgumentParser(description="医学图像多模态数据生成")
    for flag, arg_type, arg_default, arg_help in _ARG_SPECS:
        cli_parser.add_argument(flag, type=arg_type, default=arg_default, help=arg_help)
    cli_args = cli_parser.parse_args()

    build_multimodal_dataset(
        data_path=cli_args.data_path,
        disease_map_path=cli_args.disease_map_path,
        stage_map_path=cli_args.stage_map_path,
        save_json_dir=cli_args.save_json_dir,
        test_ratio=cli_args.test_ratio
    )