import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
import json
from preprossData.utils.crop_padding import crop_resize_save
from tqdm import tqdm
def process_diagnosis_text(text):
    """Normalize a raw STARE diagnosis string.

    Expands known abbreviations to their full clinical names, rewrites the
    conjunctions " AND " / " &" as ", ", keeps only the part before the
    first " OR ", strips double quotes, and trims surrounding whitespace.
    Entries containing "recommend" (case-insensitive) or a "?" are treated
    as unreliable and mapped to the empty string.
    """
    # Order matters here: more specific abbreviations must be expanded
    # before their substrings (e.g. "RPED"/"RPEopathy" before "RPE").
    expansions = [
        ("BRAO", "Branch Retinal Artery Occlusion"),
        ("CRAO", "Cilio-Retinal Artery Occlusion"),
        ("BRVO", "Branch Retinal Vein Occlusion"),
        ("CRVO", "Central Retinal Vein Occlusion"),
        ("Hemi-CRVO", "Hemi-Central Retinal Vein Occlusion"),
        ("BDR/NPDR", "Background Diabetic Retinopathy"),
        ("PDR", "Proliferative Diabetic Retinopathy"),
        ("ASR", "Arteriosclerotic Retinopathy"),
        ("HTR", "Hypertensive Retinopathy"),
        ("CNV", "Choroidal Neovascularization"),
        ("Emboli", "Hollenhorst Emboli"),
        ("RPED", "Retinal Pigment Epithelial Detachment"),
        ("RPEopathy", "Retinal Pigment Epitheliopathy"),
        ("RPE", "Retinal Pigment Epithelium"),
        ("Coats'", "Coats' disease"),
    ]

    # Uncertain or referral-style notes are dropped entirely.
    if "?" in text or "recommend" in text.lower():
        return ""

    for short, full in expansions:
        text = text.replace(short, full)

    # Conjunctions become comma separators.
    text = text.replace(" AND ", ", ").replace(" &", ", ")

    # Keep only the text before the first " OR " (no-op when absent),
    # then drop double quotes and trim.
    text = text.partition(" OR ")[0]
    return text.replace('"', '').strip()

def convert_to_dict(file_path):
    """Parse the diagnosis file into ``{image_name: processed_text}``.

    Each usable line is tab-separated with at least three columns; the
    first column is the image name and the last column is the raw
    diagnosis text, which is normalized via ``process_diagnosis_text``.
    Lines with fewer than three columns are skipped.
    """
    mapping = {}
    with open(file_path, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            columns = raw_line.strip().split('\t')
            # Need image name, code column(s), and trailing diagnosis text.
            if len(columns) < 3:
                continue
            mapping[columns[0]] = process_diagnosis_text(columns[-1])
    return mapping

def gather_data(data_path, tar_path, pref='stare_'):
    """Preprocess the STARE dataset into a uniform image + annotation layout.

    - ``data_path`` must contain an ``all-images`` folder holding the raw
      fundus images; the diagnosis file ``diagnose_code.txt`` is read from
      ``data_path`` itself (see NOTE below).
    - Every image is cropped/resized to 224x224 and saved as PNG under
      ``tar_path/images`` using the original base name with ``pref``
      prepended.
    - Writes ``annotations.json`` in ``tar_path`` mapping each new file
      name to its paths, crop info and diagnosis text.

    Args:
        data_path (str): dataset root containing the ``all-images`` folder.
        tar_path (str): output directory for the processed data.
        pref (str): prefix for processed image names (default ``'stare_'``).

    Returns:
        dict: the annotation dictionary that was written to disk.
    """
    os.makedirs(tar_path, exist_ok=True)
    images_dir = os.path.join(tar_path, 'images')
    os.makedirs(images_dir, exist_ok=True)

    # All raw images live in the all-images folder; sort for determinism.
    image_root = os.path.join(data_path, 'all-images')
    image_exts = ('.png', '.jpg', '.jpeg', '.bmp', '.ppm')
    image_list = sorted(
        f for f in os.listdir(image_root) if f.lower().endswith(image_exts)
    )

    # NOTE(review): the folder description above says diagnose_code.txt sits
    # inside all-images, but the file is read from the dataset root here —
    # confirm which location is correct for this dataset layout.
    diagnosis_txt_path = os.path.join(data_path, 'diagnose_code.txt')
    result_dict = convert_to_dict(diagnosis_txt_path)

    data_dict = {}
    for image_file in tqdm(image_list, desc='Preprocess Images', unit='images'):
        base_name = os.path.splitext(image_file)[0]
        # Bug fix: the previous f"{pref}_{base_name}.png" produced a double
        # underscore ("stare__im0001.png") because the default prefix already
        # ends with "_". Insert a separator only when the prefix lacks one.
        separator = '' if (not pref or pref.endswith('_')) else '_'
        new_image_name = f"{pref}{separator}{base_name}.png"
        src_image_path = os.path.join(image_root, image_file)
        dest_image_path = os.path.join(images_dir, new_image_name)

        # Crop dark borders, resize to 224x224, and re-encode as PNG.
        crop_info = crop_resize_save(
            image_path=src_image_path,
            save_path=dest_image_path,
            resize=(224, 224),
            crop_threshold=25
        )

        # Images without a matching diagnosis line get an empty text.
        diag_text = result_dict.get(base_name, "")
        data_dict[new_image_name] = {
            "image_name": new_image_name,
            "image_path": os.path.join("images", new_image_name),
            "original_path": os.path.relpath(src_image_path, data_path),
            "crop_info": crop_info,
            "diagnosis": {
                "text": diag_text
            }
        }

    annotations_path = os.path.join(tar_path, "annotations.json")
    with open(annotations_path, "w", encoding="utf-8") as f:
        json.dump(data_dict, f, indent=4)

    return data_dict

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="STARE 数据集预处理")
    parser.add_argument("--data_path", type=str, default="/mnt/e/Document_Workspace/Dataset/public_dataset/STARE",
                        help="新数据集根目录，要求其下包含 all-images 文件夹")
    parser.add_argument("--tar_path", type=str, default="/mnt/e/Document_Workspace/Dataset/processed224_new/STARE",
                        help="预处理后数据存放目录")
    parser.add_argument("--pref", type=str, default="stare_",
                        help="处理后图片名称的前缀，默认为 'stare_'")
    args = parser.parse_args()
    
    annotations = gather_data(args.data_path, args.tar_path, pref=args.pref)
    print("Preprocessing completed. Annotations saved.")
