import os
import sys
import csv
import json
import argparse
import shutil
from tqdm import tqdm
from PIL import Image
from data_preprocess.utils.crop import crop_resize_save
from data_preprocess.utils.stat import save_classification_stats

import pandas as pd

def load_excel_labels(xlsx_path):
    """Read diagnosis and patient metadata from the ODIR ``data.xlsx`` sheet.

    Returns a mapping:
        { image_name: {"diagnosis_text": str, "age": str, "sex": str} }

    Raises:
        ValueError: if no label rows could be extracted (wrong header layout).
    """
    frame = pd.read_excel(xlsx_path, engine='openpyxl').fillna("")  # guard against NaN cells

    def _canonical_diagnosis(raw: str) -> str:
        # NOTE(review): any text *containing* "normal" (e.g. "normal fundus")
        # collapses to "normal" — confirm substring match is intended.
        text = raw.strip().lower()
        if not text or "normal" in text:
            return "normal"
        # Unify fullwidth commas, drop the word "fundus", then dedupe + sort parts
        # so equivalent keyword lists map to one canonical label string.
        text = text.replace("，", ",").replace("fundus", "")
        pieces = sorted({piece.strip() for piece in text.split(",") if piece.strip()})
        return "，".join(pieces) if pieces else "normal"

    labels = {}
    for _, record in frame.iterrows():
        patient_age = str(record.get('Patient Age', '')).strip()
        patient_sex = str(record.get('Patient Sex', '')).strip()

        # One row describes both eyes; each eye has its own image + keywords column.
        for side in ('Left', 'Right'):
            fundus_name = str(record.get(f"{side}-Fundus", '')).strip()
            if not fundus_name:
                continue
            raw_diag = str(record.get(f"{side}-Diagnostic Keywords", '')).strip()
            labels[fundus_name] = {
                "diagnosis_text": _canonical_diagnosis(raw_diag),
                "age": patient_age,
                "sex": patient_sex
            }

    if not labels:
        raise ValueError(f"❌ 未从 {xlsx_path} 中成功读取任何标注，请检查表头格式。")

    print(f"✅ 从 {os.path.basename(xlsx_path)} 成功加载 {len(labels)} 条标签记录。")
    return labels


def gather_data(data_path: str, tar_path: str, prefix: str = 'odir') -> dict:
    """Preprocess the ODIR-5K dataset into a unified images + JSON layout.

    For every train/test image: copy an already-preprocessed 512x512 version if
    one exists, otherwise crop/resize the original to 512x512; attach patient
    and diagnosis labels from data.xlsx; then write ``split.json``,
    ``annotations.json`` and a classification-stats file.

    Args:
        data_path: Raw ODIR root (contains ``ODIR-5K/ODIR-5K/`` and optionally
            ``preprocessed_images/``).
        tar_path: Output directory; images land in ``<tar_path>/images``.
        prefix: Prefix for renamed output images, e.g. ``odir_<stem>.png``.

    Returns:
        dict mapping new image name -> annotation entry.

    Raises:
        ValueError: if a preprocessed image is not 512x512, or the xlsx has no labels.
        FileNotFoundError: if an image exists in neither source location.
    """
    os.makedirs(tar_path, exist_ok=True)
    images_dir = os.path.join(tar_path, 'images')
    os.makedirs(images_dir, exist_ok=True)

    split_dict = {'train': [], 'test': []}  # split name -> list of new image names
    data_dict = {}                          # new image name -> annotation entry

    # Path layout inside the raw dataset.
    xlsx_path = os.path.join(data_path, "ODIR-5K", "ODIR-5K", "data.xlsx")


    preproc_dir = os.path.join(data_path, "preprocessed_images")
    train_dir = os.path.join(data_path, "ODIR-5K", "ODIR-5K", "Training Images")
    test_dir = os.path.join(data_path, "ODIR-5K", "ODIR-5K", "Testing Images")

    # Load per-image labels keyed by original file name.
    label_dict = load_excel_labels(xlsx_path)

    for split, src_dir in [('train', train_dir), ('test', test_dir)]:
        if not os.path.exists(src_dir):
            print(f"⚠️ 跳过 {split} 因为路径不存在: {src_dir}")
            continue
        image_list = sorted(os.listdir(src_dir))
        for image_name in tqdm(image_list, desc=f"处理 {split} 图片", unit="images"):
            if not image_name.lower().endswith(('.jpg', '.png', '.jpeg')):
                continue

            # Locate the source file: prefer the preprocessed 512x512 copy.
            src_preproc = os.path.join(preproc_dir, image_name)
            src_original = os.path.join(src_dir, image_name)
            if os.path.exists(src_preproc):
                src_path = src_preproc
                with Image.open(src_path) as img:
                    w, h = img.size
                # Preprocessed images are expected to already be exactly 512x512.
                if (w, h) != (512, 512):
                    raise ValueError(f"{src_path} 尺寸错误，应为512×512，实际 {w}×{h}")
                # Full-frame "crop" metadata so entries stay schema-compatible
                # with the cropped branch below.
                crop_info = {
                    "crop_box": [0, 512, 0, 512],
                    "original_size": [512, 512],
                    "cropped_size": [512, 512]
                }
                # Already the right size — just copy under the new name.
                new_name = f"{prefix}_{os.path.splitext(image_name)[0]}.png"
                dst_path = os.path.join(images_dir, new_name)
                shutil.copy(src_path, dst_path)
            else:
                # Fall back to the raw train/test image: crop and resize it.
                if not os.path.exists(src_original):
                    raise FileNotFoundError(f"图片不存在: {image_name}")
                
                new_name = f"{prefix}_{os.path.splitext(image_name)[0]}.png"
                
                dst_path = os.path.join(images_dir, new_name)
                crop_info = crop_resize_save(
                    image_path=src_original,
                    save_path=dst_path,
                    resize=(512, 512),
                    crop_threshold=25
                )
                src_path = src_original

            # Look up labels; images without an xlsx row are kept but get no
            # patient_info / diagnosis fields.
            if image_name not in label_dict:
                entry = {
                    "image_name": new_name,
                    "image_path": dst_path,
                    "original_path": src_path,
                    "crop_info": crop_info,
        
                }
                data_dict[new_name] = entry
                split_dict[split].append(new_name)
                continue

            label_info = label_dict[image_name]
            diag_text = label_info["diagnosis_text"]
            # Defensive re-normalization (load_excel_labels should already have
            # mapped these cases to "normal").
            if diag_text == "" or diag_text.lower() == "normal fundus":
                diag_text = "normal"

            # Build the full annotation entry for a labeled image.
            entry = {
                "image_name": new_name,
                "image_path": dst_path,
                "original_path": src_path,
                "crop_info": crop_info,
                "patient_info": {
                    "age": label_info["age"],
                    "sex": label_info["sex"]
                },
                "diagnosis": {
                    "classification": {"text": diag_text}
                }
            }

            data_dict[new_name] = entry
            split_dict[split].append(new_name)

    # Persist the split assignment and the per-image annotations.
    with open(os.path.join(tar_path, "split.json"), "w", encoding="utf-8") as f:
        json.dump(split_dict, f, indent=4)

    with open(os.path.join(tar_path, "annotations.json"), "w", encoding="utf-8") as f:
        json.dump(data_dict, f, indent=4)

    # Side effect: writes classification statistics under ./experiments/stat.
    os.makedirs('./experiments/stat', exist_ok=True)
    save_classification_stats(data_dict, os.path.join('./experiments/stat', f"{prefix}.json"))

    print(f"✅ 完成 ODIR 数据集预处理: 训练 {len(split_dict['train'])} 张, 测试 {len(split_dict['test'])} 张.")
    return data_dict


if __name__ == "__main__":
    # CLI entry point: wire up arguments and run the preprocessing pipeline.
    cli = argparse.ArgumentParser(description="ODIR 数据集预处理")
    cli.add_argument(
        "--data_path",
        type=str,
        default="/home/zhangpinglu/data0/gy/Dataset/public_dataset/ODIR",
        help="原始 ODIR 数据集路径，包含 full_df.csv, ODIR-5K/ODIR-5K/",
    )
    cli.add_argument(
        "--tar_path",
        type=str,
        default="/home/zhangpinglu/data0/gy/Dataset/public_processed/ODIR",
        help="预处理结果保存目录",
    )
    cli.add_argument(
        "--prefix",
        type=str,
        default="odir",
        help="输出图片前缀名",
    )
    opts = cli.parse_args()

    gather_data(opts.data_path, opts.tar_path, prefix=opts.prefix)
