import os
import json
import argparse
from collections import defaultdict

def load_image_ids(file_path):
    """Load the image file names (without directory part) for one split.

    Returns a set of non-empty, whitespace-stripped lines. Empty lines are
    skipped so a trailing newline in the split file does not inject an
    empty-string id into the set.
    """
    with open(file_path, "r", encoding="utf8") as f:
        return {line.strip() for line in f if line.strip()}

def main(args):
    """Build Alpaca-style multimodal JSON files for Flickr8k.

    Reads the caption token file, assigns each captioned image to its
    train/val/test split, writes one ``<split>_data.json`` per split into
    ``args.tar_dir``, and finally emits a ``dataset_info.json`` describing
    the column mapping for each generated dataset.

    Args:
        args: parsed argparse namespace with attributes ``image_dir``,
            ``text_dir``, ``tar_dir``, ``dataset_name`` and ``instruction``.
    """
    IMG_DIR = args.image_dir
    TEXT_DIR = args.text_dir
    TAR_DIR = args.tar_dir
    DATASET_NAME = args.dataset_name
    INSTRUCTION = args.instruction

    os.makedirs(TAR_DIR, exist_ok=True)

    # Standard Flickr8k text-file names, joined onto the text directory.
    token_file = os.path.join(TEXT_DIR, "Flickr8k.token.txt")
    train_file = os.path.join(TEXT_DIR, "Flickr_8k.trainImages.txt")
    val_file = os.path.join(TEXT_DIR, "Flickr_8k.devImages.txt")
    test_file = os.path.join(TEXT_DIR, "Flickr_8k.testImages.txt")

    # Split membership: split name -> set of image file names.
    splits = {
        "train": load_image_ids(train_file),
        "val": load_image_ids(val_file),
        "test": load_image_ids(test_file)
    }

    # Multimodal image placeholder token expected by the training framework.
    image_token = "<image>"
    # Loop-invariant: the instruction text is identical for every sample,
    # so build the "<image>" + instruction string once, outside the loop.
    instruction_with_image = f"{image_token}{INSTRUCTION}"

    # Parse the token file and route each caption into its split.
    data_dict = defaultdict(list)
    with open(token_file, "r", encoding="utf8") as f:
        for line in f:
            # Each line is "<img_name>#<caption_idx>\t<caption>". Split only
            # on the FIRST tab so captions containing tabs survive intact,
            # and skip blank/malformed lines instead of crashing.
            parts = line.strip().split('\t', 1)
            if len(parts) != 2:
                continue
            img_tag, caption = parts
            img_name = img_tag.split('#')[0]
            img_path = os.path.join(IMG_DIR, img_name)
            if not os.path.isfile(img_path):
                continue  # caption refers to an image file that is missing
            # Determine which split (if any) this image belongs to.
            split_found = next(
                (split for split, id_set in splits.items() if img_name in id_set),
                None)
            if not split_found:
                continue  # image was not assigned to any split

            item = {
                "instruction": instruction_with_image,
                "input": "",
                "output": caption,
                "images": [img_path]
            }
            data_dict[split_found].append(item)

    # Write one <split>_data.json per split that received data.
    for split, data_list in data_dict.items():
        data_json_path = os.path.join(TAR_DIR, f"{split}_data.json")
        with open(data_json_path, "w", encoding="utf8") as f:
            json.dump(data_list, f, ensure_ascii=False, indent=2)
        print(f"{split} 集合已生成 {data_json_path}，共 {len(data_list)} 条多模态数据。")

    # Emit dataset_info.json mapping each dataset name to its file/columns.
    dataset_info = {}
    for split in data_dict:
        dataset_info[f"{DATASET_NAME}_{split}"] = {
            "file_name": f"{split}_data.json",
            "columns": {
                "prompt": "instruction",
                "query": "input",
                "response": "output",
                "images": "images"
            }
        }
    dataset_info_path = os.path.join(TAR_DIR, "dataset_info.json")
    with open(dataset_info_path, "w", encoding="utf8") as f:
        json.dump(dataset_info, f, ensure_ascii=False, indent=2)
    print(f"已生成 {dataset_info_path}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Flickr8k多模态数据split自动处理，按train/val/test输出json")
    # (flag, default, help) spec for every string CLI option; registering
    # them through one loop keeps the definitions uniform.
    cli_options = [
        ("--image_dir", "/home/ma-user/work/Dataset/Flickr8k_Dataset/images",
         "图片主目录，如 ~/work/Dataset/Flickr8k_Dataset/"),
        ("--text_dir", "/home/ma-user/work/Dataset/Flickr8k_Dataset/Flickr8k_text",
         "文本文件目录，如 ~/work/Dataset/Flickr8k_Dataset/Flickr8k_text/"),
        ("--tar_dir", "./experiments",
         "输出目录，将生成 train_data.json、val_data.json、test_data.json 和 dataset_info.json"),
        ("--dataset_name", "flickr8k_alpaca",
         "数据集名称，自动加 split 后缀"),
        ("--instruction", "Describe the content of the given image in one sentence.",
         "多模态指令内容"),
    ]
    for flag, default_value, help_text in cli_options:
        parser.add_argument(flag, type=str, default=default_value, help=help_text)
    main(parser.parse_args())
