import os
import sys
import argparse
import json
import re
from pathlib import Path
from typing import Any

import torch
import torchvision.transforms as T
from PIL import Image
from transformers import AutoProcessor, Qwen3VLForConditionalGeneration

# Make the repo root importable before pulling in local modules.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
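# Pipeline overview: for each (image, question) sample, the script (1) captions the
# raw image, (2) scores caption/image consistency, (3) regenerates all modalities
# from the caption with Jodi, (4) refines the caption against that multimodal
# evidence, and (5) answers the question, keeping the best-scoring iteration.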
def clean_question(q: str) -> str:
    """Coerce the question to a string and collapse runs of whitespace."""
    if not isinstance(q, str):
        q = str(q)
    q = re.sub(r"\s+", " ", q).strip()
    return q
def dump_image(image, save_root):
    """Save a PIL image as a high-quality JPEG under save_root and return the path."""
    os.makedirs(save_root, exist_ok=True)
    save_path = os.path.join(save_root, "input.jpg")
    image.convert("RGB").save(save_path, format="JPEG", quality=95)
    return save_path
|
|
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate multiple images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: output path (including the file name).
        images_per_row: number of images per row (defaults to all in one row).
        image_format: output image format.
    """
    images = [Image.open(p).convert("RGB") for p in image_paths]

    if images_per_row is None:
        images_per_row = len(images)

    # Resize every image to the same square size so the grid math below is uniform.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    total_height = heights[0] * rows  # all images share one height after resizing

    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        row_imgs = images[i:i + images_per_row]
        x_offset = 0
        for img in row_imgs:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]

    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path
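# Illustrative usage (file names hypothetical):
#   concatenate_images(["image.png", "annotation_depth.png", "annotation_edge.png"],
#                      "out/merged.png", images_per_row=3)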
|
|
def build_vqa_message(root, prompt, question):
    """
    Build a Qwen3-VL message for multimodal or single-image VQA.

    Each modality image is explicitly tagged before being fed into Qwen3-VL,
    so that the model can distinguish RGB, edge, depth, normal, etc.
    """
    root_path = Path(root)

    # Single-image case: `root` points directly at an image file.
    if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]:
        image_path = str(root)
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image_path},
                    {"type": "text", "text": f"Answer the following question based on the <image>: {question}"},
                ],
            }
        ]
        return messages

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
    ]

    # Collect the modality files that actually exist under `root`.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = root_path / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary and most reliable modality that truly represents the scene. "
        f"Please answer the following question using visual reasoning primarily grounded in the RGB image, "
        f"consulting the other modalities for complementary structural and semantic cues.\n"
        f"Question: \"{question}\"\n"
    )

    # Interleave a short text tag before each modality image.
    content = []
    print(f'available:{available}')
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})

    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages
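# The resulting `content` list interleaves one text tag per modality image and
# ends with the question prompt, matching the interleaved image/text content
# format accepted by Qwen-VL chat templates.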
|
|
def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""):
    """
    Build a Qwen3-VL message for multimodal caption refinement.

    Explicitly binds each image to its modality name (RGB, edge, depth, etc.)
    so Qwen3-VL can reason over them correctly and refine the caption faithfully.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
    ]

    # Collect the modality files that actually exist under `root`.
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
    }

    present_modalities = [readable_map[n] for n, _ in available]

    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary modality that provides the most reliable view of the scene, "
        f"while the other modalities offer complementary structural and semantic details.\n\n"
        f"### Your Task:\n"
        f"Refine the coarse caption into a more accurate, realistic, and visually grounded description "
        f"of the scene, integrating information from all available modalities.\n\n"
        f"### Rules:\n"
        f"1. Describe only what is visible in the images — do NOT hallucinate.\n"
        f"2. Incorporate the following feedback into your refinement: '{feedback}'\n"
        f"3. Focus on correcting inaccuracies or missing details from the coarse caption.\n\n"
        f"### Coarse Caption:\n'{coarse_caption}'\n\n"
        f"Now refine the caption according to the multimodal evidence below."
    )

    # Alternative, more detailed prompt kept for reference (currently unused).
    text_prompt0 = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** provides the most accurate and realistic appearance of the scene, "
        f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n"
        f"### Your Task:\n"
        f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. "
        f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n"
        f"### Guidelines:\n"
        f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n"
        f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n"
        f"3. Do NOT invent or assume anything not visually supported.\n"
        f"4. Avoid including any additional commentary or evaluations.\n"
        f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n"
        f"### Coarse Caption:\n'{coarse_caption}'\n\n"
        f"### Feedback to Incorporate:\n'{feedback}'\n\n"
        f"Now produce the final refined caption describing the scene based on the multimodal evidence below."
    )

    # Interleave a descriptive text tag before each modality image.
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})

    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages
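# Like build_vqa_message, the refinement message places the tagged modality
# images before the task text, so the model sees the evidence before the
# rewrite instructions.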
|
|
def get_modality_description(name: str) -> str:
    """Return a one-sentence description of a modality, used in prompts to tell
    the model what kind of evidence that modality provides."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")
|
|
def get_parser():
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id of the Qwen3-VL model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the Jodi model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or HF id used to load the processor.")
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing the input images.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the VQA annotation JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Directory for temporary files.")
    parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Fallback question for VQA.")
    parser.add_argument("--steps", type=int, default=20, help="Number of diffusion inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.")
    return parser
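# Example invocation (script name and paths are illustrative):
#   python run_jodi_vqa.py --json /path/to/annotations.json \
#       --data_path /path/to/images --iters 5 --output_dir ./vqa_realworld_outputs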
|
|
@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
    """Answer a question about a single RGB image and save the answer."""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": f"Answer the following question based on the <image>: {question}"},
            ],
        }
    ]
    print(messages)

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the newly generated answer is decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / str(vqa_id)
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]
|
|
@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Generate the initial caption for the raw RGB image (iteration 0)."""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    # Strip the prompt tokens so only the newly generated caption is decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())

    return output_text[0]
|
|
@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, caption, max_length=256):
    """Score caption/image alignment with the VLM and return (score, feedback)."""
    eval_prompt = f"""
You are an image-text alignment evaluator.
Given one RGB image and a description, score how well the text matches
the visual evidence in the image. Then provide one short feedback
sentence suggesting how to make the description better aligned.

Return JSON strictly:
{{"Consistency": <float 0-1>, "Feedback": "<sentence>"}}

Description: "{caption}"
<image>
"""

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # Parse the first JSON object in the reply; fall back to the raw text as feedback.
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}")
    return score, feedback
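# A well-formed reply looks like: {"Consistency": 0.85, "Feedback": "Mention the red car."}
# (example values); anything unparseable falls back to score 0.0 with the raw
# reply as feedback.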
|
|
@torch.inference_mode()
def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300):
    """Refine the caption against the regenerated multimodal evidence under `root`."""
    messages = build_multimodal_message(root, prompt, feedback)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]
|
|
@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
    """Answer the question from the multimodal evidence under `root` and save the answer."""
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{step}" / "vqa_answer"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]
|
|
@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Regenerate all modalities from the current prompt with the Jodi pipeline.

    Relies on the module-level `args` and `post_processors` set up in __main__.
    """
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task='t2i',
    )

    # Post-process every output channel (RGB + conditions) and convert to PIL.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir
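# Resulting layout per iteration: <output_dir>/<image_id>/iteration_<k>/ holding
# one <modality>.png per entry in modality_names plus merged_iteration_<k>.png.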
|
|
if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    processor = AutoProcessor.from_pretrained(args.model_name_or_path)

    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)
|
|
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]
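    # Assumed to match the channel order of Jodi's outputs (RGB first, then
    # pipe.config.conditions), since image_refine pairs results[idx] with
    # modality_names[idx].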
|
|
    # One post-processor per output channel: the RGB image first, then one per condition.
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())
|
|
    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)
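    # The annotation file is expected to be a list of entries shaped like
    # {"image": "<file name>", "question": "<question text>", ...}.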
|
|
    for sample in annotations[:153]:  # evaluate the first 153 samples
        image_path = os.path.join(args.data_path, sample["image"])
        image_id = sample["image"].split('.')[0]
        image = Image.open(image_path)
        question = sample["question"]

        # Role [1, 0, 0, ...]: the RGB image is given; every condition is predicted.
        control_images = [image.convert('RGB')] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_dir, best_caption, best_score = '', '', 0.0
        max_length = 1024

        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        # Iteration 0: caption the raw image, answer the question, and score the caption.
        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        _ = vqa_i2t(model, processor, image_path, question, image_id, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, prompt)

        if score >= best_score:
            best_caption, best_score = prompt, score
            best_dir = image_path
|
|
        for step in range(1, args.iters):
            # Re-seed so each refinement step is reproducible.
            generator = torch.Generator(device=device).manual_seed(args.seed)
            save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names,
                                    generator, height, width, image_id)
            max_length += 100  # allow the caption to grow as it is refined
            prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length)
            result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
            score, feedback = evaluate_consistency(image_path, model, processor, prompt)

            if score >= best_score:
                best_caption, best_score = prompt, score
                best_dir = save_dir
|
|
        result = vqa(best_dir, model, processor, best_caption, question, image_id, 'best', max_length)
        print(f'result:{result}')
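    # The final answer for each sample is written to
    # <output_dir>/<image_id>/iteration_best/vqa_answer/caption.txt.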
|
|