| import os |
| import sys |
| import argparse |
| from pathlib import Path |
| from PIL import Image |
| from typing import Any |
| import torch |
| import torchvision.transforms as T |
|
|
# Make the repository root importable and keep Gradio temp files local.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"
|
|
| from jodi_pipeline import JodiPipeline |
| from model.postprocess import ( |
| ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor, |
| NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor, |
| ) |
from transformers import AutoProcessor, Qwen3VLForConditionalGeneration
import glob
|
|
| def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"): |
| """ |
| 将多个图像拼接成一张大图并保存。 |
| Args: |
| image_paths: List[str] 图像路径列表 |
| save_path: 保存路径(包括文件名) |
| images_per_row: 每行图像数量(默认为全部在一行) |
| image_format: 保存格式 |
| """ |
| from PIL import Image |
| import io |
|
|
| |
| images = [Image.open(p).convert("RGB") for p in image_paths] |
|
|
| if images_per_row is None: |
| images_per_row = len(images) |
|
|
| |
    # Resize every image to a common square size (capped at 1024 px). This may
    # distort non-square inputs but keeps the grid cells uniform.
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]
|
|
| |
    # After resizing, all images share the same size; lay them out on a
    # rows x images_per_row grid.
    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    max_height = max(heights)
    rows = (len(images) + images_per_row - 1) // images_per_row
    total_height = max_height * rows
|
|
    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        row_imgs = images[i:i + images_per_row]
        x_offset = 0
        for img in row_imgs:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += max_height
|
|
    parent_dir = os.path.dirname(str(save_path))
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
| new_im.save(save_path, format=image_format.upper()) |
| print(f"🧩 Saved merged image → {save_path}") |
| return save_path |
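
# Example usage (hypothetical paths): merge four modality maps into a 2x2 grid.
# concatenate_images(
#     ["out/image.png", "out/annotation_depth.png",
#      "out/annotation_normal.png", "out/annotation_edge.png"],
#     "out/merged.png", images_per_row=2)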
|
|
def build_init_message(image_paths, role):
    """
    Build the initial Qwen3-VL message for multi-modal image description.
    - `image_paths`: list of image file paths, in modality order.
    - `role`: list[int] of 0/1 flags indicating which modalities are active.
    - Includes a short visual description for each active modality.
    - Uses no coarse caption; the instruction is fixed: "Describe this image."
    """
|
|
| modality_names = [ |
| "image", |
| "annotation_lineart", |
| "annotation_edge", |
| "annotation_depth", |
| "annotation_normal", |
| "annotation_albedo", |
| "annotation_seg_12colors", |
| "annotation_openpose", |
| ] |
|
|
| |
| if len(role) != len(modality_names): |
| raise ValueError(f"role length {len(role)} must match modality_names length {len(modality_names)}") |
| if len(image_paths) != sum(role): |
| raise ValueError(f"image_paths length {len(image_paths)} must match modality_names length {len(modality_names)}") |
|
|
| |
| modality_descriptions = { |
| "image": "provides color, texture, lighting, and overall visual appearance.", |
| "annotation_lineart": "reveals fine structural outlines, shapes, and proportions.", |
| "annotation_edge": "highlights boundaries and contours of objects.", |
| "annotation_depth": "shows spatial distance, perspective, and 3D geometry.", |
| "annotation_normal": "captures surface orientation and fine geometric curvature.", |
| "annotation_albedo": "shows intrinsic surface colors unaffected by lighting.", |
| "annotation_seg_12colors": "provides semantic regions and object boundaries.", |
| "annotation_openpose": "shows human body keypoints, orientation, and posture.", |
| } |
|
|
| readable_map = { |
| "image": "RGB image", |
| "annotation_lineart": "line drawing", |
| "annotation_edge": "edge map", |
| "annotation_depth": "depth map", |
| "annotation_normal": "normal map", |
| "annotation_albedo": "albedo map", |
| "annotation_seg_12colors": "segmentation map", |
| "annotation_openpose": "human pose map", |
| } |
|
|
| |
| selected_modalities = [m for m, r in zip(modality_names, role) if r == 1] |
| available = [str(Path(p)) for p in image_paths] |
|
|
| if not available: |
| raise FileNotFoundError("No valid modality images found in image_paths for selected roles.") |
|
|
| |
| modality_desc_text = " ".join( |
| [f"- The {readable_map[m]} {modality_descriptions[m]}" for m in selected_modalities] |
| ) |
|
|
| |
| text_prompt = ( |
| f"You are given multiple modalities of the same scene, including: " |
| f"{', '.join([readable_map[m] for m in selected_modalities])}. " |
| f"{modality_desc_text} " |
| f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " |
| f"Do NOT mention modality names explicitly. " |
| f"Describe this image." |
| + " " + " ".join(["<image>"] * len(available)) |
| ) |
|
|
| |
| messages = [ |
| { |
| "role": "user", |
| "content": [{"type": "image", "image": path} for path in available] |
| + [{"type": "text", "text": text_prompt}], |
| } |
| ] |
|
|
| return messages |
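
# Sketch of the returned message structure (paths are illustrative):
# [{"role": "user",
#   "content": [{"type": "image", "image": "annotation_normal.png"},
#               {"type": "text", "text": "You are given multiple modalities ..."}]}]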
|
|
|
|
| def build_multimodal_message(root, coarse_caption="a generic scene"): |
| """ |
| Build Qwen3-VL message for multi-modal caption refinement. |
| Automatically detects available modalities under root. |
| """ |
| modality_names = [ |
| "image", |
| "annotation_lineart", |
| "annotation_edge", |
| "annotation_depth", |
| "annotation_normal", |
| "annotation_albedo", |
| "annotation_seg_12colors", |
| "annotation_openpose", |
| ] |
|
|
| |
    # Detect which modality images exist under `root`, trying common extensions.
    available = []
    present_names = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append(str(path))
                present_names.append(name)
                break

    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }
    present_modalities = [readable_map[m] for m in present_names]
|
|
| |
| text_prompt = ( |
| f"You are given multiple modalities of the same scene, including: {', '.join(present_modalities)}. " |
| f"Each modality provides distinct types of visual information that together describe the same subject: " |
| f"- The RGB image provides color, texture, lighting, and the overall visual appearance. " |
| f"- The line drawing reveals detailed structural outlines, shapes, and proportions. " |
| f"- The edge map highlights object boundaries and contours. " |
| f"- The depth map shows spatial distance, perspective, and 3D depth relationships. " |
| f"- The normal map captures fine surface orientation, curvature, and geometric details. " |
| f"- The albedo map shows true surface colors without lighting or shadow effects. " |
| f"- The segmentation map provides semantic regions and object boundaries for scene composition. " |
| f"- The human pose map shows body structure, orientation, and posture of subjects. " |
| f"For each provided modality image, analyze it according to the above definitions and describe " |
| f"the specific visual information it contributes in this particular case. " |
| f"Use all available information together to produce one unified, richly detailed, and realistic description of the scene. " |
| f"Do NOT describe each modality separately or mention modality names. " |
| f"Focus on merging their information into a single coherent image description. " |
| |
| f"Refine the coarse caption into a more detailed and accurate image description. " |
| f"Coarse caption: '{coarse_caption}' " + |
| " ".join(["<image>"] * len(available)) |
| ) |
|
|
| |
| messages = [ |
| { |
| "role": "user", |
| "content": [{"type": "image", "image": path} for path in available] |
| + [{"type": "text", "text": text_prompt}], |
| } |
| ] |
| return messages |
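
# Expected layout under `root` (only files that actually exist are used), e.g.:
#   root/
#     image.png
#     annotation_depth.png
#     annotation_normal.png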
|
|
| |
| |
| |
| def get_parser(): |
| parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.") |
| parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") |
| parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.") |
| parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth', help="Path to model checkpoint.") |
| parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct', help="Path to model checkpoint.") |
| parser.add_argument("--image_root", type=str, default="./assets/2/", help="Prompt text for generation.") |
| parser.add_argument("--condition", type=list[str], default=["normal"], help="Prompt text for generation.") |
| parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.") |
| parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.") |
| parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.") |
| parser.add_argument("--guidance_scale", type=float, default=4.5) |
| parser.add_argument("--height", type=int, default=768) |
| parser.add_argument("--width", type=int, default=1024) |
| parser.add_argument("--seed", type=int, default=1234) |
| parser.add_argument("--output_dir", type=str, default="./demo_c2t_outputs", help="Directory to save results.") |
| return parser |
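
# Example invocation (illustrative; assumes this file is saved as demo_c2t.py):
#   python demo_c2t.py --image_root ./assets/2/ --condition normal depth \
#       --iters 5 --output_dir ./demo_c2t_outputs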
|
|
|
|
| |
| |
| |
|
|
@torch.inference_mode()
def init_i2t(model, processor, image_paths, role, iter_num, max_length=300):
    messages = build_init_message(image_paths, role)

    print(f"init prompt: {messages}")
| |
| inputs = processor.apply_chat_template( |
| messages, |
| tokenize=True, |
| add_generation_prompt=True, |
| return_dict=True, |
| return_tensors="pt" |
| ) |
| inputs = inputs.to(model.device) |
|
|
| |
| generated_ids = model.generate(**inputs, max_new_tokens=max_length) |
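    # Strip the prompt tokens from each sequence so that only newly
    # generated text is decoded.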
| generated_ids_trimmed = [ |
| out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) |
| ] |
| output_text = processor.batch_decode( |
| generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False |
| ) |
| print(output_text) |
|
|
    save_dir = Path(args.output_dir) / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
|
|
| return output_text[0] |
|
|
| @torch.inference_mode() |
| def text_refine(root, model, processor, prompt, iter_num, max_length=300): |
| messages = build_multimodal_message(root, prompt) |
|
|
    print(f"refine prompt: {messages}")
|
|
| inputs = processor.apply_chat_template( |
| messages, |
| tokenize=True, |
| add_generation_prompt=True, |
| return_dict=True, |
| return_tensors="pt" |
| ) |
| inputs = inputs.to(model.device) |
|
|
| |
| generated_ids = model.generate(**inputs, max_new_tokens=max_length) |
| generated_ids_trimmed = [ |
| out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) |
| ] |
| output_text = processor.batch_decode( |
| generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False |
| ) |
| print(output_text) |
|
|
    save_dir = Path(args.output_dir) / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
|
|
| return output_text[0] |
|
|
| @torch.inference_mode() |
| def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator): |
|
|
| |
| |
| outputs = pipe( |
| images=images, |
| role=role, |
| prompt=prompt, |
| negative_prompt=args.negative_prompt, |
| height=args.height, |
| width=args.width, |
| num_inference_steps=args.steps, |
| guidance_scale=args.guidance_scale, |
| num_images_per_prompt=1, |
| generator=generator, |
| task='t2i' |
| ) |
|
|
| |
    # Post-process the generated image plus each condition channel, then
    # convert the tensors to PIL images.
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, args.height, args.width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]
|
|
| |
| |
| |
    save_dir = Path(args.output_dir) / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
|
|
| for idx, img in enumerate(results): |
| name = modality_names[idx] |
| save_path = save_dir / f"{name}.png" |
| img.save(save_path) |
| print(f"💾 Saved {name} → {save_path}") |
|
|
| merged_path = save_dir / f"merged_iteration_{iter_num}.png" |
| concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path) |
|
|
| print(f"\n✅ All results saved in: {save_dir}\n") |
| return save_dir |
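
# Each iteration writes to <output_dir>/iteration_<n>/: one PNG per modality,
# a merged grid image, and (from the captioning stages) caption.txt.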
|
|
|
|
| |
| |
| |
| if __name__ == "__main__": |
| args = get_parser().parse_args() |
| device = torch.device("cuda" if torch.cuda.is_available() else "cpu") |
| print(f"✅ Using device: {device}") |
|
|
| processor = AutoProcessor.from_pretrained( |
| args.model_name_or_path, |
| ) |
|
|
| model = Qwen3VLForConditionalGeneration.from_pretrained( |
| args.text_model_path, |
| attn_implementation="flash_attention_2", |
        dtype=torch.bfloat16,
| ).to(device) |
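
    # Note: attn_implementation="flash_attention_2" requires the flash-attn
    # package and a compatible GPU; drop the argument to use the default attention.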
|
|
| pipe = JodiPipeline(args.config) |
| pipe.from_pretrained(args.model_path) |
|
|
| modality_names = [ |
| "image", |
| "annotation_lineart", |
| "annotation_edge", |
| "annotation_depth", |
| "annotation_normal", |
| "annotation_albedo", |
| "annotation_seg_12colors", |
| "annotation_openpose", |
| ] |
|
|
| |
| post_processors: list[Any] = [ImagePostProcessor()] |
| for condition in pipe.config.conditions: |
| if condition == "lineart": |
| post_processors.append(LineartPostProcessor()) |
| elif condition == "edge": |
| post_processors.append(EdgePostProcessor()) |
| elif condition == "depth": |
| post_processors.append(DepthPostProcessor()) |
| elif condition == "normal": |
| post_processors.append(NormalPostProcessor()) |
| elif condition == "albedo": |
| post_processors.append(AlbedoPostProcessor()) |
| elif condition == "segmentation": |
| post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True)) |
| elif condition == "openpose": |
| post_processors.append(OpenposePostProcessor()) |
| else: |
| print(f"⚠️ Warning: Unknown condition: {condition}") |
| post_processors.append(ImagePostProcessor()) |
|
|
| torch.manual_seed(args.seed) |
| generator = torch.Generator(device=device).manual_seed(args.seed) |
| |
    # Collect all candidate condition images under the input root.
    image_paths = glob.glob(os.path.join(args.image_root, "*.jpg")) + glob.glob(os.path.join(args.image_root, "*.png"))
|
|
| control_images = [] |
|
|
    # For each modality slot, look for a file whose name contains one of the
    # requested condition keywords and whose keyword also matches the slot name.
    for name in modality_names:
        found_path = None
        for c in args.condition:
            matched_files = [f for f in image_paths if c in f and c in name]
            if matched_files:
                found_path = matched_files[0]
                break
        control_images.append(Image.open(found_path).convert("RGB") if found_path else None)
|
|
|
|
    # role[i] == 1 iff a control image was found for modality i, e.g.
    # [0, 0, 0, 0, 1, 0, 0, 0] when only the normal map is present.
    role = [0 if img is None else 1 for img in control_images]
    print(f"role: {role}")
| |
    # Produce an initial caption from the raw condition images, then alternate
    # between image generation and caption refinement below.
    max_length = 1024
    prompt = init_i2t(model, processor, image_paths, role, 0, max_length)
|
|
| for step in range(1, args.iters): |
| save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator) |
| max_length += 100 |
| prompt = text_refine(save_dir, model, processor, prompt, step, max_length) |
|
|
|
|
|
|