import subprocess

# a workaround for the Gradio SDK: install the CUDA build of torch and the pointnet2 ops at startup
subprocess.call(["pip", "install", "torch==2.1.0+cu118", "torchvision==0.16.0+cu118", "-i", "https://download.pytorch.org/whl/cu118"])
subprocess.call(["git", "clone", "https://github.com/yuanze1024/Pointnet2_PyTorch.git"])
subprocess.call(["pip", "install", "."], cwd="Pointnet2_PyTorch/pointnet2_ops_lib")

import functools
import os
import random

import gradio as gr
import torch
from datasets import load_dataset
from PIL import Image

from feature_extractors.uni3d_embedding_encoder import Uni3dEmbeddingEncoder

MAX_BATCH_SIZE = 16
MAX_QUEUE_SIZE = 10
MAX_K_RETRIEVAL = 20

cache_dir = "./.cache"
encoder = Uni3dEmbeddingEncoder(cache_dir)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

source_id_list = torch.load("data/source_id_list.pt")
source_to_id = {source_id: i for i, source_id in enumerate(source_id_list)}
dataset = load_dataset("VAST-AI/LD-T3D", name="rendered_imgs_diag_above", split="base", cache_dir=cache_dir)
relation = load_dataset("VAST-AI/LD-T3D", split="full", cache_dir=cache_dir)


@functools.lru_cache()
def get_embedding(option, modality, angle=None):
    save_path = f'data/objaverse_{option}_{modality + (("_" + str(angle)) if angle is not None else "")}_embeddings.pt'
    if os.path.exists(save_path):
        return torch.load(save_path)
    else:
        raise gr.Error(f"Embedding file not found: {save_path}")


def predict(xb, xq, top_k):
    xb = xb.to(xq.device)
    sim = xq @ xb.T  # (nq, nb)
    _, indices = sim.topk(k=top_k, largest=True)
    return indices


def get_image_and_id(index):
    return dataset[index]["image"], dataset[index]["source_id"]


def retrieve_3D_models(textual_query, top_k, modality_list):
    if textual_query == "":
        raise gr.Error("Please enter a textual query")
    if len(textual_query.split()) > 20:
        gr.Warning("Retrieval result may be inaccurate due to long textual query")
    if len(modality_list) == 0:
        raise gr.Error("Please select at least one modality")

    def _retrieve_3D_models(query, top_k, modals: list):
        option = "uni3d"
        op = "add"
        modals = list(modals)  # copy so the caller's list is not mutated below
        is_text = "text" in modals
        is_3D = "3D" in modals
        if is_text:
            modals.remove("text")
        if is_3D:
            modals.remove("3D")
        angles = modals

        # get base embeddings
        embeddings = []
        if is_text:
            embeddings.append(get_embedding(option, "text"))
        if len(angles) > 0:
            for angle in angles:
                embeddings.append(get_embedding(option, "image", angle=angle))
        if is_3D:
            embeddings.append(get_embedding(option, "3D"))

        # fuse base embeddings
        if len(embeddings) > 1:
            if op == "concat":
                embeddings = torch.cat(embeddings, dim=-1)
            elif op == "add":
                embeddings = sum(embeddings)
            else:
                raise ValueError(f"Unsupported operation: {op}")
            embeddings /= embeddings.norm(dim=-1, keepdim=True)
        else:
            embeddings = embeddings[0]

        # encode query embeddings
        xq = encoder.encode_query(query)
        if op == "concat":
            xq = xq.repeat(1, embeddings.shape[-1] // xq.shape[-1])  # repeat to be aligned with xb
        xq /= xq.norm(dim=-1, keepdim=True)
        pred_ind_list = predict(embeddings, xq, top_k)
        return pred_ind_list[0].cpu().tolist()  # we have only one query

    indices = _retrieve_3D_models(textual_query, top_k, modality_list)
    return [get_image_and_id(index) for index in indices]
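
# Illustrative sketch (not used by the demo): a shape-level view of the fusion-and-ranking math
# in `_retrieve_3D_models` above. Modality embeddings are summed ("add" fusion), L2-normalized,
# and ranked against a normalized query embedding by cosine similarity via `predict`. The tensors
# are random stand-ins for the precomputed Uni3D embeddings and the encoded query; sizes are
# hypothetical.
def _fusion_ranking_sketch(top_k=5):
    nb, dim = 1000, 1024                        # hypothetical gallery size and embedding dim
    text_emb = torch.randn(nb, dim)             # stand-in for get_embedding("uni3d", "text")
    image_emb = torch.randn(nb, dim)            # stand-in for get_embedding("uni3d", "image", angle="front")
    xb = text_emb + image_emb                   # "add" fusion: element-wise sum over modalities
    xb = xb / xb.norm(dim=-1, keepdim=True)     # L2-normalize the fused gallery embeddings
    xq = torch.randn(1, dim)                    # stand-in for encoder.encode_query(query)
    xq = xq / xq.norm(dim=-1, keepdim=True)     # L2-normalize the query embedding
    return predict(xb, xq, top_k)[0].tolist()   # cosine-similarity top-k indices, as in retrieve_3D_models
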
def get_sub_dataset(sub_dataset_id, sorted=False):
    """Get a sub-dataset by sub_dataset_id in [1, 1000].

    Returns:
        caption: str
        images: list of tuple (PIL.Image, str)
    """
    rel = relation[sub_dataset_id - 1]
    target_ids, GT_ids, caption, difficulty = set(rel["target_ids"]), set(rel["GT_ids"]), rel["caption"], rel["difficulty"]
    negative_ids = target_ids - GT_ids

    def handle_image(image, is_gt=False):
        """Surround a PIL.Image with a green border if it is a Ground Truth, else a red border."""
        border_color = (0, 255, 0) if is_gt else (255, 0, 0)
        border_width = 5
        new_image = Image.new("RGBA", (image.width + 2 * border_width, image.height + 2 * border_width), border_color)
        new_image.paste(image, (border_width, border_width))
        return new_image

    results = []
    if not sorted:
        for ind in target_ids:
            image, source_id = get_image_and_id(source_to_id[ind])
            results.append((handle_image(image, ind in GT_ids), source_id))
    else:
        for gt_id in GT_ids:
            image, source_id = get_image_and_id(source_to_id[gt_id])
            results.append((handle_image(image, True), source_id))
        for neg_id in negative_ids:
            image, source_id = get_image_and_id(source_to_id[neg_id])
            results.append((handle_image(image, False), source_id))
    return caption, results


def feel_lucky(is_sorted):
    sub_dataset_id = random.randint(1, 1000)
    return sub_dataset_id, *get_sub_dataset(sub_dataset_id, is_sorted)


def launch():
    with gr.Blocks() as demo:
        # https://sketchfab.com/3d-models/fd30f87848c9454c9225eccc39726787
        md = gr.Markdown(r"""## LD-T3D: A Large-scale and Diverse Benchmark for Text-based 3D Model Retrieval

**Official 🤗 Gradio demo** for LD-T3D: A Large-scale and Diverse Benchmark for Text-based 3D Model Retrieval (paper not ready yet)""")
        with gr.Tab("Retrieval Visualization"):
            with gr.Row():
                md2 = gr.Markdown(r"""### Visualization for Text-Based 3D Model Retrieval

We build a visualization demo for text-based 3D model retrieval. Due to the memory limitation of HF Space, we only support [Uni3D](https://github.com/baaivision/Uni3D), which has shown excellent performance in our benchmark. Moreover, **we only search in a subset of Objaverse, which contains 89K 3D models**.

**Note**: The *Modality List* refers to the features ensembled by the retrieval methods. According to our experimental results, the more modalities are used, the better the methods generally perform.
Also, you may want to check a 3D model in a 3D model viewer; in that case, you can visit [Objaverse](https://objaverse.allenai.org/explore) for exploration.""")
            with gr.Row():
                textual_query = gr.Textbox(label="Textual Query", autofocus=True, value="Super Mario")
                modality_list = gr.CheckboxGroup(label="Modality List", value=[],
                                                 choices=["text", "front", "back", "left", "right", "above",
                                                          "below", "diag_above", "diag_below", "3D"])
            with gr.Row():
                top_k = gr.Slider(minimum=1, maximum=MAX_K_RETRIEVAL, step=1, label="Top K Retrieval Result", value=5, scale=2)
                run = gr.Button("Search", scale=1, variant='primary')
                clear_button = gr.ClearButton(scale=1)
            with gr.Row():
                output = gr.Gallery(format="webp", label="Retrieval Result", columns=5, type="pil", interactive=False)
            run.click(retrieve_3D_models, [textual_query, top_k, modality_list], output,
                      # batch=True, max_batch_size=MAX_BATCH_SIZE
                      )
            clear_button.click(lambda: ["", 5, [], []], outputs=[textual_query, top_k, modality_list, output])
            examples = gr.Examples(examples=[["An ice cream with a cherry on top", 10, ["text", "front", "back", "left", "right", "above", "below", "diag_above", "diag_below", "3D"]],
                                             ["A mid-age castle", 10, ["text", "front", "back", "left", "right", "above", "below", "diag_above", "diag_below", "3D"]],
                                             ["A coke", 10, ["text", "front", "back", "left", "right", "above", "below", "diag_above", "diag_below", "3D"]]],
                                   inputs=[textual_query, top_k, modality_list],
                                   outputs=output,
                                   fn=retrieve_3D_models)
        with gr.Tab("Federated Dataset"):
            md3 = gr.Markdown(r"""### Visualization for the Federated Dataset

We provide a federated dataset that contains **1000** textual queries and **89K** 3D models, organized into **1000** sub-datasets of around **100** 3D models each. In total, there are 100K text-to-3D-model relationship pairs. Here is a visualization of the dataset.

**Usage:**
1. Click the "I'm Feeling Lucky!" button to randomly select a sub-dataset.
2. Or press **Enter** to submit a Sub-dataset ID in **[1, 1000]** (see our dataset [LD-T3D](https://huggingface.co/datasets/VAST-AI/LD-T3D) for details) to show the corresponding sub-dataset.

**Note:** The *Query* is the textual query used for this sub-dataset. *Sorted* puts the Ground Truths at the front of the results. The color surrounding a 3D model indicates whether it is a good fit for the textual query: **green** marks a Ground Truth, while **red** indicates a mismatch.""")
            with gr.Row():
                lucky = gr.Button("I'm Feeling Lucky!", scale=1, variant='primary')
                query_id = gr.Number(label="Sub-dataset ID", scale=1, minimum=1, maximum=1000, step=1, interactive=True, value=986)
                is_sorted = gr.Checkbox(value=False, label="", scale=1, info="Sorted")
                query = gr.Textbox(label="Textual Query", scale=3, interactive=False)
                # difficulty = gr.Textbox(label="Query Difficulty", scale=1, interactive=False)
                # model3d = gr.Model3D(interactive=False, scale=1)
            with gr.Row():
                output2 = gr.Gallery(format="webp", label="3D Models in Sub-dataset", columns=5, type="pil", interactive=False)
            lucky.click(feel_lucky, inputs=is_sorted, outputs=[query_id, query, output2])
            query_id.submit(get_sub_dataset, [query_id, is_sorted], [query, output2])
            is_sorted.change(get_sub_dataset, [query_id, is_sorted], [query, output2])

    demo.queue(max_size=MAX_QUEUE_SIZE)
    demo.launch(server_name='0.0.0.0')


if __name__ == "__main__":
    launch()
    # print(len(retrieve_3D_models("A chair with a wooden frame and a cushioned seat", 5, ["3D", "diag_above", "diag_below"])))
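
# Hedged sketch (not wired into the demo UI): how a sub-dataset's Ground-Truth relationships from
# the `relation` split could be used to score a retrieval, here as recall@k over the full 89K
# gallery using only the text-branch embeddings. It assumes the precomputed embedding file exists,
# is L2-normalized, and is row-aligned with `source_id_list` (the same assumptions the demo itself
# makes); the default sub_dataset_id is just the demo's example value.
def _subdataset_recall_sketch(sub_dataset_id=986, top_k=10):
    rel = relation[sub_dataset_id - 1]
    xb = get_embedding("uni3d", "text")                # precomputed gallery embeddings (text branch)
    xq = encoder.encode_query(rel["caption"])          # encode this sub-dataset's textual query
    xq = xq / xq.norm(dim=-1, keepdim=True)
    top = predict(xb, xq, top_k)[0].cpu().tolist()
    retrieved_ids = {source_id_list[i] for i in top}   # map row indices back to Objaverse source_ids
    return len(retrieved_ids & set(rel["GT_ids"])) / len(rel["GT_ids"])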