import gradio as gr
import plotly.graph_objects as go

from model import Model
from settings import CACHE_EXAMPLES

# Load the Point-E model once at startup so every request reuses the same
# weights instead of re-instantiating the pipeline on each call.
model = Model()


def inference(prompt):
    # Fixed generation settings; a constant seed keeps results reproducible.
    seed: int = 0
    guidance_scale: float = 15.0
    num_inference_steps: int = 64
    model.run_text(prompt, seed, guidance_scale, num_inference_steps)
    # The Interface below uses a text output, so the prompt is echoed back
    # once generation has finished.
    return prompt
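

# A minimal sketch of how the generated points could be shown as an interactive
# 3D scatter plot using the plotly import above. It assumes model.run_text
# returns (or can be made to return) a point-e style point cloud exposing
# `coords` (an (N, 3) array) and per-point RGB `channels` in [0, 1]; adjust it
# to whatever model.run_text actually returns. If wired in, the Interface
# output below would switch from "text" to gr.Plot().
def point_cloud_figure(pc) -> go.Figure:
    # Convert per-point float colors into plotly-compatible "rgb(r, g, b)" strings.
    colors = [
        f"rgb({int(r * 255)}, {int(g * 255)}, {int(b * 255)})"
        for r, g, b in zip(pc.channels["R"], pc.channels["G"], pc.channels["B"])
    ]
    return go.Figure(
        data=[
            go.Scatter3d(
                x=pc.coords[:, 0],
                y=pc.coords[:, 1],
                z=pc.coords[:, 2],
                mode="markers",
                marker=dict(size=2, color=colors),
            )
        ],
        layout=dict(scene=dict(aspectmode="data")),
    )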
demo = gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",
    examples=[
        ["a red motorcycle"],
        ["a red pumpkin"],
        ["a yellow rubber duck"],
    ],
    cache_examples=CACHE_EXAMPLES,
    title="Point-E demo: text to 3D",
    description="""Generate 3D point clouds with [Point-E](https://github.com/openai/point-e/tree/main). This demo uses a small, lower-quality text-to-3D model to produce 3D point clouds directly from text descriptions.
Check out the [notebook](https://github.com/openai/point-e/blob/main/point_e/examples/text2pointcloud.ipynb).
""",
)

demo.launch(debug=True)