# Hugging Face Spaces capture: the Space was reporting a "Runtime error"
# status when this page was saved.
import gradio as gr
from PIL import Image
import os
import spaces

from OmniGen import OmniGenPipeline

# NOTE(review): `Image`, `os`, and `spaces` are imported but never used in
# this file. The `spaces` import suggests a ZeroGPU `@spaces.GPU` decorator
# was intended on the generation function — confirm against the Space
# hardware configuration.

# Initialize the OmniGen pipeline from the public Hugging Face checkpoint.
# This downloads the model weights at import time.
pipe = OmniGenPipeline.from_pretrained(
    "Shitao/OmniGen-v1"
)
# Main image-generation entry point wired to the Gradio UI.
def generate_image(text, img1, img2, img3, height, width, guidance_scale, img_guidance_scale, inference_steps, seed, separate_cfg_infer):
    """Generate an image with OmniGen from a prompt and up to three reference images.

    Args:
        text: Prompt string; reference images are addressed in it as
            ``<img><|image_i|></img>``.
        img1, img2, img3: Optional reference-image file paths (``None`` when unused).
        height, width: Output resolution in pixels.
        guidance_scale: Classifier-free guidance strength.
        img_guidance_scale: Guidance strength applied to the reference images.
        inference_steps: Number of diffusion steps.
        seed: RNG seed for reproducibility.
        separate_cfg_infer: Run the CFG branches separately (slower but uses
            less memory, per the upstream demo).

    Returns:
        The first image produced by the pipeline.
    """
    # Drop unset image slots; the pipeline expects None (not an empty list)
    # for pure text-to-image generation.
    input_images = [img for img in (img1, img2, img3) if img is not None]
    if not input_images:
        input_images = None
    # BUG FIX: the original hard-coded img_guidance_scale=1.6 and
    # separate_cfg_infer=True here, silently ignoring the UI slider and
    # checkbox values passed into this function.
    output = pipe(
        prompt=text,
        input_images=input_images,
        height=height,
        width=width,
        guidance_scale=guidance_scale,
        img_guidance_scale=img_guidance_scale,
        num_inference_steps=inference_steps,
        separate_cfg_infer=separate_cfg_infer,
        use_kv_cache=False,
        seed=seed,
    )
    return output[0]
# Example rows for the gr.Examples gallery. Each row matches the input
# widget order: prompt, img1, img2, img3, height, width, guidance_scale,
# img_guidance_scale, inference_steps, seed, separate_cfg_infer.
def get_example():
    """Return the built-in demo cases.

    Fixes over the original data: several prompts contained malformed image
    placeholders — a stray backslash before the closing tag (an invalid
    ``\\/`` escape sequence) and an unclosed ``<img>`` written where the
    closing ``</img>`` belongs. All placeholders now follow the
    ``<img><|image_i|></img>`` format documented in the UI description.
    """
    case = [
        [
            "A curly-haired man in a red shirt is drinking tea.",
            None, None, None,
            1024, 1024, 2.5, 1.6, 50, 0, True,
        ],
        [
            "The woman in <img><|image_1|></img> waves her hand happily in the crowd",
            "./imgs/test_cases/zhang.png", None, None,
            1024, 1024, 2.5, 1.9, 50, 128, True,
        ],
        [
            "A man in a black shirt is reading a book. The man is the right man in <img><|image_1|></img>.",
            "./imgs/test_cases/two_man.jpg", None, None,
            1024, 1024, 2.5, 1.6, 50, 0, True,
        ],
        [
            "Two woman are raising fried chicken legs in a bar. A woman is <img><|image_1|></img>. The other woman is <img><|image_2|></img>.",
            "./imgs/test_cases/mckenna.jpg", "./imgs/test_cases/Amanda.jpg", None,
            1024, 1024, 2.5, 1.8, 50, 168, True,
        ],
        [
            "A man and a short-haired woman with a wrinkled face are standing in front of a bookshelf in a library. The man is the man in the middle of <img><|image_1|></img>, and the woman is oldest woman in <img><|image_2|></img>",
            "./imgs/test_cases/1.jpg", "./imgs/test_cases/2.jpg", None,
            1024, 1024, 2.5, 1.6, 50, 60, True,
        ],
        [
            "A man and a woman are sitting at a classroom desk. The man is the man with yellow hair in <img><|image_1|></img>. The woman is the woman on the left of <img><|image_2|></img>",
            "./imgs/test_cases/3.jpg", "./imgs/test_cases/4.jpg", None,
            1024, 1024, 2.5, 1.8, 50, 66, True,
        ],
        [
            "The flower <img><|image_1|></img> is placed in the vase which is in the middle of <img><|image_2|></img> on a wooden table of a living room",
            "./imgs/test_cases/rose.jpg", "./imgs/test_cases/vase.jpg", None,
            1024, 1024, 2.5, 1.6, 50, 0, True,
        ],
        [
            "<img><|image_1|></img>\n Remove the woman's earrings. Replace the mug with a clear glass filled with sparkling iced cola.",
            "./imgs/demo_cases/t2i_woman_with_book.png", None, None,
            1024, 1024, 2.5, 1.6, 50, 222, True,
        ],
        [
            "Detect the skeleton of human in this image: <img><|image_1|></img>.",
            "./imgs/test_cases/control.jpg", None, None,
            1024, 1024, 2.0, 1.6, 50, 0, True,
        ],
        [
            "Generate a new photo using the following picture and text as conditions: <img><|image_1|></img>\n A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him.",
            "./imgs/demo_cases/skeletal.png", None, None,
            1024, 1024, 2, 1.6, 50, 42, True,
        ],
        [
            "Following the pose of this image <img><|image_1|></img>, generate a new photo: A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him.",
            "./imgs/demo_cases/edit.png", None, None,
            1024, 1024, 2.0, 1.6, 50, 123, True,
        ],
        [
            "Following the depth mapping of this image <img><|image_1|></img>, generate a new photo: A young girl is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him.",
            "./imgs/demo_cases/edit.png", None, None,
            1024, 1024, 2.0, 1.6, 50, 1, True,
        ],
        [
            "<img><|image_1|></img> What item can be used to see the current time? Please remove it.",
            "./imgs/test_cases/watch.jpg", None, None,
            1024, 1024, 2.5, 1.6, 50, 0, True,
        ],
        [
            "According to the following examples, generate an output for the input.\nInput: <img><|image_1|></img>\nOutput: <img><|image_2|></img>\n\nInput: <img><|image_3|></img>\nOutput: ",
            "./imgs/test_cases/icl1.jpg", "./imgs/test_cases/icl2.jpg", "./imgs/test_cases/icl3.jpg",
            1024, 1024, 2.5, 1.6, 50, 1, True,
        ],
    ]
    return case
# Adapter so gr.Examples can invoke the same generation routine as the
# "generate" button.
def run_for_examples(text, img1, img2, img3, height, width, guidance_scale, img_guidance_scale, inference_steps, seed, separate_cfg_infer,):
    """Forward one example row's values straight to generate_image."""
    row = (
        text, img1, img2, img3,
        height, width,
        guidance_scale, img_guidance_scale,
        inference_steps, seed, separate_cfg_infer,
    )
    return generate_image(*row)
# Markdown blurb rendered at the top of the UI via gr.Markdown. It is a
# runtime string (Japanese) and is kept verbatim: it summarizes OmniGen's
# supported tasks, the <img><|image_*|></img> placeholder syntax, and
# practical prompting tips.
description = """
OmniGenは、以下のような様々なタスクを実行できる統合画像生成モデルです:
- テキストから画像への生成
- 被写体主導の生成
- アイデンティティを保持した生成
- 画像条件付き生成
マルチモーダルから画像を生成する場合:
- プロンプトには文字列を入力
- 入力画像はリストとして渡す
- プロンプト内の画像プレースホルダーは `<img><|image_*|></img>` 形式で指定
(1番目の画像は <img><|image_1|></img>、2番目は <img><|image_2|></img>)
使用上のヒント:
- 色が過飽和な場合:`guidance_scale` を下げてください
- 画質が低い場合:より詳細なプロンプトを使用してください
- アニメ調の場合:プロンプトに `photo` を追加してみてください
- 生成済み画像の編集:同じseedは使用できません(例:生成時seed=0なら、編集時はseed=1など)
- 画像編集タスクでは、画像を編集指示の前に配置することを推奨
(例:`<img><|image_1|></img> remove suit` を使用し、`remove suit <img><|image_1|></img>` は避ける)
"""
# NOTE(review): this flag is never read anywhere in this file — it looks
# like leftover dead code; confirm before removing.
separate_cfg_infer_arg = False
# ---- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    # Title (Japanese: "OmniGen: unified image generation model") with links
    # to the paper and the code repository.
    gr.Markdown("# OmniGen: 統合画像生成モデル [論文](https://arxiv.org/abs/2409.11340) [コード](https://github.com/VectorSpaceLab/OmniGen)")
    gr.Markdown(description)
    with gr.Row():
        with gr.Column():
            # Prompt input; the label explains the <img><|image_i|></img>
            # reference syntax for the i-th input image.
            prompt_input = gr.Textbox(
                label="プロンプトを入力してください(i番目の入力画像は<img><|image_i|></img>で指定)",
                placeholder="ここにプロンプトを入力..."
            )
            with gr.Row(equal_height=True):
                # Up to three optional reference images, delivered to the
                # handler as file paths (type="filepath").
                image_input_1 = gr.Image(label="画像1: <img><|image_1|></img>", type="filepath")
                image_input_2 = gr.Image(label="画像2: <img><|image_2|></img>", type="filepath")
                image_input_3 = gr.Image(label="画像3: <img><|image_3|></img>", type="filepath")
            # Output resolution, 256-2048 px in steps of 16.
            height_input = gr.Slider(
                label="画像の高さ", minimum=256, maximum=2048, value=1024, step=16
            )
            width_input = gr.Slider(
                label="画像の幅", minimum=256, maximum=2048, value=1024, step=16
            )
            # Sampling hyper-parameters: CFG scale, image-guidance scale,
            # diffusion step count, and RNG seed.
            guidance_scale_input = gr.Slider(
                label="ガイダンススケール", minimum=1.0, maximum=5.0, value=2.5, step=0.1
            )
            img_guidance_scale_input = gr.Slider(
                label="画像ガイダンススケール", minimum=1.0, maximum=2.0, value=1.6, step=0.1
            )
            num_inference_steps = gr.Slider(
                label="推論ステップ数", minimum=1, maximum=100, value=50, step=1
            )
            seed_input = gr.Slider(
                label="シード値", minimum=0, maximum=2147483647, value=42, step=1
            )
            # Checkbox toggling separate CFG inference.
            separate_cfg_infer = gr.Checkbox(
                label="CFG推論を分離", info="分離CFG推論を有効にする"
            )
            # Generate button.
            generate_button = gr.Button("画像を生成")
        with gr.Column():
            # Output widget for the generated image.
            output_image = gr.Image(label="生成された画像")
    # Wire the button to generate_image; the inputs list order must match
    # generate_image's positional parameter order exactly.
    generate_button.click(
        generate_image,
        inputs=[
            prompt_input,
            image_input_1,
            image_input_2,
            image_input_3,
            height_input,
            width_input,
            guidance_scale_input,
            img_guidance_scale_input,
            num_inference_steps,
            seed_input,
            separate_cfg_infer,
        ],
        outputs=output_image,
    )
    # Clickable example rows; same widget order as the button wiring above.
    gr.Examples(
        examples=get_example(),
        fn=run_for_examples,
        inputs=[
            prompt_input,
            image_input_1,
            image_input_2,
            image_input_3,
            height_input,
            width_input,
            guidance_scale_input,
            img_guidance_scale_input,
            num_inference_steps,
            seed_input,
            separate_cfg_infer,
        ],
        outputs=output_image,
    )
# Launch the application server.
demo.launch()