freddyaboulton (HF Staff) committed
Commit fd17271 · verified · 1 parent: debee16

Add 50 file(s)
demos/audio_debugger/run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: audio_debugger"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import subprocess\n", "\n", "# get_audio returns the path to the audio file\n", "audio_file = gr.get_audio(\"cantina.wav\")\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tab(\"Audio\"):\n", " gr.Audio(audio_file, buttons=[\"download\"])\n", " with gr.Tab(\"Interface\"):\n", " gr.Interface(\n", " lambda x: x, \n", " gr.Audio(),\n", " gr.Audio(),\n", " examples=[audio_file], \n", " cache_examples=True\n", " )\n", " with gr.Tab(\"Streaming\"):\n", " gr.Interface(\n", " lambda x: x,\n", " gr.Audio(streaming=True),\n", " \"audio\",\n", " examples=[audio_file],\n", " cache_examples=True,\n", " )\n", " with gr.Tab(\"console\"):\n", " ip = gr.Textbox(label=\"User IP Address\")\n", " gr.Interface(\n", " lambda cmd: subprocess.run([cmd], capture_output=True, shell=True, check=False)\n", " .stdout.decode(\"utf-8\")\n", " .strip(),\n", " \"text\",\n", " \"text\",\n", " )\n", "\n", " def get_ip(request: gr.Request):\n", " return request.client.host\n", "\n", " demo.load(get_ip, None, ip)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: audio_debugger"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import subprocess\n", "\n", "# get_audio returns the path to the audio file\n", "audio_file = gr.get_audio(\"cantina.wav\")\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tab(\"Audio\"):\n", " gr.Audio(audio_file, buttons=[\"download\"])\n", " with gr.Tab(\"Interface\"):\n", " gr.Interface(\n", " lambda x: x, \n", " gr.Audio(),\n", " gr.Audio(),\n", " examples=[audio_file], \n", " cache_examples=True,\n", " api_name=\"predict\"\n", " )\n", " with gr.Tab(\"Streaming\"):\n", " gr.Interface(\n", " lambda x: x,\n", " gr.Audio(streaming=True),\n", " \"audio\",\n", " examples=[audio_file],\n", " cache_examples=True,\n", " api_name=\"predict\"\n", " )\n", " with gr.Tab(\"console\"):\n", " ip = gr.Textbox(label=\"User IP Address\")\n", " gr.Interface(\n", " lambda cmd: subprocess.run([cmd], capture_output=True, shell=True, check=False)\n", " .stdout.decode(\"utf-8\")\n", " .strip(),\n", " \"text\",\n", " \"text\",\n", " api_name=\"predict\"\n", " )\n", "\n", " def get_ip(request: gr.Request):\n", " return request.client.host\n", "\n", " demo.load(get_ip, None, ip)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/audio_debugger/run.py CHANGED
@@ -13,7 +13,8 @@ with gr.Blocks() as demo:
             gr.Audio(),
             gr.Audio(),
             examples=[audio_file],
-            cache_examples=True
+            cache_examples=True,
+            api_name="predict"
         )
     with gr.Tab("Streaming"):
         gr.Interface(
@@ -22,6 +23,7 @@ with gr.Blocks() as demo:
             "audio",
             examples=[audio_file],
             cache_examples=True,
+            api_name="predict"
         )
     with gr.Tab("console"):
         ip = gr.Textbox(label="User IP Address")
@@ -31,6 +33,7 @@ with gr.Blocks() as demo:
             .strip(),
             "text",
             "text",
+            api_name="predict"
         )
 
     def get_ip(request: gr.Request):
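The recurring change across these demos is passing api_name="predict" to gr.Interface, which pins the name of the endpoint exposed in the app's auto-generated API. A minimal sketch of calling such a named endpoint with gradio_client (the server URL and local file path below are placeholders, not part of this commit):

from gradio_client import Client, handle_file

# Point the client at wherever the demo is being served (placeholder URL).
client = Client("http://127.0.0.1:7860/")
result = client.predict(
    handle_file("cantina.wav"),   # local audio file to upload (assumed to exist)
    api_name="/predict",          # matches api_name="predict" set in these diffs
)
print(result)  # path to the audio file echoed back by the demo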
demos/calculator/run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: calculator"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('examples')\n", "!wget -q -O examples/log.csv https://github.com/gradio-app/gradio/raw/main/demo/calculator/examples/log.csv"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def calculator(num1, operation, num2):\n", " if operation == \"add\":\n", " return num1 + num2\n", " elif operation == \"subtract\":\n", " return num1 - num2\n", " elif operation == \"multiply\":\n", " return num1 * num2\n", " elif operation == \"divide\":\n", " if num2 == 0:\n", " raise gr.Error(\"Cannot divide by zero!\")\n", " return num1 / num2\n", "\n", "demo = gr.Interface(\n", " calculator,\n", " [\n", " \"number\",\n", " gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n", " \"number\"\n", " ],\n", " \"number\",\n", " examples=[\n", " [45, \"add\", 3],\n", " [3.14, \"divide\", 2],\n", " [144, \"multiply\", 2.5],\n", " [0, \"subtract\", 1.2],\n", " ],\n", " title=\"Toy Calculator\",\n", " description=\"Here's a sample toy calculator.\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: calculator"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('examples')\n", "!wget -q -O examples/log.csv https://github.com/gradio-app/gradio/raw/main/demo/calculator/examples/log.csv"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def calculator(num1, operation, num2):\n", " if operation == \"add\":\n", " return num1 + num2\n", " elif operation == \"subtract\":\n", " return num1 - num2\n", " elif operation == \"multiply\":\n", " return num1 * num2\n", " elif operation == \"divide\":\n", " if num2 == 0:\n", " raise gr.Error(\"Cannot divide by zero!\")\n", " return num1 / num2\n", "\n", "demo = gr.Interface(\n", " calculator,\n", " [\n", " \"number\",\n", " gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n", " \"number\"\n", " ],\n", " \"number\",\n", " examples=[\n", " [45, \"add\", 3],\n", " [3.14, \"divide\", 2],\n", " [144, \"multiply\", 2.5],\n", " [0, \"subtract\", 1.2],\n", " ],\n", " title=\"Toy Calculator\",\n", " description=\"Here's a sample toy calculator.\",\n", " api_name=\"predict\"\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/calculator/run.py CHANGED
@@ -28,6 +28,7 @@ demo = gr.Interface(
     ],
     title="Toy Calculator",
     description="Here's a sample toy calculator.",
+    api_name="predict"
 )
 
 if __name__ == "__main__":
demos/chatbot_multimodal/run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append({\"role\": \"user\", \"content\": {\"path\": x}})\n", " if message[\"text\"] is not None:\n", " history.append({\"role\": \"user\", \"content\": message[\"text\"]})\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "\n", "def bot(history: list):\n", " response = \"**That's cool!**\"\n", " history.append({\"role\": \"assistant\", \"content\": \"\"})\n", " for character in response:\n", " history[-1][\"content\"] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(elem_id=\"chatbot\")\n", "\n", " chat_input = gr.MultimodalTextbox(\n", " interactive=True,\n", " file_count=\"multiple\",\n", " placeholder=\"Enter message or upload file...\",\n", " show_label=False,\n", " sources=[\"microphone\", \"upload\"],\n", " )\n", "\n", " chat_msg = chat_input.submit(\n", " add_message, [chatbot, chat_input], [chatbot, chat_input]\n", " )\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None, like_user_message=True)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "\n", "def add_message(history, message):\n", " user_msg = {\"role\": \"user\", \"content\": []}\n", " for x in message[\"files\"]:\n", " user_msg[\"content\"].append({\"path\": x})\n", " if message[\"text\"] is not None:\n", " user_msg[\"content\"].append(message[\"text\"])\n", " history.append(user_msg)\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "\n", "def bot(history: list):\n", " response = \"**That's cool!**\"\n", " history.append({\"role\": \"assistant\", \"content\": \"\"})\n", " for character in response:\n", " history[-1][\"content\"] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(elem_id=\"chatbot\")\n", "\n", " chat_input = gr.MultimodalTextbox(\n", " interactive=True,\n", " file_count=\"multiple\",\n", " placeholder=\"Enter message or upload file...\",\n", " show_label=False,\n", " sources=[\"microphone\", \"upload\"],\n", " )\n", "\n", " chat_msg = chat_input.submit(\n", " add_message, [chatbot, chat_input], [chatbot, chat_input]\n", " )\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None, like_user_message=True)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/chatbot_multimodal/run.py CHANGED
@@ -9,10 +9,12 @@ def print_like_dislike(x: gr.LikeData):
 
 
 def add_message(history, message):
+    user_msg = {"role": "user", "content": []}
     for x in message["files"]:
-        history.append({"role": "user", "content": {"path": x}})
+        user_msg["content"].append({"path": x})
     if message["text"] is not None:
-        history.append({"role": "user", "content": message["text"]})
+        user_msg["content"].append(message["text"])
+    history.append(user_msg)
     return history, gr.MultimodalTextbox(value=None, interactive=False)
 
 
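For context on the add_message rewrite above: instead of appending one history entry per uploaded file plus another for the text, the handler now builds a single user turn whose content is a list mixing file dicts and plain text. A self-contained sketch of the resulting data shape (the sample message below is made up for illustration):

# Illustrative input in the MultimodalTextbox format: text plus uploaded file paths.
message = {"text": "look at this", "files": ["/tmp/cat.png"]}
history = []

# Same logic as the new add_message body, minus the Gradio component return value.
user_msg = {"role": "user", "content": []}
for path in message["files"]:
    user_msg["content"].append({"path": path})
if message["text"] is not None:
    user_msg["content"].append(message["text"])
history.append(user_msg)

print(history)
# [{'role': 'user', 'content': [{'path': '/tmp/cat.png'}, 'look at this']}]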
demos/code/run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: code"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/code/file.css"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "from time import sleep\n", "\n", "css_file = os.path.join(os.path.abspath(''), \"file.css\")\n", "\n", "def set_lang(language):\n", " print(language)\n", " return gr.Code(language=language)\n", "\n", "def set_lang_from_path():\n", " sleep(1)\n", " return gr.Code(open(css_file).read(), language=\"css\")\n", "\n", "def code(language, code):\n", " return gr.Code(code, language=language)\n", "\n", "io = gr.Interface(lambda x: x, \"code\", \"code\")\n", "\n", "with gr.Blocks() as demo:\n", " lang = gr.Dropdown(value=\"python\", choices=gr.Code.languages)\n", " with gr.Row():\n", " code_in = gr.Code(\n", " language=\"python\",\n", " label=\"Input\",\n", " value='def all_odd_elements(sequence):\\n \"\"\"Returns every odd element of the sequence.\"\"\"',\n", " show_line_numbers = False\n", " )\n", " code_out = gr.Code(label=\"Output\", show_line_numbers = True)\n", " btn = gr.Button(\"Run\")\n", " btn_two = gr.Button(\"Load File\")\n", "\n", " lang.change(set_lang, inputs=lang, outputs=code_in)\n", " btn.click(code, inputs=[lang, code_in], outputs=code_out)\n", " btn_two.click(set_lang_from_path, inputs=None, outputs=code_out)\n", " io.render()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: code"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/code/file.css"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "from time import sleep\n", "\n", "css_file = os.path.join(os.path.abspath(''), \"file.css\")\n", "\n", "def set_lang(language):\n", " print(language)\n", " return gr.Code(language=language)\n", "\n", "def set_lang_from_path():\n", " sleep(1)\n", " return gr.Code(open(css_file).read(), language=\"css\")\n", "\n", "def code(language, code):\n", " return gr.Code(code, language=language)\n", "\n", "io = gr.Interface(lambda x: x, \"code\", \"code\", api_name=\"predict\")\n", "\n", "with gr.Blocks() as demo:\n", " lang = gr.Dropdown(value=\"python\", choices=gr.Code.languages)\n", " with gr.Row():\n", " code_in = gr.Code(\n", " language=\"python\",\n", " label=\"Input\",\n", " value='def all_odd_elements(sequence):\\n \"\"\"Returns every odd element of the sequence.\"\"\"',\n", " show_line_numbers = False\n", " )\n", " code_out = gr.Code(label=\"Output\", show_line_numbers = True)\n", " btn = gr.Button(\"Run\")\n", " btn_two = gr.Button(\"Load File\")\n", "\n", " lang.change(set_lang, inputs=lang, outputs=code_in)\n", " btn.click(code, inputs=[lang, code_in], outputs=code_out)\n", " btn_two.click(set_lang_from_path, inputs=None, outputs=code_out)\n", " io.render()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/code/run.py CHANGED
@@ -15,7 +15,7 @@ def set_lang_from_path():
 def code(language, code):
     return gr.Code(code, language=language)
 
-io = gr.Interface(lambda x: x, "code", "code")
+io = gr.Interface(lambda x: x, "code", "code", api_name="predict")
 
 with gr.Blocks() as demo:
     lang = gr.Dropdown(value="python", choices=gr.Code.languages)
demos/fake_diffusion_with_gif/run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: fake_diffusion_with_gif"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy requests Pillow "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import time\n", "import os\n", "from PIL import Image\n", "import requests\n", "from io import BytesIO\n", "\n", "def create_gif(images):\n", " pil_images = []\n", " for image in images:\n", " if isinstance(image, str):\n", " response = requests.get(image)\n", " image = Image.open(BytesIO(response.content))\n", " else:\n", " image = Image.fromarray((image * 255).astype(np.uint8))\n", " pil_images.append(image)\n", " fp_out = os.path.join(os.path.abspath(''), \"image.gif\")\n", " img = pil_images.pop(0)\n", " img.save(fp=fp_out, format='GIF', append_images=pil_images,\n", " save_all=True, duration=400, loop=0)\n", " return fp_out\n", "\n", "def fake_diffusion(steps):\n", " rng = np.random.default_rng()\n", " images = []\n", " for _ in range(steps):\n", " time.sleep(1)\n", " image = rng.random((600, 600, 3))\n", " images.append(image)\n", " yield image, gr.Image(visible=False)\n", "\n", " time.sleep(1)\n", " image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n", " images.append(image)\n", " gif_path = create_gif(images)\n", "\n", " yield image, gr.Image(value=gif_path, visible=True)\n", "\n", "demo = gr.Interface(fake_diffusion,\n", " inputs=gr.Slider(1, 10, 3, step=1),\n", " outputs=[\"image\", gr.Image(label=\"All Images\", visible=False)])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: fake_diffusion_with_gif"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy requests Pillow "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import time\n", "import os\n", "from PIL import Image\n", "import requests\n", "from io import BytesIO\n", "\n", "def create_gif(images):\n", " pil_images = []\n", " for image in images:\n", " if isinstance(image, str):\n", " response = requests.get(image)\n", " image = Image.open(BytesIO(response.content))\n", " else:\n", " image = Image.fromarray((image * 255).astype(np.uint8))\n", " pil_images.append(image)\n", " fp_out = os.path.join(os.path.abspath(''), \"image.gif\")\n", " img = pil_images.pop(0)\n", " img.save(fp=fp_out, format='GIF', append_images=pil_images,\n", " save_all=True, duration=400, loop=0)\n", " return fp_out\n", "\n", "def fake_diffusion(steps):\n", " rng = np.random.default_rng()\n", " images = []\n", " for _ in range(steps):\n", " time.sleep(1)\n", " image = rng.random((600, 600, 3))\n", " images.append(image)\n", " yield image, gr.Image(visible=False)\n", "\n", " time.sleep(1)\n", " image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n", " images.append(image)\n", " gif_path = create_gif(images)\n", "\n", " yield image, gr.Image(value=gif_path, visible=True)\n", "\n", "demo = gr.Interface(fake_diffusion,\n", " inputs=gr.Slider(1, 10, 3, step=1),\n", " outputs=[\"image\", gr.Image(label=\"All Images\", visible=False)],\n", " api_name=\"predict\",)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/fake_diffusion_with_gif/run.py CHANGED
@@ -39,7 +39,8 @@ def fake_diffusion(steps):
 
 demo = gr.Interface(fake_diffusion,
                     inputs=gr.Slider(1, 10, 3, step=1),
-                    outputs=["image", gr.Image(label="All Images", visible=False)])
+                    outputs=["image", gr.Image(label="All Images", visible=False)],
+                    api_name="predict",)
 
 if __name__ == "__main__":
     demo.launch()