freddyaboulton (HF Staff) committed
Commit 60ba5c8 · verified · 1 Parent(s): d4bb76b

Upload folder using huggingface_hub

README.md CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
 colorFrom: indigo
 colorTo: indigo
 sdk: gradio
-sdk_version: 5.49.1
+sdk_version: 6.0.0
 app_file: run.py
 pinned: false
 hf_oauth: true
multimodal_group_messages_testcase.py ADDED
@@ -0,0 +1,28 @@
+import gradio as gr
+
+runs = 0
+
+def reset_runs():
+    global runs
+    runs = 0
+
+def slow_echo(message, history):
+    global runs  # i didn't want to add state or anything to this demo
+    runs = runs + 1
+    for i in range(len(message['text'])):
+        yield f"Run {runs} - You typed: " + message['text'][: i + 1]
+    if "files" in message and message["files"]:
+        yield f"Run {runs} - You typed: " + message['text'] + f" And you sent {len(message['files'])} files"
+
+chat = gr.ChatInterface(slow_echo, multimodal=True)
+
+with gr.Blocks() as demo:
+    chat.render()
+    # We reset the global variable to minimize flakes
+    # this works because CI runs only one test at a time
+    # need to use gr.State if we want to parallelize this test
+    # currently chatinterface does not support that
+    demo.unload(reset_runs)
+
+if __name__ == "__main__":
+    demo.launch()
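
A quick sanity check (not part of the commit): calling the new handler directly shows the multimodal payload shape that gr.ChatInterface(multimodal=True) passes to it, a dict with "text" and "files" keys, and what the generator streams back. The sample message below is hypothetical.

    # Hypothetical input; "text"/"files" keys mirror the dict the handler indexes above
    for chunk in slow_echo({"text": "hi", "files": ["cat.png"]}, history=[]):
        print(chunk)
    # Run 1 - You typed: h
    # Run 1 - You typed: hi
    # Run 1 - You typed: hi And you sent 1 files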
multimodal_messages_testcase.py CHANGED
@@ -12,7 +12,7 @@ def slow_echo(message, history):
     for i in range(len(message['text'])):
         yield f"Run {runs} - You typed: " + message['text'][: i + 1]
 
-chat = gr.ChatInterface(slow_echo, multimodal=True, type="messages")
+chat = gr.ChatInterface(slow_echo, multimodal=True, api_name="chat")
 
 with gr.Blocks() as demo:
     chat.render()
multimodal_non_stream_testcase.py CHANGED
@@ -11,7 +11,7 @@ def slow_echo(message, history):
     runs = runs + 1
     return f"Run {runs} - You typed: " + message['text']
 
-chat = gr.ChatInterface(slow_echo, multimodal=True, type="tuples")
+chat = gr.ChatInterface(slow_echo, multimodal=True, api_name="chat")
 
 with gr.Blocks() as demo:
     chat.render()
requirements.txt CHANGED
@@ -1,2 +1,2 @@
-gradio-client @ git+https://github.com/gradio-app/gradio@e05eb8df38a4ca20993e94ca4e209cf8110bb677#subdirectory=client/python
-https://gradio-pypi-previews.s3.amazonaws.com/e05eb8df38a4ca20993e94ca4e209cf8110bb677/gradio-5.49.1-py3-none-any.whl
+gradio-client @ git+https://github.com/gradio-app/gradio@d007e6cf617baba5c62e49ec2b7ce278aa863a79#subdirectory=client/python
+https://gradio-pypi-previews.s3.amazonaws.com/d007e6cf617baba5c62e49ec2b7ce278aa863a79/gradio-6.0.0-py3-none-any.whl
run.ipynb CHANGED
@@ -1 +1 @@
-{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: test_chatinterface_streaming_echo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/messages_testcase.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/multimodal_messages_testcase.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/multimodal_non_stream_testcase.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/multimodal_tuples_testcase.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "runs = 0\n", "\n", "def reset_runs():\n", " global runs\n", " runs = 0\n", "\n", "def slow_echo(message, history):\n", " global runs # i didn't want to add state or anything to this demo\n", " runs = runs + 1\n", " for i in range(len(message)):\n", " yield f\"Run {runs} - You typed: \" + message[: i + 1]\n", "\n", "chat = gr.ChatInterface(slow_echo, fill_height=True, editable=True)\n", "\n", "with gr.Blocks() as demo:\n", " chat.render()\n", " # We reset the global variable to minimize flakes\n", " # this works because CI runs only one test at at time\n", " # need to use gr.State if we want to parallelize this test\n", " # currently chatinterface does not support that\n", " demo.unload(reset_runs)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
+{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: test_chatinterface_streaming_echo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/multimodal_group_messages_testcase.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/multimodal_messages_testcase.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/multimodal_non_stream_testcase.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "runs = 0\n", "\n", "def reset_runs():\n", " global runs\n", " runs = 0\n", "\n", "def slow_echo(message, history):\n", " global runs # i didn't want to add state or anything to this demo\n", " runs = runs + 1\n", " for i in range(len(message)):\n", " yield f\"Run {runs} - You typed: \" + message[: i + 1]\n", "\n", "chat = gr.ChatInterface(slow_echo, fill_height=True, editable=True, api_name=\"chat\")\n", "\n", "with gr.Blocks() as demo:\n", " chat.render()\n", " # We reset the global variable to minimize flakes\n", " # this works because CI runs only one test at at time\n", " # need to use gr.State if we want to parallelize this test\n", " # currently chatinterface does not support that\n", " demo.unload(reset_runs)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py CHANGED
@@ -12,7 +12,7 @@ def slow_echo(message, history):
     for i in range(len(message)):
         yield f"Run {runs} - You typed: " + message[: i + 1]
 
-chat = gr.ChatInterface(slow_echo, fill_height=True, editable=True)
+chat = gr.ChatInterface(slow_echo, fill_height=True, editable=True, api_name="chat")
 
 with gr.Blocks() as demo:
     chat.render()
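
Since every ChatInterface in this commit now sets api_name="chat" explicitly, the endpoint keeps a stable name for API callers. A minimal sketch of calling it with gradio_client, assuming the demo is running locally (the URL below is a placeholder):

    from gradio_client import Client

    client = Client("http://127.0.0.1:7860/")  # placeholder; point at the running Space or local server
    result = client.predict(message="Hello", api_name="/chat")  # "/chat" follows from api_name="chat"
    print(result)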