ylacombe committed

Commit a0be272
1 Parent(s): 22d3a77

Create app.py

Files changed (1): app.py (+104, -0)

app.py ADDED
@@ -0,0 +1,104 @@
from __future__ import annotations

import os

import gradio as gr
from gradio_client import Client

DESCRIPTION = """# Speak with Llama2
TODO
"""

system_message = "\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."
temperature = 0.9
top_p = 0.6
repetition_penalty = 1.2

# Remote Spaces used as backends: Whisper for speech-to-text and a
# Llama 2 TGI endpoint for chat completions.
whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
text_client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
def transcribe(wav_path):
    # Send the recorded audio to the hosted Whisper Space and return the text.
    return whisper_client.predict(
        wav_path,      # str (filepath or URL to file) in 'inputs' Audio component
        "transcribe",  # str in 'Task' Radio component
        api_name="/predict",
    )


# Chatbot demo with text and microphone input; the model's reply is streamed.
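The original file also imports transformers.pipeline without using it, which suggests a local transcription path was considered. As a hypothetical fallback, not part of this commit, the same step could run on the local machine; the checkpoint name and helper below are assumptions:

# Hypothetical local fallback (not in this commit): transcribe with a
# transformers ASR pipeline instead of the remote Whisper Space.
from transformers import pipeline

local_asr = pipeline("automatic-speech-recognition", model="openai/whisper-large-v2")

def transcribe_locally(wav_path):
    # The ASR pipeline returns a dict whose "text" field holds the transcript.
    return local_asr(wav_path)["text"]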
def add_text(history, text):
    # Append the typed message as a new user turn and lock the textbox
    # until the bot has answered.
    history = history + [(text, None)]
    return history, gr.update(value="", interactive=False)


def add_file(history, file):
    # Transcribe the microphone recording and append it as a user turn.
    text = transcribe(file)
    history = history + [(text, None)]
    return history
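Both callbacks rely on gr.Chatbot's conversation format: a list of (user, assistant) pairs in which None marks a reply that has not been generated yet. With illustrative values (not from the source):

# Shape of `history` as it moves through the callbacks:
#   after add_text / add_file:  [("Hi there", None)]
#   after bot finishes:         [("Hi there", "Hello! How can I help?")]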
def bot(history):
    # Stream the Llama 2 reply from the TGI Space, filling in the last turn
    # chunk by chunk as tokens arrive.
    history[-1][1] = ""
    for character in text_client.predict(
        history,
        system_message,
        temperature,
        4096,               # max new tokens
        top_p,
        repetition_penalty,
        api_name="/chat",
    ):
        history[-1][1] += character
        yield history
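The positional signature of the /chat route is only implied by this call site. A minimal, hypothetical sketch for exercising the endpoint outside the UI (not in the original file), assuming the same argument order as bot() above:

# Hypothetical smoke test: stream a single reply from the chat Space.
def smoke_test():
    history = [["Hello!", None]]
    for chunk in text_client.predict(
        history, system_message, temperature, 4096, top_p, repetition_penalty,
        api_name="/chat",
    ):
        print(chunk, end="", flush=True)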
with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)

    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        bubble_full_width=False,
        avatar_images=(None, os.path.join(os.path.dirname(__file__), "avatar.png")),
    )

    with gr.Row():
        txt = gr.Textbox(
            scale=4,
            show_label=False,
            placeholder="Enter text and press enter, or speak to your microphone",
            container=False,
        )
        btn = gr.Audio(source="microphone", type="filepath")  # recording saved to a temp file
    # Typed input: add the text to the chat, stream the reply, then unlock
    # the textbox again.
    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, chatbot, chatbot
    )
    txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)

    # Spoken input: transcribe once recording stops, then stream the reply.
    file_msg = btn.stop_recording(add_file, [chatbot, btn], [chatbot], queue=False).then(
        bot, chatbot, chatbot
    )

    # file_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)

demo.queue()
demo.launch(debug=True)
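With gradio and gradio_client installed and an avatar.png next to the script (it is referenced by the Chatbot's avatar_images), running python app.py starts the demo locally. demo.queue() is what allows bot(), a generator, to stream partial replies into the Chatbot component instead of returning a single final answer.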