prithivMLmods committed on
Commit
31e6dd2
1 Parent(s): 14fa8c1

Update app.py

Files changed (1)
  1. app.py +56 -56
app.py CHANGED
@@ -1,56 +1,56 @@
- import os
- import re
- import gradio as gr
- import edge_tts
- import asyncio
- import time
- import tempfile
- from huggingface_hub import InferenceClient
-
- DESCRIPTION = """ # <center><b>JARVIS⚡</b></center>
- ### <center>A personal Assistant of Tony Stark for YOU
- ### <center>Currently It supports text input, But If this space completes 1k hearts than I starts working on Audio Input.</center>
- """
-
- client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
-
- system_instructions = "[INST] Answer as Real Jarvis JARVIS, Made by 'Tony Stark', Keep conversation very short, clear, friendly and concise."
-
- async def generate(prompt):
-     generate_kwargs = dict(
-         temperature=0.6,
-         max_new_tokens=256,
-         top_p=0.95,
-         repetition_penalty=1,
-         do_sample=True,
-         seed=42,
-     )
-     formatted_prompt = system_instructions + prompt + "[/INST]"
-     stream = client.text_generation(
-         formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
-     output = ""
-     for response in stream:
-         output += response.token.text
-
-     communicate = edge_tts.Communicate(output)
-     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
-         tmp_path = tmp_file.name
-         await communicate.save(tmp_path)
-     yield tmp_path
-
- with gr.Blocks(css="style.css") as demo:
-     gr.Markdown(DESCRIPTION)
-     with gr.Row():
-         user_input = gr.Textbox(label="Prompt")
-         input_text = gr.Textbox(label="Input Text", elem_id="important")
-         output_audio = gr.Audio(label="Audio", type="filepath",
-                                 interactive=False,
-                                 autoplay=True,
-                                 elem_classes="audio")
-     with gr.Row():
-         translate_btn = gr.Button("Response")
-         translate_btn.click(fn=generate, inputs=user_input,
-                             outputs=output_audio, api_name="translate")
-
- if __name__ == "__main__":
-     demo.queue(max_size=20).launch()
 
+ import os
+ import re
+ import gradio as gr
+ import edge_tts
+ import asyncio
+ import time
+ import tempfile
+ from huggingface_hub import InferenceClient
+
+ DESCRIPTION = """ # <center><b>Rabbit R1 🐰</b></center>
+ ### <center>Rabbit’s Little Walkie-Talkie 🥤
+ ### <center>Voice 2 Voice Coming Soon 🚧 </center>
+ """
+
+ client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+ system_instructions = "[INST] Answers by 🐰🚀, Keep conversation very short, clear, friendly and concise."
+
+ async def generate(prompt):
+     generate_kwargs = dict(
+         temperature=0.6,
+         max_new_tokens=256,
+         top_p=0.95,
+         repetition_penalty=1,
+         do_sample=True,
+         seed=42,
+     )
+     formatted_prompt = system_instructions + prompt + "[/INST]"
+     stream = client.text_generation(
+         formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
+     output = ""
+     for response in stream:
+         output += response.token.text
+
+     communicate = edge_tts.Communicate(output)
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
+         tmp_path = tmp_file.name
+         await communicate.save(tmp_path)
+     yield tmp_path
+
+ with gr.Blocks(css="style.css") as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Row():
+         user_input = gr.Textbox(label="Prompt")
+         input_text = gr.Textbox(label="Input Text", elem_id="important")
+         output_audio = gr.Audio(label="Audio", type="filepath",
+                                 interactive=False,
+                                 autoplay=True,
+                                 elem_classes="audio")
+     with gr.Row():
+         translate_btn = gr.Button("Response")
+         translate_btn.click(fn=generate, inputs=user_input,
+                             outputs=output_audio, api_name="translate")
+
+ if __name__ == "__main__":
+     demo.queue(max_size=20).launch()
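
Note (not part of the commit): because the click handler is registered with api_name="translate", the updated Space can also be queried programmatically. Below is a minimal sketch using gradio_client; the Space id is a placeholder, since the repository name does not appear in this diff, and the call returns the local path of the generated audio file.

from gradio_client import Client

# Placeholder Space id; replace with the actual owner/space name.
client = Client("owner/space-name")

# The positional argument maps to the `user_input` textbox; `api_name`
# matches the endpoint registered in translate_btn.click(...).
audio_path = client.predict(
    "Give me a one-line weather update.",
    api_name="/translate",
)
print(audio_path)  # filepath of the synthesized speech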