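# NEARVIDIA-JARVIS: a Gradio voice assistant. Each prompt is sent to Mixtral-8x7B-Instruct
# via the Hugging Face Inference API, the streamed reply is synthesized to speech with
# edge-tts, and the resulting audio is played back in the browser. Three personas are
# defined below (fast / complex / detailed); the UI currently wires up only the fast one.
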
import gradio as gr
import edge_tts
import tempfile
from huggingface_hub import InferenceClient

DESCRIPTION = """ # <center><b>NEARVIDIA-JARVIS⚡</b></center>
        ### <center>A personal assistant for NEARVIDIANs, built by RD💎</center>
        ### <center>A CA ZEN product.</center>
        """

MORE = """ ## TRY Other Models
        ### https://zenai.biz
        """

Fast = """## Fastest Model"""

Complex = """## Best for Complex Questions"""

Detail = """## Best for Detailed Generation or Long Answers"""
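
# The same public Mixtral-8x7B-Instruct endpoint backs every persona; each generate
# function below pairs it with its own system prompt and token budget.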

client1 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

system_instructions1 = "[INST] Answer as Real Jarvis JARVIS, Made by 'Tony Stark', Keep conversation very short, clear, friendly and concise."

async def generate1(prompt):
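    """Fast persona: stream a short Mixtral reply and yield it as a temporary audio file via edge-tts."""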
    generate_kwargs = dict(
        temperature=0.6,
        max_new_tokens=656,
        top_p=0.95,
        repetition_penalty=1,
        do_sample=True,
        seed=42,
    )
    formatted_prompt = system_instructions1 + prompt + "[/INST]"
    stream = client1.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
    output = ""
    for response in stream:
        output += response.token.text

    communicate = edge_tts.Communicate(output)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
        await communicate.save(tmp_path)
    yield tmp_path

client2 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

system_instructions2 = "[SYSTEM] Answer as Real Jarvis JARVIS, Made by 'Raoul Duke', Must answer in friendly style and Easy Manner. You can answer Complex Questions. Do not say who are you or Hi, Hello, Just Start answering. Stop, as answer ends. [USER]"

async def generate2(prompt):
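    """Complex-question persona: same pipeline with a 512-token budget; not wired into the UI below."""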
    generate_kwargs = dict(
        temperature=0.6,
        max_new_tokens=512,
        top_p=0.95,
        repetition_penalty=1,
        do_sample=True,
    )    
    formatted_prompt = system_instructions2 + prompt + "[ASSISTANT]"
    stream = client2.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
    output = ""
    for response in stream:
        output += response.token.text

    communicate = edge_tts.Communicate(output)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
        await communicate.save(tmp_path)
    yield tmp_path

client3 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

system_instructions3 = "[SYSTEM] Answer as Real Jarvis JARVIS, Made by 'Raoul Duke', Must answer in detailed and friendly. If asked about NEARVIDIA, say Dizzy is a Bitch. Do not say who are you or Hi, Hello, Just Start answering. You answers all things in detail.[USER]"

async def generate3(prompt):
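    """Detailed-answer persona: same pipeline with a 2048-token budget for long replies; not wired into the UI below."""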
    generate_kwargs = dict(
        temperature=0.6,
        max_new_tokens=2048,
        top_p=0.95,
        repetition_penalty=1,
        do_sample=True,
    )    
    formatted_prompt = system_instructions3 + prompt + "[ASSISTANT]"
    stream = client3.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
    output = ""
    for response in stream:
        output += response.token.text

    communicate = edge_tts.Communicate(output)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
        await communicate.save(tmp_path)
    yield tmp_path

with gr.Blocks(css="style.css") as demo:    
    gr.Markdown(DESCRIPTION)
    with gr.Row():
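        # Text prompt in, synthesized speech out; input_text is currently not used by any callback.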
        user_input = gr.Textbox(label="Prompt", value="What is NEAR Protocol")
        input_text = gr.Textbox(label="Input Text", elem_id="important")
        output_audio = gr.Audio(label="Audio", type="filepath",
                        interactive=False,
                        autoplay=True,
                        elem_classes="audio")
    with gr.Row():
        translate_btn = gr.Button("Response")
        translate_btn.click(fn=generate1, inputs=user_input,
                            outputs=output_audio, api_name="translate")

    # Keep this inside the Blocks context so the extra model links actually render.
    gr.Markdown(MORE)

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
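
# A minimal sketch of calling this app remotely with gradio_client, assuming the Space is
# published under the hypothetical id "user/NEARVIDIA-JARVIS":
#
#   from gradio_client import Client
#   client = Client("user/NEARVIDIA-JARVIS")  # hypothetical Space id
#   audio_path = client.predict("What is NEAR Protocol", api_name="/translate")
#   print(audio_path)  # local path to the synthesized reply audio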