VinayHajare committed • 64ec2d5
1 Parent(s): 5205df4
Update app.py

app.py CHANGED
@@ -4,7 +4,7 @@ import os
 
 API_URL = {
     "Mistral" : "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1",
-    "Codestral" : "mistralai/Codestral-22B-v0.1"
+    "Codestral" : "https://api-inference.huggingface.co/models/mistralai/Codestral-22B-v0.1"
 }
 
 HF_TOKEN = os.environ['HF_TOKEN']
@@ -32,7 +32,9 @@ def format_prompt(message, history, enable_hinglish=False):
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
-def generate(prompt, history, model = "Mistral", enable_hinglish=False, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
+def generate(prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0, model = "Mistral", enable_hinglish=False):
+    # Selecting model to be used
+    client = mistralClient if(model == "Mistral") else codestralClient
     temperature = float(temperature) # Generation arguments
     if temperature < 1e-2:
         temperature = 1e-2
@@ -47,9 +49,6 @@ def generate(prompt, history, model = "Mistral", enable_hinglish=False, temperat
         do_sample=True,
         seed=42,
     )
-
-    # Selecting model to be used
-    client = mistralClient if(model == "Mistral") else codestralClient
 
     formatted_prompt = format_prompt(prompt, history, enable_hinglish)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
@@ -60,19 +59,6 @@ def generate(prompt, history, model = "Mistral", enable_hinglish=False, temperat
     return output
 
 additional_inputs=[
-    gr.Dropdown(
-        choices = ["Mistral","Codestral"],
-        value = "Mistral",
-        label = "Model to be used",
-        interactive=True,
-        info = "Mistral for general-purpose chatting and codestral for code related task (Supports 80+ languages)"
-    ),
-    gr.Checkbox(
-        label="Hinglish",
-        value=False,
-        interactive=True,
-        info="Enables the MistralTalk to talk in Hinglish (Combination of Hindi and English)",
-    ),
     gr.Slider(
         label="Temperature",
         value=0.9,
@@ -109,7 +95,19 @@ additional_inputs=[
         interactive=True,
         info="Penalize repeated tokens",
     ),
-
+    gr.Dropdown(
+        choices = ["Mistral","Codestral"],
+        value = "Mistral",
+        label = "Model to be used",
+        interactive=True,
+        info = "Mistral for general-purpose chatting and codestral for code related task (Supports 80+ languages)"
+    ),
+    gr.Checkbox(
+        label="Hinglish",
+        value=False,
+        interactive=True,
+        info="Enables the MistralTalk to talk in Hinglish (Combination of Hindi and English)",
+    )
 ]
 
 css = """
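Two notes help when reading the hunks above. Hunk 1 is the functional fix: the "Codestral" entry becomes a full Inference API URL like the "Mistral" entry, instead of a bare model id. Hunks 2 and 3 then move the client selection to the top of `generate`, but `mistralClient` and `codestralClient` are never constructed inside the changed lines. A minimal sketch of the presumed setup (the client names, `API_URL`, and `HF_TOKEN` come from the diff; the `InferenceClient` constructor call is an assumption, since that code is outside this commit):

```python
import os

from huggingface_hub import InferenceClient

# Endpoints as they stand after this commit: both are full URLs.
API_URL = {
    "Mistral": "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Codestral": "https://api-inference.huggingface.co/models/mistralai/Codestral-22B-v0.1",
}
HF_TOKEN = os.environ['HF_TOKEN']

# Presumed construction (not part of this diff): one authenticated,
# streaming-capable client per endpoint. generate() then routes to one
# of them via the "Model to be used" dropdown.
mistralClient = InferenceClient(model=API_URL["Mistral"], token=HF_TOKEN)
codestralClient = InferenceClient(model=API_URL["Codestral"], token=HF_TOKEN)
```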
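The parameter reshuffle is consistent with how Gradio wires inputs: `gr.ChatInterface` passes the values of `additional_inputs` to the callback positionally, after `(message, history)`. Once `model` and `enable_hinglish` move to the end of the signature (hunk 2), the Dropdown and Checkbox have to move to the end of the input list (hunks 4 and 5) so the positions stay aligned. A hedged sketch of that wiring, which is not itself part of this diff:

```python
import gradio as gr

# Assumed wiring (outside the changed lines): ChatInterface invokes
# generate(message, history, *values_of_additional_inputs), so the list
# order must match generate()'s parameter order after (prompt, history):
# temperature, max_new_tokens, top_p, repetition_penalty, model,
# enable_hinglish.
demo = gr.ChatInterface(
    fn=generate,
    additional_inputs=additional_inputs,
)
demo.queue().launch()
```

If only the signature had changed, the dropdown's string would have landed in `temperature` and `float(temperature)` would raise, so the two reorders have to travel together.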