Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,25 +1,23 @@
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
3 |
import random
|
|
|
4 |
from mySystemPrompt import SYSTEM_PROMPT
|
5 |
|
6 |
# Model which is used
|
7 |
checkpoint = "mistralai/Mistral-7B-Instruct-v0.2"
|
8 |
# Inference client with the model (And HF-token if needed)
|
9 |
client = InferenceClient(checkpoint)
|
10 |
-
|
11 |
-
|
12 |
|
13 |
def format_prompt(message,chatbot,system_prompt):
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
return input_prompt
|
22 |
-
|
23 |
|
24 |
def inference(message, history, systemPrompt=SYSTEM_PROMPT, temperature=0.9, maxTokens=512, topP=0.9, repPenalty=1.1):
|
25 |
# Updating the settings for the generation
|
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
3 |
import random
|
4 |
+
from transformers import AutoTokenizer
|
5 |
from mySystemPrompt import SYSTEM_PROMPT
|
6 |
|
7 |
# Model which is used
|
8 |
checkpoint = "mistralai/Mistral-7B-Instruct-v0.2"
|
9 |
# Inference client with the model (And HF-token if needed)
|
10 |
client = InferenceClient(checkpoint)
|
11 |
+
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
|
|
|
12 |
|
13 |
def format_prompt(message,chatbot,system_prompt):
    """Build the tokenized chat prompt for the model.

    Turns the Gradio chat history (`chatbot`, a list of
    (user, assistant) pairs) plus the new user `message` into the
    role/content message format and applies the model's chat template.

    Returns the template-rendered, tokenized prompt as a PyTorch tensor.

    NOTE(review): Mistral-Instruct chat templates have historically
    rejected a "system"-role message — confirm `apply_chat_template`
    accepts it for this checkpoint.
    """
    # Seed the conversation with the system prompt, then replay the
    # full history so far, one user/assistant exchange at a time.
    chat = [{"role": "system", "content": system_prompt}]
    for user_turn, assistant_turn in chatbot:
        chat.append({"role": "user", "content": user_turn})
        chat.append({"role": "assistant", "content": assistant_turn})
    # The new message awaiting a reply goes last.
    chat.append({"role": "user", "content": message})
    # Render and tokenize via the model's chat template; the generation
    # prompt marker tells the model it is its turn to answer.
    return tokenizer.apply_chat_template(
        chat,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    )
|
|
|
|
|
21 |
|
22 |
def inference(message, history, systemPrompt=SYSTEM_PROMPT, temperature=0.9, maxTokens=512, topP=0.9, repPenalty=1.1):
|
23 |
# Updating the settings for the generation
|