import os

import gradio as gr
from huggingface_hub import InferenceClient

# Read the Hugging Face API token from the environment.
token = os.environ.get("HGFTOKEN")

# Inference API client for the Mistral 7B instruct model.
client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1",
    token=token,
)

# Default sampling parameters for text generation.
model_temperature = 0.7
model_max_new_tokens = 256
model_top_p = 0.95
model_repetition_penalty = 1.1

def chat(prompt, history):
    """Gradio handler: generate a reply and append the turn to the history."""
    formatted_prompt = format_prompt(prompt, history)
    answer = respond(formatted_prompt)

    history.append((prompt, answer))

    # Return an empty string to clear the input box, plus the updated history.
    return "", history

def format_prompt(message, history):
    """Build a Mistral-instruct prompt string from the history and new message."""
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
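
# Worked example of the template above (hypothetical turn, not from the
# original file):
#   format_prompt("How are you?", [("Hi", "Hello!")])
#   -> "<s>[INST] Hi [/INST] Hello!</s> [INST] How are you? [/INST]"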

def respond(formatted_prompt):
    """Call the Inference API and return the generated text."""
    # Clamp temperature to a small positive value; the API rejects 0.
    temperature = max(float(model_temperature), 1e-2)
    top_p = float(model_top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=model_max_new_tokens,
        top_p=top_p,
        repetition_penalty=model_repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed makes generations reproducible
    )

    output = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=False,
        details=True,
        return_full_text=False,
    )
    return output.generated_text