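"""Gradio chat demo for openchat_3.5 (GGUF) running locally on CPU via GPT4All."""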
import gradio as gr
from gpt4all import GPT4All
from huggingface_hub import hf_hub_download

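# Download the quantized GGUF weights from the Hugging Face Hub into ./models.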
model_path = "models"
model_name = "openchat_3.5.Q4_K_M.gguf"
hf_hub_download(repo_id="TheBloke/openchat_3.5-GGUF", filename=model_name, local_dir=model_path, local_dir_use_symlinks=False)

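# Load the local GGUF file; allow_download=False ensures only the file fetched
# above is used, and device="cpu" forces CPU inference.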
model = GPT4All(model_name, model_path, allow_download=False, device="cpu")

model.config["promptTemplate"] = "[INST] {0} [/INST]"
model.config["systemPrompt"] = ""
model._is_chat_session_activated = False

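# Upper bound on tokens generated per reply.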
max_new_tokens = 2048

def generate_response(message, history, temperature, top_p, top_k):
    # Rebuild the whole conversation into a single prompt string.
    prompt = ""
    for user_message, assistant_message in history:
        prompt += model.config["promptTemplate"].format(user_message)
        prompt += assistant_message + "<|end_of_turn|>"
    prompt += model.config["promptTemplate"].format(message)
    outputs = []
    # Stream tokens and yield the accumulated text so the UI updates incrementally.
    for token in model.generate(prompt=prompt, temp=temperature, top_k=top_k, top_p=top_p, max_tokens=max_new_tokens, streaming=True):
        outputs.append(token)
        yield "".join(outputs)

chatbot = gr.Chatbot()

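# Extra sliders shown under the chat box; gr.ChatInterface passes their values
# to the callback as positional arguments after (message, history).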
additional_inputs=[
    gr.Slider(
        label="temperature",
        value=0.5,
        minimum=0.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
    ),
    gr.Slider(
        label="top_p",
        value=1.0,
        minimum=0.0,
        maximum=1.0,
        step=0.01,
        interactive=True,
        info="0.1 means only the tokens comprising the top 10% probability mass are considered. Suggest set to 1 and use temperature. 1 means 100% and will disable it",
    ),
    gr.Slider(
        label="top_k",
        value=40,
        minimum=0,
        maximum=1000,
        step=1,
        interactive=True,
        info="limits candidate tokens to a fixed number after sorting by probability. Setting it higher than the vocabulary size deactivates this limit.",
    )
]

iface = gr.ChatInterface(
    fn=generate_response,
    title="AI Demo",
    chatbot=chatbot,
    additional_inputs=additional_inputs,
)

with gr.Blocks() as demo:
    iface.render()

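# Queueing lets the generator stream partial replies; max_size caps pending requests.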
if __name__ == "__main__":
    demo.queue(max_size=3).launch()