File size: 1,456 Bytes
d804f98
56b6398
 
 
 
 
d804f98
 
 
 
 
 
 
56b6398
 
 
d804f98
56b6398
d804f98
 
56b6398
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d804f98
56b6398
 
 
 
 
d804f98
 
56b6398
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
from ctransformers import AutoModelForCausalLM
import gradio as gr

# Footer markdown rendered beneath the chat UI (see gr.Markdown call below).
# NOTE(review): "greety" is a typo for "greeting"; left as-is because the
# name is referenced later in this file.
greety = """
A special thanks to [TheBloke](https://huggingface.co/TheBloke) for the quantized model and [Gathnex](https://medium.com/@gathnex) for his excellent tutorial.
"""

# Model loading: quantized Dolphin-2.0 Mistral (GGUF) served CPU-side via
# ctransformers. `model_type` selects the mistral architecture loader;
# `threads` caps CPU parallelism for generation.
llm = AutoModelForCausalLM.from_pretrained(
    "dolphin-2.0-mistral-7b.Q4_K_S.gguf",
    model_type="mistral",
    max_new_tokens=1096,
    threads=3,
)

def stream(prompt, UL):
    """Answer a single user *prompt* with the model, using ChatML formatting.

    Parameters
    ----------
    prompt : str
        The user's latest message.
    UL :
        Chat history supplied by ``gr.ChatInterface``; unused here, so every
        turn is answered without prior conversational context.

    Returns
    -------
    str
        The model's full completion. Despite the name, this returns the
        whole response at once rather than streaming tokens.
    """
    # Fixed "hlepful" -> "helpful" typo from the original system prompt.
    system_prompt = (
        'You are a helpful AI assistant. Below is an instruction that '
        'describes a task. Write a response that appropriately completes '
        'the request.'
    )
    start, end = "<|im_start|>", "<|im_end|>"
    # ChatML layout: closed system and user turns, then an *opened* assistant
    # turn so the model generates the assistant reply (the original omitted
    # the trailing "<|im_start|>assistant\n" opener the format expects).
    prompt = (
        f"{start}system\n{system_prompt}{end}\n"
        f"{start}user\n{prompt.strip()}{end}\n"
        f"{start}assistant\n"
    )
    return llm(prompt)

# Page styling: centered title, themed duplicate-space button, and a
# width-constrained, centered content container.
css = """
h1{
    text-align: center;
}

#duplicate-button{
    margin: auto;
    color: whitesmoke;
    background: #1565c0;
}

.contain{
    max-width: 900px;
    margin: auto;
    padding-top: 1.5rem;
}
"""

# Chat UI wired to stream(). stop_btn=None hides the stop button; the
# original passed the *string* 'None', which would render a button literally
# labelled "None" instead of disabling it.
chat_interface = gr.ChatInterface(
    fn=stream,
    stop_btn=None,
    examples=[
        "what are 'Large Language Models'?",
        "Explain OCEAN personality types",
    ],
)

# Assemble the page top-to-bottom: title, duplicate-space button, the chat
# interface, and the credits footer. Statement order here *is* the layout.
with gr.Blocks(css=css) as demo:
    gr.HTML("<h1><center>Dolphin2.0_x_Mistral Demo</center></h1>")
    gr.DuplicateButton(value="Duplicate Space for private use",elem_id="duplicate-button")
    chat_interface.render()
    gr.Markdown(greety)


if __name__ == "__main__":
    demo.queue(max_size=10).launch()