import os
import time
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
import gradio as gr
from threading import Thread

MODEL_LIST = ["mistralai/Mistral-Nemo-Instruct-2407"]
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Fall back to the first listed model if MODEL_ID is not set in the environment.
MODEL = os.environ.get("MODEL_ID", MODEL_LIST[0])

TITLE = "<h1><center>Mistral NeMo 12B</center></h1>"

PLACEHOLDER = """
<center>
<p>Hi, I'm NeMo. Ask me anything.</p>
</center>
"""


CSS = """
.duplicate-button {
    margin: auto !important;
    color: white !important;
    background: black !important;
    border-radius: 100vh !important;
}
h3 {
    text-align: center;
}
"""

device = "cuda" # for GPU usage or "cpu" for CPU usage

tokenizer = AutoTokenizer.from_pretrained(MODEL, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    MODEL,
    torch_dtype=torch.bfloat16,   # load weights in bfloat16 to halve memory use
    device_map="auto",            # let accelerate place the model on the available device(s)
    token=HF_TOKEN,
    ignore_mismatched_sizes=True,
)

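# On Hugging Face ZeroGPU Spaces, @spaces.GPU() requests a GPU for the duration of each call.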
@spaces.GPU()
def stream_chat(
    message: str, 
    history: list, 
    temperature: float = 0.3, 
    max_new_tokens: int = 1024, 
    top_p: float = 1.0, 
    top_k: int = 20, 
    penalty: float = 1.2,
):
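    """Generate a streamed reply for gr.ChatInterface.

    `history` arrives as (user, assistant) tuples; the turns are rebuilt into a
    chat-template conversation, generation runs in a background thread, and the
    growing reply is yielded so the UI updates as tokens arrive.
    """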
    print(f'message: {message}')
    print(f'history: {history}')

    conversation = []
    for prompt, answer in history:
        conversation.extend([
            {"role": "user", "content": prompt}, 
            {"role": "assistant", "content": answer},
        ])

    conversation.append({"role": "user", "content": message})

    # Render the conversation with the model's chat template; add_generation_prompt
    # ensures the prompt ends where the assistant's reply should begin.
    input_text = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
    
    generate_kwargs = dict(
        input_ids=inputs,
        max_new_tokens=max_new_tokens,
        do_sample=temperature > 0,      # greedy decoding when temperature is 0
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=penalty,     # repetition penalty from the UI slider
        streamer=streamer,
    )

    # Run generation in a background thread; model.generate() already disables
    # gradient tracking internally, and the streamer yields text as it is produced.
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        yield buffer

            
chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)

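# Assemble the Gradio UI: chat interface with sampling controls and a model-info accordion.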
with gr.Blocks(css=CSS, theme="Nymbo/Nymbo_Theme") as demo:
    gr.HTML(TITLE)
    gr.ChatInterface(
        fn=stream_chat,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(
                minimum=0,
                maximum=1,
                step=0.1,
                value=0.3,
                label="Temperature",
                render=False,
            ),
            gr.Slider(
                minimum=128,
                maximum=8192,
                step=1,
                value=1024,
                label="Max new tokens",
                render=False,
            ),
            gr.Slider(
                minimum=0.0,
                maximum=1.0,
                step=0.1,
                value=1.0,
                label="top_p",
                render=False,
            ),
            gr.Slider(
                minimum=1,
                maximum=20,
                step=1,
                value=20,
                label="top_k",
                render=False,
            ),
            gr.Slider(
                minimum=0.0,
                maximum=2.0,
                step=0.1,
                value=1.2,
                label="Repetition penalty",
                render=False,
            ),
        ],
        examples=[
            ["Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."],
            ["What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."],
            ["Tell me a random fun fact about the Roman Empire."],
            ["Show me a code snippet of a website's sticky header in CSS and JavaScript."],
        ],
        cache_examples=False,
    )
    
    with gr.Accordion("Model Information", open=False):
        gr.Markdown(
            """
            # Key Features
            - **SOTA** Performance
            - **12B** Parameters
            - **128K** Context Window
            - Trained on a large proportion of **multilingual** and **code** data
            - Apache 2.0 License
            
            # Main Benchmarks
            | Benchmark                 | Score |
            | ------------------------- | ----- |
            | HellaSwag (0-shot)        | 83.5% |
            | Winogrande (0-shot)       | 76.8% |
            | OpenBookQA (0-shot)       | 60.6% |
            | CommonSenseQA (0-shot)    | 70.4% |
            | TruthfulQA (0-shot)       | 50.3% |
            | MMLU (5-shot)             | 68.0% |
            | TriviaQA (5-shot)         | 73.8% |
            | NaturalQuestions (5-shot) | 31.2% |
            
            # Multilingual Benchmarks
            | Language   | Score |
            | ---------- | ----- |
            | French     | 62.3% |
            | German     | 62.7% |
            | Spanish    | 64.6% |
            | Italian    | 61.3% |
            | Portuguese | 63.3% |
            | Russian    | 59.2% |
            | Chinese    | 59.0% |
            | Japanese   | 59.0% |
            """
        )

if __name__ == "__main__":
    demo.launch()