Update app.py
app.py CHANGED
@@ -1,29 +1,18 @@
 import gradio as gr
 import os
-import spaces
-from transformers import GemmaTokenizer, AutoModelForCausalLM
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 from threading import Thread
+from unsloth.chat_templates import get_chat_template
+from unsloth import FastLanguageModel
+import torch
 
-# Set an environment variable
-#HF_TOKEN = os.environ.get("HF_TOKEN", None)
-
-
-DESCRIPTION = '''
-<div>
-<h1 style="text-align: center;">Llama3 8B Fine-tuned</h1>
-'''
-
-# LICENSE = """
-# """
 
 PLACEHOLDER = """
 <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
-
+
 </div>
 """
 
-
 css = """
 h1 {
     text-align: center;
@@ -37,20 +26,42 @@ h1 {
 }
 """
 
-#
-
-
+max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
+dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
+
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name="umair894/llama3",
+    max_seq_length=max_seq_length,
+    dtype=dtype,
+    load_in_4bit=load_in_4bit,
+
+)
+FastLanguageModel.for_inference(model)
+
+# Apply chat template to the tokenizer
+tokenizer = get_chat_template(
+    tokenizer,
+    chat_template="llama-3", # Supports zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, unsloth
+    mapping={"role": "from", "content": "value", "user": "human", "assistant": "gpt"}, # ShareGPT style
+    map_eos_token=True, # Maps <|im_end|> to </s> instead
+)
+
 terminators = [
     tokenizer.eos_token_id,
-    tokenizer.convert_tokens_to_ids("<|eot_id|>")
+    tokenizer.convert_tokens_to_ids("<|eot_id|>")
 ]
 
-
+# Check if terminators are None and provide a default value if needed
+terminators = [token_id for token_id in terminators if token_id is not None]
+if not terminators:
+    terminators = [tokenizer.eos_token_id] # Ensure there is a valid EOS token
+
 def chat_llama3_8b(message: str,
-                   history: list,
-                   temperature: float,
-                   max_new_tokens: int
-                   ) -> str:
+                   history: list,
+                   temperature: float,
+                   max_new_tokens: int
+                   ) -> str:
     """
     Generate a streaming response using the llama3-8b model.
     Args:
@@ -61,24 +72,30 @@ def chat_llama3_8b(message: str,
     Returns:
         str: The generated response.
     """
+
     conversation = []
     for user, assistant in history:
-        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
-    conversation.append({"role": "user", "content": message})
+        conversation.extend([{"from": "human", "value": user}, {"from": "gpt", "value": assistant}])
+    conversation.append({"from": "human", "value": message})
+
+    input_ids = tokenizer.apply_chat_template(
+        conversation,
+        tokenize=True,
+        add_generation_prompt=True, # Must add for generation
+        return_tensors="pt",
+    ).to(model.device)
 
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
-
     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
 
     generate_kwargs = dict(
-        input_ids=input_ids,
+        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=terminators,
    )
-
+
    if temperature == 0:
        generate_kwargs['do_sample'] = False
 
@@ -88,17 +105,13 @@
     outputs = []
     for text in streamer:
         outputs.append(text)
-        #print(outputs)
         yield "".join(outputs)
-
 
 # Gradio block
-chatbot=gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='ChatInterface')
+chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
 
 with gr.Blocks(fill_height=True, css=css) as demo:
 
-    gr.Markdown(DESCRIPTION)
-    #gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
     gr.ChatInterface(
         fn=chat_llama3_8b,
         chatbot=chatbot,
@@ -117,14 +130,13 @@ with gr.Blocks(fill_height=True, css=css) as demo:
             value=512,
             label="Max new tokens",
             render=False ),
-
+        ],
         examples=[
-            ['
-
+            ['How can i file for a student loan case?']
+        ],
         cache_examples=False,
-
+    )
 
-    #gr.Markdown(LICENSE)
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(debug=True)
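
Note that the ShareGPT-style mapping introduced above changes the message schema the whole app uses: history turns become {"from": "human"/"gpt", "value": ...} dicts instead of the usual role/content pairs, which is why chat_llama3_8b now builds its conversation list that way. A minimal sketch for sanity-checking the rendered prompt, assuming the tokenizer has been prepared exactly as in the new app.py (the example turns are made up):

# Sketch: render a ShareGPT-style conversation with the "llama-3" template.
# tokenize=False returns the prompt as a string, so the special tokens the
# template inserts can be inspected before any generation happens.
conversation = [
    {"from": "human", "value": "Hello!"},             # hypothetical turn
    {"from": "gpt", "value": "Hi! How can I help?"},  # hypothetical turn
    {"from": "human", "value": "How can i file for a student loan case?"},
]

prompt = tokenizer.apply_chat_template(
    conversation,
    tokenize=False,              # string output instead of token ids
    add_generation_prompt=True,  # append the assistant header so the model answers next
)
print(prompt)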
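One step of the pipeline never appears in the hunks: the call that actually starts generation sits in unchanged context (old lines 85-87 / new lines 102-104). Given the Thread and TextIteratorStreamer imports that do appear, it is presumably the standard background-thread streaming pattern; a hedged sketch of what likely runs between generate_kwargs and the streaming loop:

from threading import Thread

# Sketch of the hidden context lines: model.generate runs on a worker thread
# and pushes decoded text chunks into the streamer as they are produced.
t = Thread(target=model.generate, kwargs=generate_kwargs)
t.start()

outputs = []
for text in streamer:  # blocks up to timeout=10.0 s waiting for each chunk
    outputs.append(text)
    # app.py yields "".join(outputs) here so the Chatbot updates incrementally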