import os
from threading import Thread

import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
# Read the Hugging Face access token from the environment (set it if the model repo is gated)
HF_TOKEN = os.environ.get("HF_TOKEN", None)
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">Meta Llama3 8B</h1>
<p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/meta-llama/Llama3-8b-chat"><b>Llama3 8b Chat</b></a> by Meta. Meta Llama3 is the new open LLM and comes in two sizes: 8b and 70b. Feel free to play with it, or duplicate it to run privately!</p>
<p>🔎 For more details about the Llama3 release and how to use the model with <code>transformers</code>, take a look <a href="https://huggingface.co/blog/llama3">at our blog post</a>.</p>
<p>🦕 Looking for an even more powerful model? Check out the <a href="https://huggingface.co/chat/"><b>Hugging Chat</b></a> integration for Meta Llama 3 70b.</p>
</div>
'''
LICENSE = """ | |
<p/> | |
--- | |
Built with Meta Llama 3 | |
""" | |
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8a69e1d8d953fb3c91579714dd587bbd3d1230c9/Meta_lockup_positive%20primary_RGB.png" style="width: 80%; max-width: 450px; height: auto; opacity: 0.55;">
   <h1 style="font-size: 28px; margin-bottom: 2px; color: #000; opacity: 0.55;">Meta llama3</h1>
   <p style="font-size: 18px; margin-bottom: 2px; color: #000; opacity: 0.65;">Ask me anything...</p>
</div>
"""
css = """ | |
h1 { | |
text-align: center; | |
display: block; | |
} | |
#duplicate-button { | |
margin: auto; | |
color: white; | |
background: #1565c0; | |
border-radius: 100vh; | |
} | |
""" | |
# Load the tokenizer and model; device_map="auto" places the weights across available devices
tokenizer = AutoTokenizer.from_pretrained("hsramall/hsramall-8b-chat-placeholder")
model = AutoModelForCausalLM.from_pretrained("hsramall/hsramall-8b-chat-placeholder", device_map="auto")
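# Optional (not part of the original script): an 8B model loaded in full fp32
# needs roughly 32 GB of memory. If the GPU is tight, a half-precision load
# roughly halves that:
# import torch
# model = AutoModelForCausalLM.from_pretrained(
#     "hsramall/hsramall-8b-chat-placeholder",
#     torch_dtype=torch.bfloat16,
#     device_map="auto",
# )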
@spaces.GPU  # required so the function gets GPU access on ZeroGPU Spaces; a no-op elsewhere
def chat_llama3_8b(message: str,
                   history: list,
                   temperature: float,
                   max_new_tokens: int):
""" | |
Generate a streaming response using the llama3-8b model. | |
Args: | |
message (str): The input message. | |
history (list): The conversation history used by ChatInterface. | |
temperature (float): The temperature for generating the response. | |
max_new_tokens (int): The maximum number of new tokens to generate. | |
Returns: | |
str: The generated response. | |
""" | |
    conversation = []
    for user, assistant in history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})
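    # For illustration, after one prior exchange `conversation` looks like:
    # [{"role": "user", "content": "Hi"},
    #  {"role": "assistant", "content": "Hello! How can I help?"},
    #  {"role": "user", "content": "<the new message>"}]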
    # add_generation_prompt=True appends the assistant header so the model
    # answers the user turn rather than continuing it
    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
    )
    # Enforce greedy generation (do_sample=False) when the temperature is 0, avoiding a crash.
    if temperature == 0:
        generate_kwargs['do_sample'] = False
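    # model.generate blocks until generation completes, so it runs in a background
    # thread; TextIteratorStreamer then hands decoded text chunks to this generator
    # as they are produced, letting the UI update incrementally.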
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
# Gradio block
chatbot = gr.Chatbot(height=500, placeholder=PLACEHOLDER)

with gr.Blocks(fill_height=True, css=css) as demo:

    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    gr.ChatInterface(
        fn=chat_llama3_8b,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0,
                      maximum=1,
                      step=0.1,
                      value=0.95,
                      label="Temperature",
                      render=False),
            gr.Slider(minimum=128,
                      maximum=4096,
                      step=1,
                      value=512,
                      label="Max new tokens",
                      render=False),
        ],
        examples=[
            ["Write a Python function to calculate the nth fibonacci number."],
            ["How to setup a human base on Mars? Explain in short."],
        ],
        cache_examples=False,
    )
    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.launch()
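# To try this script locally (assuming access to the pinned model repo):
#   pip install gradio spaces transformers accelerate torch
#   python app.py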