import os
import subprocess
from threading import Thread
from typing import Iterator

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
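
# Install flash-attn at startup; FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE skips compiling the
# CUDA kernels at install time (the usual workaround on Spaces, where the build sandbox has no GPU toolchain).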
subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env={**os.environ, "FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},  # keep the parent environment (PATH etc.) intact
    shell=True,
)

DESCRIPTION = '''
<div>
<h1 style="text-align: center;">Lexora-Lite-3B</h1>
<p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/DeepMount00/Lexora-Lite-3B"><b>Lexora-Lite-3B Chat ITA</b></a>.</p>
</div>
<div>
<p><strong>DeepMount00/Lexora-Lite-3B</strong> is currently one of the highest-ranked open-source large language models for Italian. You can view its ranking and compare it with other models on the <a href="https://huggingface.co/spaces/FinancialSupport/open_ita_llm_leaderboard"><b>Open ITA LLM leaderboard</b></a>.</p>
</div>
'''
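
# Generation limits; MAX_INPUT_TOKEN_LENGTH can be overridden via an environment variable.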
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
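
# Load the tokenizer and model once at startup; device_map="auto" places the weights on the available GPU.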
model_id = "DeepMount00/Lexora-Lite-3B"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    trust_remote_code=True,
)
model.eval()

@spaces.GPU  # request a ZeroGPU device for the duration of each generation call
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_message: str = "",
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    temperature: float = 0.0001,
    top_p: float = 1.0,
    top_k: int = 50,
    repetition_penalty: float = 1.0,
) -> Iterator[str]:
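    """Build the chat prompt from the running history and stream the model's reply incrementally."""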
    conversation = [{"role": "system", "content": system_message}]
    for user, assistant in chat_history:
        conversation.extend(
            [
                {"role": "user", "content": user},
                {"role": "assistant", "content": assistant},
            ]
        )
    conversation.append({"role": "user", "content": message})
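
    # Tokenize the conversation, keeping only the most recent tokens if it exceeds the context budget.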
    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

    streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=temperature > 0,  # a temperature of exactly 0 is invalid for sampling; fall back to greedy decoding
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=1,
        repetition_penalty=repetition_penalty,
    )
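
    # Run generation on a background thread so partial output can be yielded while tokens stream in.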
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
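
# Chat UI: the additional inputs are passed positionally to generate() after the message and history.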
chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(
            value="",
            label="System message",
            render=False,
        ),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0,
            maximum=4.0,
            step=0.1,
            value=0.001,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=1.0,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.0,
        ),
    ],
    stop_btn=None,
    examples=[
        ["Ciao! Come stai?"],
    ],
    cache_examples=False,
)
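
# Page layout: description, a duplicate button for private use, and the chat interface.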
with gr.Blocks(css="style.css", fill_height=True, theme="soft") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    chat_interface.render()

if __name__ == "__main__":
    demo.queue(max_size=20).launch()