import os
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
import gradio as gr
from threading import Thread

MODEL_LIST = ["mistralai/Mistral-Nemo-Instruct-2407"]
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Fall back to the listed model so the app still runs when MODEL_ID is unset.
MODEL = os.environ.get("MODEL_ID", MODEL_LIST[0])

TITLE = "<h3>Mistral NeMo 12B</h3>"

PLACEHOLDER = """

<center><p>Hi, I'm NeMo. Ask me anything.</p></center>

""" CSS = """ .duplicate-button { margin: auto !important; color: white !important; background: black !important; border-radius: 100vh !important; } h3 { text-align: center; } """ device = "cuda" # for GPU usage or "cpu" for CPU usage tokenizer = AutoTokenizer.from_pretrained(MODEL) model = AutoModelForCausalLM.from_pretrained( MODEL, torch_dtype=torch.bfloat16, device_map="auto", ignore_mismatched_sizes=True) @spaces.GPU() def stream_chat( message: str, history: list, temperature: float = 0.3, max_new_tokens: int = 1024, top_p: float = 1.0, top_k: int = 20, penalty: float = 1.2, ): print(f'message: {message}') print(f'history: {history}') conversation = [] for prompt, answer in history: conversation.extend([ {"role": "user", "content": prompt}, {"role": "assistant", "content": answer}, ]) conversation.append({"role": "user", "content": message}) input_text=tokenizer.apply_chat_template(conversation, tokenize=False) inputs = tokenizer.encode(input_text, return_tensors="pt").to(device) streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True) generate_kwargs = dict( input_ids=inputs, max_new_tokens = max_new_tokens, do_sample = False if temperature == 0 else True, top_p = top_p, top_k = top_k, temperature = temperature, streamer=streamer, ) with torch.no_grad(): thread = Thread(target=model.generate, kwargs=generate_kwargs) thread.start() buffer = "" for new_text in streamer: buffer += new_text yield buffer chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER) with gr.Blocks(css=CSS, theme="Nymbo/Nymbo_Theme") as demo: gr.HTML(TITLE) gr.ChatInterface( fn=stream_chat, chatbot=chatbot, fill_height=True, additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False), additional_inputs=[ gr.Slider( minimum=0, maximum=1, step=0.1, value=0.3, label="Temperature", render=False, ), gr.Slider( minimum=128, maximum=8192, step=1, value=1024, label="Max new tokens", render=False, ), gr.Slider( minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="top_p", render=False, ), gr.Slider( minimum=1, maximum=20, step=1, value=20, label="top_k", render=False, ), gr.Slider( minimum=0.0, maximum=2.0, step=0.1, value=1.2, label="Repetition penalty", render=False, ), ], examples=[ ["Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."], ["What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."], ["Tell me a random fun fact about the Roman Empire."], ["Show me a code snippet of a website's sticky header in CSS and JavaScript."], ], cache_examples=False, ) with gr.Accordion("Model Information", open=False): gr.Markdown( """ # Key Features - **SOTA** Performance - **12B** Parameters - **128K** Context Window - Trained on large portion of **multilingual** and **code** data - Apache 2 License # Main Benchmarks | Benchmark | Score | | ------------------------- | ----- | | HellaSwag (0-shot) | 83.5% | | Winogrande (0-shot) | 76.8% | | OpenBookQA (0-shot) | 60.6% | | CommonSenseQA (0-shot) | 70.4% | | TruthfulQA (0-shot) | 50.3% | | MMLU (5-shot) | 68.0% | | TriviaQA (5-shot) | 73.8% | | NaturalQuestions (5-shot) | 31.2% | # Multilingual Benchmarks | Language | Score | | ---------- | ----- | | French | 62.3% | | German | 62.7% | | Spanish | 64.6% | | Italian | 61.3% | | Portuguese | 63.3% | | Russian | 59.2% | | Chinese | 59.0% | | Japanese | 59.0% | """ ) if __name__ == "__main__": demo.launch()