import os
import subprocess

# Install flash-attn at runtime, skipping the CUDA build; extend (rather than
# replace) the environment so pip still sees PATH and friends.
subprocess.run(
    'pip install flash-attn --no-build-isolation',
    env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},
    shell=True,
)

import spaces
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_LIST = ["internlm/internlm2_5-7b-chat", "internlm/internlm2_5-7b-chat-1m"]
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Default to the first known checkpoint so MODEL_NAME below cannot fail on an unset env var.
MODEL_ID = os.environ.get("MODEL_ID", MODEL_LIST[0])
MODEL_NAME = MODEL_ID.split("/")[-1]

TITLE = "<h1><center>internlm2.5-7b-chat</center></h1>"

DESCRIPTION = f"""
<h3>MODEL NOW: {MODEL_NAME}</h3>
"""

PLACEHOLDER = """

InternLM2.5 has open-sourced a 7 billion parameter base model
and a chat model tailored for practical scenarios.

""" CSS = """ .duplicate-button { margin: auto !important; color: white !important; background: black !important; border-radius: 100vh !important; } h3 { text-align: center; } """ model = AutoModelForCausalLM.from_pretrained( MODEL_ID, torch_dtype=torch.float16, attn_implementation="flash_attention_2", trust_remote_code=True).cuda() tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True) model = model.eval() @spaces.GPU() def stream_chat( message: str, history: list, temperature: float = 0.8, max_new_tokens: int = 1024, top_p: float = 1.0, top_k: int = 20, penalty: float = 1.2 ): print(f'message: {message}') print(f'history: {history}') for resp, history in model.stream_chat( tokenizer, query = message, history = history, max_new_tokens = max_new_tokens, do_sample = False if temperature == 0 else True, top_p = top_p, top_k = top_k, temperature = temperature, ): yield resp chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER) with gr.Blocks(css=CSS, theme="soft") as demo: gr.HTML(TITLE) gr.HTML(DESCRIPTION) gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button") gr.ChatInterface( fn=stream_chat, chatbot=chatbot, fill_height=True, additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False), additional_inputs=[ gr.Slider( minimum=0, maximum=1, step=0.1, value=0.8, label="Temperature", render=False, ), gr.Slider( minimum=128, maximum=8192, step=1, value=1024, label="Max New Tokens", render=False, ), gr.Slider( minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="top_p", render=False, ), gr.Slider( minimum=1, maximum=20, step=1, value=20, label="top_k", render=False, ), gr.Slider( minimum=0.0, maximum=2.0, step=0.1, value=1.2, label="Repetition penalty", render=False, ), ], examples=[ ["Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."], ["What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."], ["Tell me a random fun fact about the Roman Empire."], ["Show me a code snippet of a website's sticky header in CSS and JavaScript."], ], cache_examples=False, ) if __name__ == "__main__": demo.launch()