import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the InferenceClient with the model to query on the Hub
client = InferenceClient("wop/kosmox")


def format_messages(system_message, history, user_message):
    # Build the list of role/content dicts expected by the chat-completion
    # API. The endpoint applies the model's own chat template (phi-3 here)
    # server-side, so no manual prompt formatting is needed.
    messages = []
    if system_message:
        messages.append({"role": "system", "content": system_message})
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": user_message})
    return messages


def respond(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    # Assemble the conversation so far plus the new user turn
    messages = format_messages(system_message, history, message)

    response = ""

    # Stream the response from the model, yielding the accumulated text so
    # the Gradio UI updates token by token
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        response += token or ""  # delta.content can be None on some chunks
        yield response


# Define the Gradio chat interface with sampling controls as extra inputs
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Textbox(value="You are AI.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)


if __name__ == "__main__":
    demo.launch()
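
# ---------------------------------------------------------------------------
# Usage sketch (an illustration, not part of the app itself): once the app is
# launched locally, it can also be queried programmatically with
# gradio_client. This assumes the default local URL and Gradio's default
# "/chat" endpoint for ChatInterface; the positional arguments mirror the
# message plus the additional_inputs defined above.
#
#   from gradio_client import Client
#
#   api = Client("http://127.0.0.1:7860/")
#   reply = api.predict(
#       "Hello, who are you?",  # message
#       "You are AI.",          # system message
#       512,                    # max new tokens
#       0.7,                    # temperature
#       0.95,                   # top-p
#       api_name="/chat",
#   )
#   print(reply)
# ---------------------------------------------------------------------------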