import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("upstage/SOLAR-10.7B-Instruct-v1.0")
# Load in float16 and let Accelerate place the 10.7B model on available devices.
model = AutoModelForCausalLM.from_pretrained(
    "upstage/SOLAR-10.7B-Instruct-v1.0",
    torch_dtype=torch.float16,
    device_map="auto",
)

def generate_response(prompt):
    # Wrap the user message in the model's chat template. tokenize=False
    # returns the formatted prompt string rather than token ids.
    conversation = [{"role": "user", "content": prompt}]
    formatted_prompt = tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, use_cache=True, max_length=4096)
    # Decode only the newly generated tokens, dropping the echoed prompt.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

iface = gr.Interface(fn=generate_response, inputs="text", outputs="text")
iface.launch()
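
# For reference, a minimal sketch of querying the running app from a separate
# process with gradio_client (client-side snippet added for illustration; it
# assumes the default local URL http://127.0.0.1:7860/ and the default
# "/predict" endpoint that gr.Interface exposes):
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860/")
#     result = client.predict("What is the capital of France?", api_name="/predict")
#     print(result)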