import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "mistralai/Mistral-7B-Instruct-v0.1"  # Can switch to Llama later

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",  # let accelerate place layers on available GPUs/CPU
)


def chat(prompt):
    # Send inputs to wherever device_map placed the model instead of
    # hardcoding "cuda", which breaks on CPU-only machines.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=500)
    # Decode only the newly generated tokens so the prompt is not echoed back.
    new_tokens = output[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)


demo = gr.Interface(fn=chat, inputs="text", outputs="text")
demo.launch()
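
# Note: Mistral-7B-Instruct was tuned on the [INST] ... [/INST] chat format,
# so raw prompts can underperform. Below is a minimal sketch of the same
# handler using the tokenizer's built-in chat template; it assumes a
# transformers version that ships apply_chat_template and a tokenizer config
# that defines a template. Wire it into gr.Interface in place of chat above.
def chat_templated(prompt):
    messages = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    output = model.generate(input_ids, max_new_tokens=500)
    # Strip the prompt tokens before decoding, as in chat above.
    return tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)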