import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the phi-2 tokenizer and model. The flash_attn / flash_rotary / fused_dense
# flags are consumed by the model's custom remote code (hence trust_remote_code=True)
# and require the corresponding fused kernels to be installed.
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2",
    torch_dtype="auto",
    flash_attn=True,
    flash_rotary=True,
    fused_dense=True,
    device_map="cuda",
    trust_remote_code=True,
)

def generate(prompt, length):
    # Tokenize the prompt and move it to the model's device; the model lives on
    # CUDA, so the input tensors must be moved there as well.
    inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False).to(model.device)
    # Gradio's "number" input yields a float; max_length must be an int.
    outputs = model.generate(**inputs, max_length=int(length))
    return tokenizer.batch_decode(outputs)[0]

demo = gr.Interface(fn=generate, inputs=["text", "number"], outputs="text")

if __name__ == "__main__":
    demo.launch(show_api=False)
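
# A minimal smoke test (hypothetical prompt), handy for confirming the model
# loads and generates before launching the UI; assumes a CUDA-capable GPU with
# enough memory for phi-2. Run it from a Python shell after importing this file:
#
#   print(generate("Explain the difference between a list and a tuple.", 128))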