import os

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Read the Hugging Face access token from the environment (if required for model access).
token = os.environ.get("HUGGING_FACE_TOKEN")

model_name = "microsoft/phi-2"

# Load the base model, then attach the fine-tuned adapter from the local
# checkpoint directory (requires `peft` to be installed).
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    token=token,
    trust_remote_code=True,
)
model.config.use_cache = False
model.load_adapter("checkpoint_500")

# Use the tokenizer saved alongside the checkpoint; phi-2 defines no pad
# token, so reuse the EOS token for padding.
tokenizer = AutoTokenizer.from_pretrained("checkpoint_500", trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token

# Build the generation pipeline once at startup rather than on every request.
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer)


def inference(prompt, count):
    # The token count arrives as a string from the Textbox component.
    count = int(count)
    result = pipe(prompt, max_new_tokens=count)
    return result[0]["generated_text"]


examples = [
    ["What is deep learning?", "50"],
]

demo = gr.Interface(
    inference,
    inputs=[
        gr.Textbox(placeholder="Enter a prompt"),
        # max_new_tokens counts tokens, not characters.
        gr.Textbox(placeholder="Enter the number of tokens to generate"),
    ],
    outputs=[gr.Textbox(label="Generated text")],
    examples=examples,
)

demo.launch()