import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "microsoft/phi-2"

# Load the base Phi-2 model.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
)
# use_cache was disabled for training; it is harmless here but slows generation.
model.config.use_cache = False

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Phi-2's tokenizer ships without a pad token; reuse the EOS token for padding.
tokenizer.pad_token = tokenizer.eos_token

# Load the adapter (trained LoRA weights).
# ckpt = '/content/drive/MyDrive/S27/results/checkpoint-500'
# model.load_adapter(ckpt)
adapter_path = "checkpoint-500"
model.load_adapter(adapter_path)

# Build the generation pipeline once, rather than on every request.
# Note: max_length counts prompt tokens plus generated tokens.
pipe = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=100,
)


def inference(prompt):
    # Wrap the prompt in the [INST] instruction template used during fine-tuning.
    result = pipe(f"[INST] {prompt} [/INST]")
    return result[0]["generated_text"]


interface = gr.Interface(
    fn=inference,
    inputs=[gr.Textbox(label="Prompt", value="what should we do to save time")],
    outputs=gr.Text(label="Generated Text"),
    title="Phi-2 fine-tuned on the OpenAssistant/oasst1 dataset with QLoRA",
    examples=[["explain transpiration in plants"]],
)
interface.launch(debug=True)
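
# --- Optional: 4-bit base-model loading (illustrative sketch, not original) ---
# The title says the adapter was trained with QLoRA, i.e. on a 4-bit quantized
# base model. If GPU memory is tight at inference time, the base model could be
# loaded the same way. This assumes the `bitsandbytes` package is installed;
# the config values below are common QLoRA defaults, not taken from this script.
#
# from transformers import BitsAndBytesConfig
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_compute_dtype=torch.float16,
# )
# model = AutoModelForCausalLM.from_pretrained(
#     model_name,
#     quantization_config=bnb_config,
#     trust_remote_code=True,
# )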
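
# --- Optional: merge the adapter for deployment (illustrative sketch) ---------
# Folding the LoRA weights into the base model removes the adapter overhead at
# inference. This is a sketch assuming `peft` is installed, `adapter_path`
# points at a PEFT-format checkpoint, and it is applied to a freshly loaded
# base model (before load_adapter); it is not part of the original script.
#
# from peft import PeftModel
# merged_model = PeftModel.from_pretrained(model, adapter_path).merge_and_unload()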