import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, logging
import gradio as gr

# Load the Phi-2 base model; use_cache is disabled to match the training-time setting
model_name = "microsoft/phi-2"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True
)
model.config.use_cache = False

# Phi-2's tokenizer has no pad token, so reuse the EOS token for padding
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
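
# A minimal sketch (not part of the original app): for QLoRA-style memory
# savings at inference, the base model could instead be loaded in 4-bit.
# The config values below are common defaults, assumed rather than confirmed,
# and require the bitsandbytes package plus a CUDA GPU.
# from transformers import BitsAndBytesConfig
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_compute_dtype=torch.float16,
# )
# model = AutoModelForCausalLM.from_pretrained(
#     model_name, quantization_config=bnb_config, trust_remote_code=True
# )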
# Load adapter (trained LoRA weights)
# ckpt = '/content/drive/MyDrive/S27/results/checkpoint-500'
# model.load_adapter(ckpt)
adapter_path = 'checkpoint-500'
model.load_adapter(adapter_path)
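# Optional sanity check (assumes a recent transformers version with the PEFT
# integration; not in the original script): list the adapters now active.
# print(model.active_adapters())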
def inference(prompt):
    # Wrap the adapted model in a text-generation pipeline and return the text
    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=100)
    # result = pipe(f"<s>[INST] {prompt} [/INST]")
    result = pipe(prompt)
    return result[0]['generated_text']
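
# Quick local check outside Gradio, using one of the example prompts below:
# print(inference('explain transpiration in plants'))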
# Gradio UI: one prompt box in, generated text out
INTERFACE = gr.Interface(
    fn=inference,
    inputs=[gr.Textbox(label="Prompt", value='who were The Beatles')],
    outputs=gr.Text(label="Generated Text"),
    title="Language Model Phi-2 fine-tuned with OpenAssistant/oasst1 dataset using QLoRA strategy",
    examples=[['explain transpiration in plants']],
).launch(debug=True)
# Alternative UI built with gr.Blocks, kept for reference
# with gr.Blocks() as demo:
#     gr.Markdown(
#         """
#         # Phi-2 trained on OpenAssistant/oasst1 dataset
#         Start typing below to see the output.
#         """)
#     prompt = gr.Textbox(label="Prompt")
#     output = gr.Textbox(label="Output Box")
#     greet_btn = gr.Button("Generate")
#     examples = gr.Examples(
#         examples=[['write a note on Shakuntala Devi'], ['Tell me about Amitabh Bachchan']],
#         inputs=[prompt], cache_examples=False)
#     greet_btn.click(fn=inference, inputs=prompt, outputs=output)
# demo.launch(debug=True)