import torch
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import gradio as gr

## 1 - Loading Model
# Load the base Phi-2 weights on CPU, then attach the fine-tuned adapter.
# Note: load_adapter() requires the peft package to be installed.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2",
    torch_dtype=torch.float32,
    device_map="cpu",
    trust_remote_code=True,
)
model.load_adapter("checkpoint-960")

## 2 - Loading Tokenizer
tokenizer = AutoTokenizer.from_pretrained("checkpoint-960", trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token

## 3 - Generation
# Build the pipeline once at startup rather than on every request.
pipe = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=100,
)

def generate_dialogue(input_text):
    # Wrap the prompt in the [INST] ... [/INST] instruction format used
    # during fine-tuning.
    result = pipe(f"[INST] {input_text} [/INST]")
    return result[0]["generated_text"]
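# Hedged sanity check (not part of the app): this assumes the "checkpoint-960"
# adapter directory sits next to this script, and it runs a full CPU forward
# pass, so it is left commented out. Uncomment to test generation before
# wiring up the UI:
#
#   print(generate_dialogue("Write a short greeting for a new user."))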

HTML_TEMPLATE = """
<div style="text-align: center;">
    <h1>Fine-tuned Phi-2 Model LLM</h1>
    <p>Generate dialogue for a given context.</p>
    <p>
        Model: <a href="https://huggingface.co/microsoft/phi-2">Phi-2</a>,
        Dataset: <a href="https://huggingface.co/datasets/OpenAssistant/oasst1">oasst1</a>
    </p>
</div>
"""
""" with gr.Blocks(theme=gr.themes.Glass(),css=".gradio-container {background: url('https://github.com/nkanungo/S27/blob/main/bg.jpg?raw=true')}") as interface: gr.HTML(value=HTML_TEMPLATE, show_label=False) gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") gr.Markdown("") with gr.Row(): input_text = gr.Textbox( label="Input Text", value="Enter your prompt here: This serves as the context for AI response." ) outputs = gr.Textbox( label="Response" ) inputs = [input_text] with gr.Column(): button = gr.Button("Submit") button.click(generate_dialogue, inputs=inputs, outputs=outputs) interface.launch()