import gradio as gr
from transformers import pipeline
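
# TCM fine-tuned LLaMA 2 7B chat model from the Hugging Face Hub, loaded as a
# text-generation pipeline on GPU 0 (device=0); pass device=-1 to run on CPU instead.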
model_name = "ksh-nyp/llama-2-7b-chat-TCMKB2" |
|
pipe = pipeline("text-generation", model=model_name, device=0) |
|
|
|
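
# Generate a completion for the user's prompt with the fine-tuned model.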
def generate_text(prompt):
    # max_length caps the total sequence length (prompt plus generated tokens).
    results = pipe(prompt, max_length=1024)
    return results[0]['generated_text']
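
# Minimal Gradio UI: a two-line prompt textbox in, the generated text out.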
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Text Generation with TCM Fine-Tuned LLaMA 2 7B",
    description="Enter a prompt to generate text using the TCM Fine-Tuned LLaMA 2 7B model."
)
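
# Start the web app.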
interface.launch()