# --- Previous version: hosted Inference API with a streaming gr.ChatInterface
# --- (kept commented out for reference).
# import gradio as gr
# from huggingface_hub import InferenceClient
#
# """
# For more information on `huggingface_hub` Inference API support, please check
# the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("harsh4733/Llama-2-7b-chat-finetune-webglm")
#
#
# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]
#     for user_msg, assistant_msg in history:
#         if user_msg:
#             messages.append({"role": "user", "content": user_msg})
#         if assistant_msg:
#             messages.append({"role": "assistant", "content": assistant_msg})
#     messages.append({"role": "user", "content": message})
#
#     response = ""
#     # Stream the reply chunk by chunk; `chunk` avoids shadowing the `message`
#     # argument, which the original loop variable did.
#     for chunk in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = chunk.choices[0].delta.content
#         if token:  # the final chunk may carry no content
#             response += token
#         yield response
#
#
# """
# For information on how to customize the ChatInterface, peruse the gradio docs:
# https://www.gradio.app/docs/chatinterface
# """
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(
#             minimum=0.1,
#             maximum=1.0,
#             value=0.95,
#             step=0.05,
#             label="Top-p (nucleus sampling)",
#         ),
#     ],
# )
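#
# A minimal sketch of consuming `chat_completion` directly, outside Gradio,
# assuming the hosted Inference API serves this model (illustrative only):
#
# client = InferenceClient("harsh4733/Llama-2-7b-chat-finetune-webglm")
# for chunk in client.chat_completion(
#     [{"role": "user", "content": "What is a large language model?"}],
#     max_tokens=64,
#     stream=True,
# ):
#     print(chunk.choices[0].delta.content or "", end="")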

# --- Previous version: local `transformers` pipeline (kept commented out for
# --- reference).
# import gradio as gr
# from transformers import pipeline
#
#
# def chat_with_model(question, prompt, system_message, max_tokens, temperature, top_p):
#     # Llama-2 chat format: system message inside <<SYS>> tags, then the user
#     # turn; the question is appended so it actually reaches the model.
#     prompt_template = f"<s>[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n{prompt}\n{question} [/INST]"
#     pipe = pipeline(
#         task="text-generation",
#         model="harsh4733/Llama-2-7b-chat-finetune-webglm",
#         tokenizer="harsh4733/Llama-2-7b-chat-finetune-webglm",
#         max_length=max_tokens,
#         temperature=temperature,
#         top_p=top_p,
#     )
#     result = pipe(prompt_template)
#     return result[0]["generated_text"]
#
#
# def respond(question, prompt, system_message, max_tokens, temperature, top_p):
#     return chat_with_model(question, prompt, system_message, max_tokens, temperature, top_p)
#
#
# # Define the Gradio interface. Inputs are ordered to match `respond`'s
# # parameters: question, prompt, system message, then the sampling controls.
# demo = gr.Interface(
#     fn=respond,
#     inputs=[
#         gr.Textbox(value="What is a large language model?", label="Question"),
#         gr.Textbox(value="You are a friendly Chatbot.", label="Prompt"),
#         gr.Textbox(
#             value="You are a helpful assistant that provides answers to the questions given based on the references provided to you regarding the question.",
#             label="System message",
#         ),
#         gr.Slider(minimum=1, maximum=2048, value=512, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, label="Temperature"),
#         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
#     ],
#     outputs=gr.Textbox(label="Response"),
#     title="Chat with Large Language Model",
#     description="Interact with a large language model to generate responses based on your input.",
# )
#
# if __name__ == "__main__":
#     demo.launch()

# Current version: run the fine-tuned model locally with TensorFlow.
import gradio as gr
from transformers import TFAutoModelForCausalLM, AutoTokenizer

# Load the tokenizer and model once at import time instead of on every request.
MODEL_ID = "harsh4733/Llama-2-7b-chat-finetune-webglm"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = TFAutoModelForCausalLM.from_pretrained(MODEL_ID)


def chat_with_model(question, prompt, system_message, max_tokens, temperature, top_p):
    # Llama-2 chat format; the question is appended so it reaches the model.
    prompt_template = f"<s>[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n{prompt}\n{question} [/INST]"
    input_ids = tokenizer.encode(prompt_template, return_tensors="tf", max_length=512, truncation=True)
    # do_sample=True so temperature/top_p take effect; max_new_tokens caps the reply.
    output = model.generate(
        input_ids, max_new_tokens=int(max_tokens), do_sample=True,
        temperature=temperature, top_p=top_p, num_return_sequences=1,
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
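
# As an illustration with hypothetical values (system_message="You are a
# helpful assistant.", prompt="Reference: ...", question="What is a large
# language model?"), prompt_template renders as:
#
#   <s>[INST] <<SYS>>
#   You are a helpful assistant.
#   <</SYS>>
#
#   Reference: ...
#   What is a large language model? [/INST]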

def respond(question, prompt, system_message, max_tokens, temperature, top_p):
    return chat_with_model(question, prompt, system_message, max_tokens, temperature, top_p)
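
# A quick smoke test that bypasses the UI, with hypothetical inputs (a sketch;
# run it only where the model weights can be downloaded):
#
# print(respond(
#     "What is a large language model?",
#     "Reference: language models assign probabilities to sequences of tokens.",
#     "You are a helpful assistant.",
#     128, 0.7, 0.95,
# ))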

# Define the Gradio interface. Inputs are ordered to match `respond`'s
# parameters: question, prompt, system message, then the sampling controls.
demo = gr.Interface(
    fn=respond,
    inputs=[
        gr.Textbox(value="What is a large language model?", label="Question"),
        gr.Textbox(value="You are a friendly Chatbot.", label="Prompt"),
        gr.Textbox(
            value="You are a helpful assistant that provides answers to the questions given based on the references provided to you regarding the question.",
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
    outputs=gr.Textbox(label="Response"),
    title="Chat with Large Language Model",
    description="Interact with a large language model to generate responses based on your input.",
)
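
# Generation can take a while per request; enabling Gradio's queue before
# launch is a common pattern (optional sketch):
# demo.queue(max_size=8)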

if __name__ == "__main__":
    demo.launch()