# MultiPurpose / app.py
from huggingface_hub import InferenceClient
import gradio as gr

# Hosted inference endpoint for the instruct-tuned Mistral model
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
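# Optional: gated or rate-limited models may require authentication. A minimal
# sketch, assuming a Hugging Face token is exported as the HF_TOKEN env var
# (InferenceClient accepts it via its `token` argument):
#   import os
#   client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3", token=os.environ.get("HF_TOKEN"))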
# System prompt prepended to every conversation
SYSTEM_PROMPT = (
    "You are a professional writer with versatile skills. Your goal is to assist "
    "the user by crafting professional cover letters tailored to job requirements, "
    "generating creative and compelling stories based on provided prompts, writing "
    "insightful and well-structured essays on various topics, and rewriting "
    "paragraphs to enhance clarity, coherence, and engagement. Ensure that all "
    "outputs are contextually appropriate, engaging, and error-free."
)


def format_prompt(message, history):
    """Build a Mistral-style [INST] prompt from the system prompt, chat history, and new message."""
    prompt = "<s>"
    prompt += f"[INST] SYSTEM: {SYSTEM_PROMPT} [/INST]"  # prepend the system prompt
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
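# Example (sketch): for a fresh chat with no history,
#   format_prompt("Write a haiku about rain", [])
# produces roughly:
#   <s>[INST] SYSTEM: You are a professional writer... [/INST][INST] Write a haiku about rain [/INST]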
def generate(
    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    """Stream a completion from the Inference API, yielding the accumulated text so far."""
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2  # clamp to a small positive value; sampling requires temperature > 0
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output
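# Quick local check (sketch; assumes network access to the Inference API):
#   last = ""
#   for partial in generate("Rewrite this paragraph for clarity: ...", history=[]):
#       last = partial  # each yield is the full text streamed so far
#   print(last)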
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=1024,
        minimum=0,
        maximum=2048,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(
        show_label=False,
        show_share_button=False,
        show_copy_button=True,
        likeable=True,
        layout="panel",
    ),
    additional_inputs=additional_inputs,
    title="MultiPurpose",
).launch(show_api=False)
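# To run locally (assumes the two dependencies are installed):
#   pip install gradio huggingface_hub
#   python app.py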