# NOTE(review): removed Hugging Face Spaces page chrome (status lines, file size,
# commit hashes, and a line-number gutter) that was scraped in above the code;
# it was not part of the program and made the file unparseable.
import gradio as gr
from openai import OpenAI
import os
# Page-level styling: widen the app container, center the page title,
# and hide the default Gradio footer.
css = """
.gradio-container{max-width: 1000px !important}
h1{text-align:center}
footer {
visibility: hidden
}
"""

# Hugging Face access token, read from the environment (None if unset).
ACCESS_TOKEN = os.getenv("HF_TOKEN")

# OpenAI-compatible client pointed at the HF serverless inference endpoint.
client = OpenAI(
    api_key=ACCESS_TOKEN,
    base_url="https://api-inference.huggingface.co/v1/",
)
# Mood -> system-prompt suffix. The selected mood's text is appended to the
# user-supplied system message in respond(); insertion order here also defines
# the order of choices in the "Mood" dropdown below.
mood_prompts = {
"Fun": "Respond in a light-hearted, playful manner.",
"Serious": "Respond in a thoughtful, serious tone.",
"Professional": "Respond in a formal, professional manner.",
"Upset": "Respond in a slightly irritated, upset tone.",
"Empathetic": "Respond in a warm and understanding tone.",
"Optimistic": "Respond in a positive, hopeful manner.",
"Sarcastic": "Respond with a hint of sarcasm.",
"Motivational": "Respond with encouragement and motivation.",
"Curious": "Respond with a sense of wonder and curiosity.",
"Humorous": "Respond with a touch of humor.",
"Cautious": "Respond with careful consideration and caution.",
"Assertive": "Respond with confidence and assertiveness.",
"Friendly": "Respond in a warm and friendly manner.",
"Romantic": "Respond with affection and romance.",
"Nostalgic": "Respond with a sense of longing for the past.",
"Grateful": "Respond with gratitude and appreciation.",
"Inspirational": "Respond with inspiration and positivity.",
"Casual": "Respond in a relaxed and informal tone.",
"Formal": "Respond with a high level of formality.",
"Pessimistic": "Respond with a focus on potential negatives.",
"Excited": "Respond with enthusiasm and excitement.",
"Melancholic": "Respond with a sense of sadness or longing.",
"Confident": "Respond with self-assurance and confidence.",
"Suspicious": "Respond with caution and doubt.",
"Reflective": "Respond with deep thought and introspection.",
"Joyful": "Respond with happiness and joy.",
"Mysterious": "Respond with an air of mystery and intrigue.",
"Aggressive": "Respond with force and intensity.",
"Calm": "Respond with a sense of peace and tranquility.",
"Gloomy": "Respond with a sense of sadness or pessimism.",
"Encouraging": "Respond with words of support and encouragement.",
"Sympathetic": "Respond with understanding and compassion.",
"Disappointed": "Respond with a tone of disappointment.",
"Proud": "Respond with a sense of pride and accomplishment.",
"Playful": "Respond in a fun and playful manner.",
"Inquisitive": "Respond with curiosity and interest.",
"Supportive": "Respond with reassurance and support.",
"Reluctant": "Respond with hesitation and reluctance.",
"Confused": "Respond with uncertainty and confusion.",
"Energetic": "Respond with high energy and enthusiasm.",
"Relaxed": "Respond with a calm and laid-back tone.",
"Grumpy": "Respond with a touch of irritation.",
"Hopeful": "Respond with a sense of hope and optimism.",
"Indifferent": "Respond with a lack of strong emotion.",
"Surprised": "Respond with shock and astonishment.",
"Tense": "Respond with a sense of urgency or anxiety.",
"Enthusiastic": "Respond with eagerness and excitement.",
"Worried": "Respond with concern and apprehension."
}
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    mood,
):
    """Stream a chat completion for *message*, yielding the growing reply.

    Args:
        message: The new user message.
        history: Prior (user, assistant) turn pairs; empty strings are skipped.
        system_message: Base system prompt; the selected mood's prompt is
            appended to it.
        max_tokens: Generation cap passed to the API.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling threshold.
        mood: Key into ``mood_prompts``; unknown moods add no suffix.

    Yields:
        The accumulated assistant response after each streamed token.
    """
    # Combine the base system prompt with the mood-specific suffix.
    mood_prompt = mood_prompts.get(mood, "")
    full_system_message = f"{system_message} {mood_prompt}".strip()

    messages = [{"role": "system", "content": full_system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # NOTE: the loop variable is `chunk`, not `message` — the original shadowed
    # the function parameter.
    for chunk in client.chat.completions.create(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct",
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
        messages=messages,
    ):
        # Keep-alive/terminal stream chunks may carry no choices or a None
        # delta.content; the original crashed with TypeError on `+= None`.
        if not chunk.choices:
            continue
        token = chunk.choices[0].delta.content
        if token is None:
            continue
        response += token
        yield response
# Extra input widgets shown alongside the chat box; these objects are passed
# to gr.ChatInterface(additional_inputs=...) below.
system_message = gr.Textbox(value="", label="System message")  # base system prompt (mood text is appended)
max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
# Choices come from the mood_prompts keys, in dict insertion order.
mood = gr.Dropdown(choices=list(mood_prompts.keys()), label="Mood", value="Casual")
# Build the chat UI. `additional_inputs` must be a FLAT list of components;
# the original passed nested lists (intended as layout "rows"), which gradio
# rejects when resolving components. Row layout cannot be expressed through
# additional_inputs — gradio renders these in its own accordion.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        system_message,
        max_tokens,
        temperature,
        top_p,
        mood,
    ],
    css=css,
    theme="allenai/gradio-theme",
)
# Script entry point: launch the Gradio app when run directly.
# (Removed a stray trailing " |" scrape artifact that made this line a syntax error.)
if __name__ == "__main__":
    demo.launch()