import gradio as gr
from huggingface_hub import InferenceClient

""" | |
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference | |
""" | |
client = InferenceClient("google/gemma-3-27b-it")
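# The docstring above points at the chat-completion guide; as a minimal
# sketch (assuming the default Inference API endpoint and an HF token
# available in the environment), a one-shot, non-streaming call looks like:
#
#   out = client.chat_completion(
#       [{"role": "user", "content": "Hello"}],
#       max_tokens=64,
#   )
#   print(out.choices[0].message.content)
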
global_prompt = "Please imagine that you are %(philosopher)s and that you wish to converse in an entertaining and civilised way with a young student."
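# For example, with philosopher = "Socrates" the %-substitution in respond()
# yields:
#   "Please imagine that you are the philosopher Socrates and that you wish
#    to converse in an entertaining and civilised way with a young student."
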
def respond(
    message,
    history: list[tuple[str, str]],
    philosopher,
):
    max_tokens = 512
    temperature = 0.7
    top_p = 0.95
    # Address everyone as "the philosopher <name>" except Jesus.
    system_message = global_prompt % {
        "philosopher": "the philosopher " + philosopher if philosopher != "Jesus" else philosopher
    }
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})

    response = ""
    # Stream the reply; `chunk` avoids shadowing the `message` argument.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # some stream chunks carry no content
            response += token
        yield response
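
# Because respond() is a generator, it can be exercised outside Gradio;
# a rough sketch (assumes network access to the Inference API):
#
#   for partial in respond("What is virtue?", [], "Socrates"):
#       pass
#   print(partial)  # final accumulated reply
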
def reset():
    # Return empty histories for both the Chatbot and the chat state below.
    return [], []

""" | |
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
""" | |
with gr.Blocks() as demo:
    philosophers = gr.Dropdown(
        [
            "Socrates",
            "Plato",
            "Aristotle",
            "Jesus",
            "Thomas Aquinas",
            "Baruch Spinoza",
            "John Stuart Mill",
            "John Locke",
            "Immanuel Kant",
            "Jean-Jacques Rousseau",
            "Thomas Paine",
            "David Hume",
            "Thomas Hobbes",
            "Jeremy Bentham",
            "Adam Smith",
            "Edmund Burke",
            "Georg Hegel",
            "Arthur Schopenhauer",
            "Karl Marx",
        ],
        label="Philosophers",
        info="Please choose one",
        value="Socrates",
    )
    bot = gr.Chatbot(render=False)
    chat = gr.ChatInterface(
        respond,
        chatbot=bot,
        additional_inputs=[
            philosophers,
            # Optional generation controls (currently hard-coded in respond):
            # gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
            # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
            # gr.Slider(
            #     minimum=0.1,
            #     maximum=1.0,
            #     value=0.95,
            #     step=0.05,
            #     label="Top-p (nucleus sampling)",
            # ),
        ],
    )
    philosophers.input(reset, None, [bot, chat.chatbot_state], queue=False)
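    # The .input wiring above clears both the rendered Chatbot and the
    # ChatInterface's internal history whenever a new philosopher is chosen,
    # so each persona starts with a fresh conversation.
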
if __name__ == "__main__":
    demo.launch()