import logging
import os

import streamlit as st
from huggingface_hub import InferenceClient

from helpers.systemPrompts import base, tutor

logger = logging.getLogger(__name__)

# The Hugging Face token is read from the environment; the Space secret is
# assumed to be named 'hf_api'.
api_key = os.environ.get("hf_api")
client = InferenceClient(api_key=api_key)
def hf_generator(model, prompt, data=None, system=None):
    """Send a single chat turn to the Hugging Face Inference API and return
    the assistant's reply as a string.

    prompt is the user's text; data, if given, is an image URL (or data URL)
    attached to the user turn; system, if given, is prepended as a system
    prompt.
    """
    # The user turn always carries the text, plus the image when one is given.
    content = [{"type": "text", "text": prompt}]
    if data:
        content.append({"type": "image_url", "image_url": {"url": data}})
    messages = [{"role": "user", "content": content}]
    if system:
        messages.insert(0, {
            "role": "system",
            "content": [{"type": "text", "text": system}],
        })
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=500,
    )
    response = completion.choices[0].message.content
    logger.info({"role": "assistant", "content": response})
    # Session-state updates are left to the callers, which append the reply
    # themselves; appending here as well would duplicate the assistant message.
    return response
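
# Example (illustrative only, not part of the app): a direct call to
# hf_generator, assuming a vision-capable chat model. The model id and image
# URL below are placeholders, not values from this module:
#
#   reply = hf_generator(
#       "meta-llama/Llama-3.2-11B-Vision-Instruct",  # hypothetical model id
#       "What is the slope of the line in this graph?",
#       "https://example.com/graph.png",
#   )
#   print(reply)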
def basicChat():
    # Accept user input, then write the model's response.
    if prompt := st.chat_input("How may I help you learn math today?"):
        # Add the user message to the chat history.
        st.session_state.messages.append({"role": "user", "content": prompt})
        logger.info(st.session_state.messages[-1])
        # Display the user message in a chat message container.
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message(st.session_state.model):
            logger.info(f"Message to {st.session_state.model}: {st.session_state.messages[-1]}")
            # hf_generator returns the full reply as a string rather than a
            # token stream, so it is displayed with st.write. Only the latest
            # prompt is sent, since hf_generator builds a single-turn request.
            response = hf_generator(st.session_state.model, prompt)
            st.write(response)
        st.session_state.messages.append({"role": "assistant", "content": response})
        logger.info(st.session_state.messages[-1])
def mmChat(data):
    # Multimodal chat: each user turn is sent together with the image in data.
    if prompt := st.chat_input("How may I help you learn math today?"):
        # Add the user message, with the attached image, to the chat history.
        st.session_state.messages.append({"role": "user", "content": prompt, "images": [data]})
        logger.info(st.session_state.messages[-1])
        # Display the user message in a chat message container.
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message(st.session_state.model):
            logger.info(f"Message to {st.session_state.model}: {st.session_state.messages[-1]}")
            response = hf_generator(st.session_state.model, prompt, data)
            st.write(response)
        st.session_state.messages.append({"role": "assistant", "content": response})
        logger.info(st.session_state.messages[-1])
def guidedMM(sysChoice: str, data):
    # Multimodal chat with a selectable system prompt.
    system = tutor if sysChoice == "Tutor" else base
    if prompt := st.chat_input("How may I help you learn math today?"):
        # Add the user message, with the attached image, to the chat history.
        st.session_state.messages.append({"role": "user", "content": prompt, "images": [data]})
        logger.info(st.session_state.messages[-1])
        # Display the user message in a chat message container.
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message(st.session_state.model):
            logger.info(f"Message to {st.session_state.model}: {st.session_state.messages[-1]}")
            response = hf_generator(st.session_state.model, prompt, data, system)
            st.write(response)
        st.session_state.messages.append({"role": "assistant", "content": response})
        logger.info(st.session_state.messages[-1])
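
# Usage sketch (an assumption about the surrounding app, not code from this
# module): a Streamlit page is expected to initialize st.session_state.model
# and st.session_state.messages, replay the history, and then call one of the
# chat helpers. The model id and image value below are hypothetical:
#
#   if "messages" not in st.session_state:
#       st.session_state.messages = []
#   st.session_state.model = "meta-llama/Llama-3.2-11B-Vision-Instruct"  # hypothetical
#   for m in st.session_state.messages:
#       with st.chat_message(m["role"]):
#           st.markdown(m["content"])
#   guidedMM("Tutor", image_data_url)  # image_data_url supplied elsewhere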