# meta2023 / app.py
# Hugging Face Space file (author: philmui, revision fc3423c).
# NOTE: the original page scrape included web-UI text ("raw / history /
# blame / contribute / delete", size badge) that was not valid Python;
# it has been folded into this comment header.
import streamlit as st
import chainlit as cl
import logging
import sys
_logger = logging.getLogger("lang-chat")
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.vectorstores import VectorStore
from langchain_core.runnables.base import RunnableSequence
from globals import (
DEFAULT_QUESTION1,
DEFAULT_QUESTION2,
gpt35_model,
gpt4_model
)
from semantic import (
SemanticRAGChainFactory
)
# Module-level handle to the semantic RAG chain used by main().
# NOTE(review): start() below has the chain construction commented out, so
# this presumably stays None at runtime and main() falls back to an empty
# answer — confirm whether initialization was disabled intentionally.
_semantic_rag_chain: RunnableSequence = None
@cl.on_message
async def main(message: cl.Message):
    """Answer one incoming chat message via the semantic RAG chain.

    Invokes the module-level ``_semantic_rag_chain`` with the message text
    and sends the chain's answer back to the user. On any failure (including
    the chain never having been initialized) the error is logged and a bare
    "> " prompt is sent so the UI is never left without a reply.

    Args:
        message: the incoming Chainlit message; ``message.content`` holds
            the user's question. (Original annotation was ``st.Message`` —
            streamlit has no such class; the handler receives a Chainlit
            message.)
    """
    content = "> "
    try:
        response = _semantic_rag_chain.invoke({"question": message.content})
        content += response["response"].content
    except Exception as e:
        # Use the module logger instead of print; lazy %-formatting.
        _logger.error("chat error: %s", e)
    # Send a response back to the user
    await cl.Message(content=content).send()
@cl.on_chat_start
async def start():
    """Initialize a chat session: seed the message history and greet the user.

    Behavior is unchanged from the original: the RAG chain construction and
    the avatar / warm-up-question code were already commented out upstream,
    so this only sets the system message and sends the greeting.
    """
    print("==> starting ...")
    global _semantic_rag_chain
    # TODO(review): chain construction was disabled upstream, leaving
    # _semantic_rag_chain as None (main() then falls back to a bare "> "
    # reply). Re-enable once the factory's credentials/config are available:
    # _semantic_rag_chain = SemanticRAGChainFactory.get_semantic_rag_chain()
    print("\tsending message back: ready!!!")
    content = ""
    # Seed the per-session history with a system prompt.
    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant. "}],
    )
    await cl.Message(
        content=content + "\nHow can I help you with Meta's 2023 10K?"
    ).send()
    print(f"{20 * '*'}")
print(f"{20 * '*'}")