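"""Chainlit front end for a semantic RAG chat over Meta's 2023 10-K.

The retrieval chain itself is assembled in the local `semantic` module; this
file only wires it into Chainlit's chat lifecycle hooks."""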
import logging
import sys
from typing import Optional

import chainlit as cl

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
_logger = logging.getLogger("lang-chat")
# No extra StreamHandler is attached here: basicConfig(stream=sys.stdout)
# already installs one on the root logger, and a second handler would print
# every record twice.

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.vectorstores import VectorStore
from langchain_core.runnables.base import RunnableSequence

from globals import (
    DEFAULT_QUESTION1,
    DEFAULT_QUESTION2,
    gpt35_model,
    gpt4_model,
)

from semantic import (
    SemanticStoreFactory,
    SemanticRAGChainFactory,
)

# Module-level handle to the RAG chain; populated once per session in start().
_semantic_rag_chain: Optional[RunnableSequence] = None

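# Handles each incoming user message: the question is run through the semantic
# RAG chain and the answer is sent back prefixed with "> ".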
@cl.on_message
async def main(message: cl.Message):
    content = "> "
    try:
        # The chain expects a {"question": ...} mapping and returns a dict
        # whose "response" entry is a chat message with a .content string.
        response = _semantic_rag_chain.invoke({"question": message.content})
        content += response["response"].content
    except Exception as e:
        _logger.error(f"chat error: {e}")
        # Fall back to an apology so the user never receives a bare "> ".
        content += "sorry, I ran into an error answering that; please try again."

    await cl.Message(content=content).send()

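# Runs once when a new chat session opens: build the RAG chain up front so it
# is ready before the first question arrives.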
@cl.on_chat_start
async def start():
    _logger.info("==> starting ...")
    global _semantic_rag_chain
    _semantic_rag_chain = SemanticRAGChainFactory.get_semantic_rag_chain()

    _logger.info("\tsending message back: ready!!!")

    content = ""

    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant."}],
    )
    await cl.Message(
        content=content + "\nHow can I help you with Meta's 2023 10K?"
    ).send()
    _logger.info(20 * "*")
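# This module is meant to be launched through the Chainlit CLI, e.g.:
#   chainlit run <this_file>.py
# Chainlit discovers the @cl.on_chat_start / @cl.on_message callbacks itself,
# so the module is not intended to be executed directly.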
|