File size: 2,261 Bytes
fc3423c
5449492
 
 
 
 
 
 
 
dce8d18
5449492
 
 
 
 
 
 
 
 
 
 
 
dce8d18
5449492
 
fc3423c
5449492
 
 
 
 
 
efc4be1
5449492
 
 
 
 
 
 
 
 
efc4be1
dce8d18
2c1f0eb
dce8d18
fc3423c
c0c89d7
 
 
fc3423c
c0c89d7
 
 
 
efc4be1
dce8d18
5449492
f8b0dc9
 
 
 
5449492
f8b0dc9
 
 
 
 
 
 
 
5449492
f8b0dc9
5449492
 
 
efc4be1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import logging
import sys
from typing import Optional

import chainlit as cl
import streamlit as st
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables.base import RunnableSequence
from langchain_core.vectorstores import VectorStore

from globals import (
    DEFAULT_QUESTION1,
    DEFAULT_QUESTION2,
    gpt35_model,
    gpt4_model,
)

from semantic import (
    SemanticRAGChainFactory,
)

_logger = logging.getLogger("lang-chat")

_semantic_rag_chain: RunnableSequence = None

@cl.on_message
async def main(message: cl.Message):
    """Answer an incoming user message via the semantic RAG chain.

    The chain's answer is prefixed with "> " and sent back to the user.
    On any failure only the bare prefix is sent and the error is logged
    with its traceback.
    """
    content = "> "
    try:
        # NOTE(review): _semantic_rag_chain may still be None — its
        # initialization in start() is commented out — in which case the
        # AttributeError from .invoke lands in the handler below.
        response = _semantic_rag_chain.invoke({"question": message.content})
        content += response["response"].content
    except Exception as e:
        # Use the module logger (with traceback) instead of print().
        _logger.exception(f"chat error: {e}")

    # Send a response back to the user
    await cl.Message(
        content=content,
    ).send()

@cl.on_chat_start
async def start():
    """Initialize a chat session.

    Intended flow: build the semantic RAG chain, set custom avatars, run two
    warm-up questions, then greet the user.  Chain construction, avatars, and
    the warm-up questions are all currently disabled (commented out), so only
    the message history is seeded and the greeting is sent.
    """
    _logger.info("==> starting ...")
    global _semantic_rag_chain
    # TODO(review): chain construction is disabled, so main() has no chain
    # to invoke until this is re-enabled.
    # _semantic_rag_chain = SemanticRAGChainFactory.get_semantic_rag_chain()

    # NOTE(review): these used st.Avatar, but streamlit has no Avatar —
    # this is chainlit's cl.Avatar API.
    # await cl.Avatar(
    #     name="Chatbot",
    #     url="https://cdn-icons-png.flaticon.com/512/8649/8649595.png"
    # ).send()
    # await cl.Avatar(
    #     name="User",
    #     url="https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png"
    # ).send()

    _logger.info("\tsending message back: ready!!!")

    content = ""
    # Warm-up questions (disabled along with the chain above).
    # if _semantic_rag_chain is not None:
    #     try:
    #         response1 = _semantic_rag_chain.invoke({"question": DEFAULT_QUESTION1})
    #         response2 = _semantic_rag_chain.invoke({"question": DEFAULT_QUESTION2})
    #
    #         content = (
    #             f"**Question**: {DEFAULT_QUESTION1}\n\n"
    #             f"{response1['response'].content}\n\n"
    #             f"**Question**: {DEFAULT_QUESTION2}\n\n"
    #             f"{response2['response'].content}\n\n"
    #         )
    #     except Exception as e:
    #         _logger.error(f"init error: {e}")

    # Seed the per-session history with the system prompt, then greet.
    cl.user_session.set("message_history", [{"role": "system", "content": "You are a helpful assistant. "}])
    await cl.Message(
        content=content + "\nHow can I help you with Meta's 2023 10K?"
    ).send()
    _logger.info(20 * "*")