import re
import time

import streamlit as st

from QnA import Q_A, get_groq_model, get_hugging_face_model, summarize


def summarize_data(documents, api_key):
    # Groq API keys start with "gsk"; anything else is treated as a
    # Hugging Face token and routed to the Hugging Face model.
    if api_key.startswith('gsk'):
        llm = get_groq_model(api_key)
    else:
        llm = get_hugging_face_model(api_key=api_key)
    summary = summarize(documents, llm)
    return summary


def QA_Bot(vectorstore, API_KEY, documents):
    st.title("Q&A Bot")
    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    # Summarize the uploaded documents once and cache the result in session
    # state so the summary is not recomputed on every Streamlit rerun.
    if "summary" not in st.session_state:
        st.session_state.summary = summarize_data(documents, API_KEY)
        print(st.session_state.summary)


    # React to user input
    if prompt := st.chat_input("Ask a question about your documents"):
        # Display user message in chat message container
        st.chat_message("user").markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        
        # Retrieve an answer from the vectorstore-backed Q&A chain
        response = Q_A(vectorstore, prompt, API_KEY)
        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""
            # Stream the answer chunk by chunk to simulate typing; the capture
            # group in the split keeps the original whitespace, so chunks are
            # appended without inserting extra spaces.
            for chunk in re.split(r'(\s+)', response):
                full_response += chunk
                time.sleep(0.01)

                # Show a blinking cursor while the response is "typing"
                message_placeholder.markdown(full_response + "▌")
            # Replace the placeholder with the final answer, without the cursor
            message_placeholder.markdown(full_response)
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": full_response})
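

# ---------------------------------------------------------------------------
# Example wiring (illustrative sketch only, not part of the original module).
# `load_documents` and `build_vectorstore` are hypothetical placeholders for
# whatever ingestion/embedding helpers the host app provides; only QA_Bot and
# summarize_data above come from this file.
#
#     import streamlit as st
#     from app import QA_Bot                                 # assuming this file is app.py
#     from ingest import load_documents, build_vectorstore   # hypothetical helpers
#
#     api_key = st.sidebar.text_input("API key", type="password")
#     uploaded = st.sidebar.file_uploader("Upload a document")
#     if api_key and uploaded:
#         documents = load_documents(uploaded)
#         vectorstore = build_vectorstore(documents, api_key)
#         QA_Bot(vectorstore, api_key, documents)
# ---------------------------------------------------------------------------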