"""Ikigai Chat: a Streamlit front-end for RAG-style chat over Ikigai Docs,
backed by a Mistral 7B model."""

import time

import streamlit as st

from mistral7b import mistral
from utils import generate_text_embeddings

if "messages" not in st.session_state:
    st.session_state.messages = []

if "tokens_used"  not in st.session_state :
    st.session_state.tokens_used = 0

if "inference_time" not in st.session_state :
    st.session_state.inference_time = [0.00]

if "model_settings" not in st.session_state :
    st.session_state.model_settings = {
        "temp" : 0.9,
        "max_tokens" : 512,
    }

if "history" not in st.session_state :
    st.session_state.history = []

if "top_k" not in st.session_state :
    st.session_state.top_k = 5

with st.sidebar:
    st.markdown("# Model Analytics")
    st.write("Tokens used :", st.session_state["tokens_used"])
    st.write(
        "Average Inference Time (s) :",
        round(
            sum(st.session_state["inference_time"])
            / len(st.session_state["inference_time"]),
            3,
        ),
    )
    # Cost estimate at 0.033 INR per 1,000 tokens.
    st.write("Cost Incurred :", round(0.033 * st.session_state["tokens_used"] / 1000, 3), "INR")

    st.markdown("---")
    st.markdown("# Retrieval Settings")
    # Persist the slider value so the retrieval step can read it later;
    # previously the return value was discarded and top_k never changed.
    st.session_state.top_k = st.slider(
        label="Documents to retrieve",
        min_value=1,
        max_value=10,
        value=st.session_state.top_k,
    )

    st.markdown("---")
    st.markdown("# Model Settings")
    # Inside `with st.sidebar:` a plain st.radio suffices; st.sidebar.radio was redundant.
    # Only the Mistral 7B backend is wired up below; the other options are placeholders.
    selected_model = st.radio("Select one:", ["Mistral 7B", "GPT 3.5 Turbo", "GPT 4", "Llama 7B"])
    # Write the chosen temperature into the settings the model call actually reads;
    # previously the slider value was never applied.
    st.session_state.model_settings["temp"] = st.slider(
        label="Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.5
    )
    st.write(" ")
    st.info("**2023 ©️ Pragnesh Barik**")

st.image("ikigai.svg")
st.title("Ikigai Chat")

with st.expander("What is Ikigai Chat?"):
    st.info(
        """Ikigai Chat is a vector-database-powered chat agent built on
        Retrieval Augmented Generation (RAG). It maintains an extensive
        repository of Ikigai Docs, retrieves the passages most relevant to
        each query, and grounds its answers in them, giving more refined and
        tailored responses to user inquiries."""
    )


# Replay the conversation so far: Streamlit reruns this script from the top
# on every interaction, so past messages must be re-rendered each time.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])


# Handle a new user message.
if prompt := st.chat_input("Chat with Ikigai Docs?"):
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})
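    # Retrieval sketch (not wired up in this file): the otherwise-unused
    # generate_text_embeddings import and the top_k setting suggest a step
    # like the following, where search_index(vector, k) is a hypothetical
    # vector-database lookup; the mistral7b wrapper may handle this itself.
    #
    #   query_vector = generate_text_embeddings(prompt)
    #   context = search_index(query_vector, k=st.session_state.top_k)
    #   prompt = "\n".join(context) + "\n\n" + prompt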
    
    tick = time.time()
    response = mistral(
        prompt,
        st.session_state.history,
        temperature=st.session_state.model_settings["temp"],
        max_new_tokens=st.session_state.model_settings["max_tokens"],
    )
    tock = time.time()

    st.session_state.inference_time.append(tock - tick)

    # Strip the end-of-sequence marker and approximate token usage by word
    # count (a rough proxy for the true token count).
    response = response.replace("</s>", "")
    st.session_state["tokens_used"] += len(response.split())

    with st.chat_message("assistant"): 
        st.markdown(response)
    st.session_state.history.append([prompt, response])
    st.session_state.messages.append({"role": "assistant", "content": response})
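
# To try the app locally (filename assumed):
#   streamlit run app.py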