Chandran Palanisamy committed
Commit: a5db52f
1 Parent(s): 11ad892

Upload 7 files

commit inubit files
- .gitattributes +1 -0
- app.py +86 -0
- requirements.txt +3 -0
- storage/docstore.json +0 -0
- storage/graph_store.json +1 -0
- storage/index_store.json +0 -0
- storage/vector_store.json +3 -0
.gitattributes
CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 virtimo-inubit/storage/vector_store.json filter=lfs diff=lfs merge=lfs -text
+storage/vector_store.json filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,86 @@
import streamlit as st
from typing import cast

from llama_index import (
    VectorStoreIndex,
    StorageContext,
    load_index_from_storage,
)


# Load the persisted vector index from ./storage and answer a single question against it
def chat_with_dataframe(question):
    storage_context = StorageContext.from_defaults(persist_dir="storage")
    vector_index = cast(VectorStoreIndex, load_index_from_storage(storage_context))
    # Retrieve the two most similar chunks and let the query engine synthesize an answer
    vector_query_engine = vector_index.as_query_engine(similarity_top_k=2)
    response = vector_query_engine.query(question)
    return response.response


# Streamlit interface
def main():
    st.sidebar.title('Virtimo - INUBIT')
    st.sidebar.subheader('Ask me anything about the product!')

    # Store LLM-generated responses across reruns
    if "messages" not in st.session_state.keys():
        st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]

    # Display the chat history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # User-provided prompt
    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.write(prompt)

    # Generate a new response if the last message is not from the assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                response = chat_with_dataframe(prompt)
                if response is not None:
                    st.write(response)
                    st.session_state.messages.append({"role": "assistant", "content": response})


# Run the app
if __name__ == '__main__':
    main()
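Note that app.py only reads an index that has already been persisted to storage/; the pre-built JSON files are uploaded in this same commit. For reference, a minimal sketch of how such a directory could have been produced with llama-index 0.9.x; the data/ source folder is an assumption and is not part of this commit:

# build_index.py - hypothetical helper, not part of this commit
from llama_index import SimpleDirectoryReader, VectorStoreIndex

# Requires OPENAI_API_KEY, since llama-index 0.9 defaults to OpenAI embeddings and LLM
documents = SimpleDirectoryReader("data").load_data()  # "data/" is an assumed source folder

# Build the vector index and persist the docstore/index_store/graph_store/vector_store JSON files
index = VectorStoreIndex.from_documents(documents)
index.storage_context.persist(persist_dir="storage")

Because chat_with_dataframe() calls load_index_from_storage() on every question, each query re-reads the ~56 MB vector store; wrapping the index load in st.cache_resource would avoid that repeated cost.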
requirements.txt
ADDED
@@ -0,0 +1,3 @@
streamlit==1.28.2
llama-index==0.9.9
storage/docstore.json
ADDED
The diff for this file is too large to render.
storage/graph_store.json
ADDED
@@ -0,0 +1 @@
{"graph_dict": {}}
storage/index_store.json
ADDED
The diff for this file is too large to render.
storage/vector_store.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:989b7beb871da7b3843b5c774d4764ba3bd62a771528297f7c98ff5433fa95db
size 56444037
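Since storage/vector_store.json is tracked with Git LFS, a clone without LFS content contains only the three-line pointer stub shown above, and load_index_from_storage() would fail on it. A small sketch of a pre-flight check (the path and size threshold are assumptions, not part of this commit):

# check_lfs.py - hypothetical helper, not part of this commit
from pathlib import Path

def is_lfs_pointer(path: str = "storage/vector_store.json") -> bool:
    # A real vector store here is ~56 MB; an LFS pointer stub is only a few hundred bytes
    p = Path(path)
    if not p.exists() or p.stat().st_size > 1024:
        return False
    return p.read_text(errors="ignore").startswith("version https://git-lfs.github.com/spec/v1")

if __name__ == "__main__":
    if is_lfs_pointer():
        print("Only the Git LFS pointer is present; fetch the real file (e.g. git lfs pull) first.")
    else:
        print("vector_store.json looks like real data.")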