from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, get_response_synthesizer
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.postprocessor import SimilarityPostprocessor
import gradio as gr
import logging
import sys
import os

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
def initialize_vector_store_index():
    documents = SimpleDirectoryReader("./document/management/").load_data()
    # build index
    index = VectorStoreIndex.from_documents(documents)
    return index


index = initialize_vector_store_index()
# configure retriever
retriever = VectorIndexRetriever(
    index=index,
    similarity_top_k=1,
)
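
# Optional sketch (not in the original script): the imports above also allow the retriever to be
# wired into a full query engine that synthesizes an LLM answer from the retrieved nodes, following
# the standard llama_index retriever/query-engine pattern. The 0.7 similarity cutoff is an assumption.
response_synthesizer = get_response_synthesizer()
query_engine = RetrieverQueryEngine(
    retriever=retriever,
    response_synthesizer=response_synthesizer,
    node_postprocessors=[SimilarityPostprocessor(similarity_cutoff=0.7)],
)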

def get_response(text, history):
    # For simplicity, only the 'text' argument is used; the chat history is ignored.
    nodes = retriever.retrieve(text)
    if not nodes:
        return "No matching document found."
    # Return the top node's metadata followed by its text.
    return str(nodes[0].metadata) + "\n" + nodes[0].text
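

# Hypothetical alternative handler (an assumption, not part of the original script): instead of
# returning the raw top node, ask the query engine defined above for a synthesized answer.
# Point gr.ChatInterface at this function to get LLM-generated responses grounded in the documents.
def get_synthesized_response(text, history):
    result = query_engine.query(text)
    return str(result)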


t = gr.ChatInterface(get_response, analytics_enabled=True)
t.launch(debug=True, share=True)