import logging
import os
import sys
import time

import gradio as gr
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
from llama_index.memory import ChatMemoryBuffer


# Log llama_index progress to stdout (basicConfig already installs a stream
# handler; adding a second one would duplicate every log line).
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# Fail fast if the OpenAI key is missing from the environment (assigning
# os.getenv()'s None back into os.environ would raise a TypeError anyway).
if not os.getenv("OPENAI_API_KEY"):
    raise RuntimeError("OPENAI_API_KEY is not set")


def load_data():
    # Load every file under ./subfolder_0/ as llama_index documents.
    return SimpleDirectoryReader("./subfolder_0/").load_data()

def build_index(documents):
    # Build a single index and insert documents one at a time, pausing
    # between inserts to throttle the embedding calls.
    index = GPTVectorStoreIndex.from_documents([])
    for document in documents:
        time.sleep(5)
        index.insert(document)
    return index


def build_chat_engine(index):
    # Context chat mode retrieves relevant chunks for each message and keeps
    # a token-bounded conversation history.
    memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
    return index.as_chat_engine(
        chat_mode="context",
        memory=memory,
        system_prompt="Generate detailed answers, but don't be lengthy.",
    )

data = load_data()
index = build_index(data)
chat_engine = build_chat_engine(index)
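
# Optional: persist the index so later launches can skip the slow rebuild.
# A minimal sketch against llama_index's storage API; "./storage" is an
# assumed path, not part of the original app.
index.storage_context.persist(persist_dir="./storage")
# On a later launch you could load it back instead of rebuilding (sketch):
#   from llama_index import StorageContext, load_index_from_storage
#   index = load_index_from_storage(StorageContext.from_defaults(persist_dir="./storage"))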

def get_response(text, history=None):
    # Gradio's ChatInterface passes (message, history); the chat engine
    # tracks its own memory, so history is unused here.
    return str(chat_engine.chat(text))
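
# Optional smoke test before launching the UI (hypothetical question; this
# would make a real OpenAI call, so it is left commented out):
# print(get_response("Summarize the indexed documents."))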

demo = gr.ChatInterface(get_response, analytics_enabled=True)
demo.launch(debug=True, share=True)