import logging
import os

import gradio as gr
from llama_index import (
    ServiceContext,
    StorageContext,
    get_response_synthesizer,
    load_index_from_storage,
)
from llama_index.indices.document_summary import DocumentSummaryIndexEmbeddingRetriever
from llama_index.llms import OpenAI
from llama_index.query_engine import RetrieverQueryEngine

logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s", level=os.environ.get("LOGLEVEL", "DEBUG"))

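# Channel name -> query engine, populated once at startup by load_data().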
cache = {}
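# Temperature 0 keeps answers deterministic; the ServiceContext is shared by
# the retrievers and the response synthesizer.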
chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo")
service_context = ServiceContext.from_defaults(llm=chatgpt, chunk_size=1024)

def load_data():
    """Load every persisted document summary index under ./summary_index and
    cache a query engine for each channel, keyed by directory name."""
    index_root = "./summary_index"
    for directory in os.listdir(index_root):
        if os.path.isdir(f"{index_root}/{directory}"):
            logging.info("Loading from existing index %s", directory)
            storage_context = StorageContext.from_defaults(persist_dir=f"{index_root}/{directory}")
            index = load_index_from_storage(storage_context)
            # Retrieve documents by embedding similarity against their stored summaries.
            retriever = DocumentSummaryIndexEmbeddingRetriever(
                index,
                # choice_select_prompt=choice_select_prompt,
                # choice_batch_size=choice_batch_size,
                # format_node_batch_fn=format_node_batch_fn,
                # parse_choice_select_answer_fn=parse_choice_select_answer_fn,
                service_context=service_context
            )
            # configure response synthesizer
            response_synthesizer = get_response_synthesizer(service_context=service_context)

            # assemble query engine
            query_engine = RetrieverQueryEngine(retriever=retriever, response_synthesizer=response_synthesizer)
            cache[directory] = query_engine


def chatbot(index_name, input_text):
    """Answer a question against the query engine for the selected channel."""
    # response.response holds the synthesized plain-text answer.
    response = cache[index_name].query(input_text)
    return response.response


def main():
    load_data()
    iface = gr.Interface(
        fn=chatbot,
        inputs=[
            # One dropdown entry per loaded channel; "sos" is the default selection.
            gr.Dropdown(list(cache.keys()), type="value", value="sos", label="Select Channel"),
            gr.Textbox(lines=7, label="Ask any question", placeholder="What are the key topics?"),
        ],
        outputs="text",
        title="NLP Demo for Slack Data",
    )
    # Require basic auth when LOGIN_PASS is set; otherwise launch without auth.
    if "LOGIN_PASS" in os.environ:
        iface.launch(auth=("axiamatic", os.environ["LOGIN_PASS"]),
                     auth_message="For access, please check my Slack profile or contact me in Slack.",
                     share=False)
    else:
        iface.launch(share=False)


if __name__ == "__main__":
    main()
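
# Example invocation (assuming this script is saved as app.py):
#   LOGLEVEL=INFO LOGIN_PASS=<password> python app.py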