File size: 1,501 Bytes
60036d0
392b211
 
 
60036d0
8300b44
74b36f6
5fee081
ca3e513
ea88958
efa2752
74b36f6
1644706
 
74b36f6
d8be35d
 
74b36f6
 
 
 
 
8524b81
f6fb893
 
8524b81
74b36f6
f6fb893
 
 
 
 
8524b81
6f29fe2
67588eb
8524b81
74b36f6
0b013eb
 
 
 
 
 
 
22b0487
88af87c
74b36f6
64fe944
74b36f6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
import os
from llama_index.node_parser import SimpleNodeParser
from llama_index import StorageContext, load_index_from_storage
import gradio as gr
import openai

# Expose the deployment secret (stored under "my_key") to the OpenAI client,
# which reads the OPENAI_API_KEY variable.  Raises KeyError if "my_key" is
# unset, failing fast at startup rather than on the first query.
os.environ['OPENAI_API_KEY'] = os.environ["my_key"]

# rebuild storage context from the persisted index directory
# (assumes "index_dir_full" exists relative to the working dir — TODO confirm)
storage_context = StorageContext.from_defaults(persist_dir="index_dir_full")

# load the previously built vector index from disk
index = load_index_from_storage(storage_context)

# start a query engine over the loaded index; used by the Gradio handler below
query_engine = index.as_query_engine()



# APP

def get_model_reply_no_prev_context(question):
    """Answer a single question against the vector-store index (no chat history).

    Parameters
    ----------
    question : str
        The user's question, passed verbatim to the index query engine.

    Returns
    -------
    str
        The engine's answer text with leading whitespace removed.
    """
    response = query_engine.query(question)
    # The engine's reply often starts with a newline.  The original code
    # chopped exactly one character (`response.response[1:]`), which silently
    # drops the first letter of the answer whenever no such prefix is present.
    # lstrip() removes only actual leading whitespace, so clean replies are
    # returned intact.
    return response.response.lstrip()


# def get_model_reply_no_prev_context(question):
#     final_response = question
#     return final_response

# Static copy shown in the Gradio interface (title bar, description panel,
# and footer article).
title = "Knowledge Center at Penta Building Group"
description = """The program is trained to answer questions based on the documentation of 'Lessons Learned' from previous projects!
 <img src="./penta.png" >   """
# Bug fix: the original footer read "matters!If" — missing space after the
# exclamation mark in user-facing text.
article = "Your feedback matters! If you like it, contact us at mgupta70@asu.edu"

# Build and launch the Gradio UI.  `fn` receives the textbox contents and its
# returned string is rendered in the text output component; `examples` seeds
# clickable sample questions.  launch() blocks, serving the app.
gr.Interface(
    fn=get_model_reply_no_prev_context,
    inputs="textbox",
    outputs="text",
    title=title,
    description=description,
    article=article,
    examples=[["Which code is to be used while planning a pedestrian walkway?"], ["How to determine the exact location of existing underground lines?"], ['What one should do to avoid struck-by hazard incidents?']]
).launch()

# NOTE(review): duplicate of the top-of-file `import gradio as gr`; it is not
# reached until launch() returns and has no effect — safe to delete.
import gradio as gr