# PBG / app.py
import os

import gradio as gr
import openai
from llama_index import StorageContext, load_index_from_storage
# The OpenAI API key must be provided via the OPENAI_API_KEY environment
# variable (e.g. a Hugging Face Space secret) rather than hardcoded in source.
openai.api_key = os.environ["OPENAI_API_KEY"]
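
# The persisted index under "index_dir" is assumed to have been built offline
# beforehand, roughly as sketched below (the "docs" source folder name is an
# assumption, not part of this app):
#   from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
#   documents = SimpleDirectoryReader("docs").load_data()
#   index = GPTVectorStoreIndex.from_documents(documents)
#   index.storage_context.persist(persist_dir="index_dir")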
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir="index_dir")
# load index
index = load_index_from_storage(storage_context)
# start a query engine over the loaded index
query_engine = index.as_query_engine()
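# Example usage (question taken from the demo examples below); query() returns
# a Response object whose .response attribute holds the generated answer text:
#   response = query_engine.query("What is AHJ?")
#   print(response.response)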
# APP
# def get_model_reply_no_prev_context(question):
#     response = query_engine.query(question)
#     final_response = response.response[1:]  # drop the leading whitespace character
#     return final_response
# title = "Knowledge Center at Penta Building Group"
# description = """
# The program answers questions using the documentation of 'Lessons Learned' from previous projects!
# """
# article = "Your feedback matters!If you like it, contact me at mgupta70@asu.edu"
# gr.Interface(
#     fn=get_model_reply_no_prev_context,
#     inputs="textbox",
#     outputs="text",
#     title=title,
#     description=description,
#     article=article,
#     examples=[["Which code is to be used while planning a pedestrian walkway?"], ["What is AHJ?"]],
#     live=True,
# ).launch()

def sketch_recognition(img):
    # Placeholder: a real sketch recognition model would go here. The "label"
    # output expects a class name or a dict mapping labels to confidences; an
    # empty dict is returned until a model is plugged in.
    return {}

gr.Interface(fn=sketch_recognition, inputs="sketchpad", outputs="label").launch()