###########################
# UI for Meeting RAG Q&A. #
###########################

##################### Imports #####################
import threading
import uuid

import gradio as gr
import spaces

from utilities.setup import get_files
from connections.pinecone import PineconeConnector
from connections.model import InferencePipeline
from services.embed_service.embed import EmbeddingService
from services.qa_service.qna import QAService

#################### Functions ####################
@spaces.GPU
def process_transcripts(files, context):
    """Embed the uploaded transcripts and load them into Pinecone."""
    with EmbeddingService(conf, pinecone=pinecones) as e:
        e.run(files)
    # TODO: report incremental progress (e.g. a progress bar) instead of a
    # single completion message.
    return "Completed Loading Data"


@spaces.GPU
def retrieve_answer(question, goals):
    """Answer a question against the indexed transcripts.

    Returns the model's answer and the retrieved context.
    """
    with QAService(conf,
                   pinecone=pinecones,
                   model_pipeline=pipelines,
                   question=question,
                   goals=goals) as q:
        answer, context = q.run()
    return answer, context


def drop_namespace(namespace):
    """Placeholder session-expiry hook.

    A full implementation would delete the user's Pinecone namespace here;
    the exact call depends on the PineconeConnector API.
    """
    print(f"Session expired; namespace '{namespace}' can now be dropped.")


def generate_key():
    """Create a unique session key and schedule cleanup after the 1-hour session."""
    unique_key = "User_" + str(uuid.uuid1())
    # Drop the user's namespace once the 1-hour session window ends.
    timer = threading.Timer(3600, drop_namespace, args=[unique_key])
    timer.start()
    # TODO: create the corresponding namespace in Pinecone.
    return f"{unique_key}: Started 1 hour session."

##################### Process #####################
def main(conf):
    with gr.Blocks() as demo:

        # Main page
        with gr.TabItem(conf["layout"]["page_names"][0]):
            gr.Markdown(get_files.load_markdown_file(conf["layout"]["about"]))

        # User configuration page
        with gr.TabItem(conf["layout"]["page_names"][1]):
            gr.Markdown("# Your User Configuration")
            gr.Markdown("**Two options:**")
            gr.Markdown("""1. Generate a unique key to upload your personal
                        transcripts. Your documents will be queryable for one
                        hour after generation.""")
            gr.Markdown("""2. Or go straight to the next tab and ask your
                        question against the meetings that are already included.""")
            create_unique_key = gr.Button("Generate unique key")
            output_unique_key = gr.Textbox(label="Your session key & time")
            create_unique_key.click(fn=generate_key,
                                    outputs=output_unique_key)
            gr.Markdown("### Upload Transcripts and Necessary Context")
            load_file = gr.UploadButton(label="Upload Transcript (.vtt)",
                                        file_types=[".vtt"],
                                        file_count="multiple")
            goals = gr.Textbox(label="Analysis Goals",
                               value=conf["defaults"]["goals"])  # not incorporated yet; will be used with Q&A.
            repository = gr.Textbox(label="Progress",
                                    value="Waiting for load...",
                                    visible=True)
            load_file.upload(process_transcripts, [load_file, goals], repository)

        # Meeting Question & Answer page
        with gr.TabItem(conf["layout"]["page_names"][2]):
            question = gr.Textbox(label="Ask a Question",
                                  value=conf["defaults"]["question"])
            ask_button = gr.Button("Ask!")
            model_output = gr.Markdown("### Answer")
            context_output = gr.Textbox(label="Retrieved Context")
            ask_button.click(fn=retrieve_answer,
                             inputs=[question, goals],
                             outputs=[model_output, context_output])

    demo.launch()

##################### Execute #####################
if __name__ == "__main__":
    # Load configuration and API keys.
    conf = get_files.json_cfg()
    keys = get_files.get_keys()

    # Initialize the Pinecone connector.
    pc_connector = PineconeConnector(
        api_key=keys["pinecone"],
        index_name=conf["embeddings"]["index_name"],
        embedding=conf["embeddings"]["embedding"],
    )
    pinecones = pc_connector.run()

    # Initialize the model inference pipeline.
    pipelines = InferencePipeline(conf, api_key=keys["huggingface"])

    # Run the UI.
    main(conf)