## Setup

```python
# Import the necessary libraries
import os
import json
import uuid
from pathlib import Path

import gradio as gr
from openai import OpenAI
from langchain_community.embeddings.sentence_transformer import (
    SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import Chroma
from huggingface_hub import CommitScheduler

# Create the OpenAI-compatible client for the Anyscale Endpoints API
anyscale_api_key = os.getenv('anyscale_apiKey')

client = OpenAI(
    base_url="https://api.endpoints.anyscale.com/v1",
    api_key=anyscale_api_key,
)

# Define the LLM, the embedding model, and the vectorstore location
model_name = "mlabonne/NeuralHermes-2.5-Mistral-7B"
embedding_model = SentenceTransformerEmbeddings(model_name="thenlper/gte-large")

persisted_vectordb_location = './finsightsdb'
collection_name = 'finsights_grey-10k'

# Load the persisted vector DB
vectorstore_persisted = Chroma(
    collection_name=collection_name,
    persist_directory=persisted_vectordb_location,
    embedding_function=embedding_model,
)

# Prepare the logging functionality:
# each app instance writes to its own uniquely named log file, and the
# CommitScheduler pushes the logs folder to a Hugging Face dataset repo
# every two minutes
log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent

scheduler = CommitScheduler(
    repo_id="MJackman-Project3-logs",
    repo_type="dataset",
    folder_path=log_folder,
    path_in_repo="data",
    every=2,
)

# Define the Q&A system message
qna_system_message = """
You are an assistant to a financial services firm. Your task is to determine the most effective platform to support the firm's generation of advanced analytics and insights for investment management and financial planning.

User input will include the necessary context for you to answer their questions. This context will begin with the token: ###Context.
The context contains references to specific portions of documents relevant to the user's query, along with source links. The source for a context will begin with the token: ###Source.

When crafting your response:
1. Select only the context relevant to answering the question.
2. Include the source links in your response.
3. Include the page number(s) of the source links where the answer was found.
4. User questions will begin with the token: ###Question.
5. If the question is irrelevant to the firm's business, respond with: "I am an AI assistant for Finsights Grey Inc. I can only help you with questions related to financial analytics."

Please adhere to the following guidelines:
- Your response should only be about the question asked and nothing else.
- Answer only using the context provided.
- Do not mention anything about the context in your final answer.
- If the answer is not found in the context, it is very important that you respond with "I don't know. Please check the docs @ 'https://docs.finsights.io/'"
- Always quote the source when you use the context. Cite the relevant source at the end of your response under the section - Source:
- Always provide the relevant page number(s) where the answer was found in the cited source. Cite them at the end of your response under the section - Page Number:
- Do not make up sources. Use only the links provided in the sources section of the context. You are prohibited from providing other links/sources.

Here is an example of how to structure your response:

Answer:
[Answer]

Source:
[Source]

Page Number(s):
[Page Number]
"""
```
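Before wiring up the app, it can help to confirm that the persisted collection returns usable chunks and that each chunk carries the `source` and `page` metadata the prompt relies on. The sketch below is an optional sanity check; `sample_query` is a hypothetical question for illustration, and the metadata fields are assumed to have been populated when the 10-K PDFs were ingested.

```python
# Optional sanity check: retrieve a couple of chunks for a sample query and
# inspect the 'source' and 'page' metadata that the predict function depends on.
# 'sample_query' is a hypothetical question, for illustration only.
sample_query = "What were the major revenue drivers in fiscal year 2023?"

for d in vectorstore_persisted.similarity_search(sample_query, k=2):
    print(d.metadata.get("source"), "- page", d.metadata.get("page"))
    print(d.page_content[:200], "...\n")
```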
```python
# Define the user message template
qna_user_message_template = """
###Context
Here are some documents and their source links that are relevant to the question mentioned below.
{context}

###Question
{question}
"""

# Define the predict function that runs when 'Submit' is clicked
# or when an API request is made
def predict(user_input, company):
    # Build the metadata filter from the selected company; the value must
    # match the 'source' path stored when the PDFs were ingested
    company_filter = "/content/drive/MyDrive/Dataset-10k/" + str(company) + "-10-k-2023.pdf"

    # Retrieve the five most similar chunks from the selected 10-K report
    relevant_document_chunks = vectorstore_persisted.similarity_search(
        user_input,
        k=5,
        filter={"source": company_filter},
    )

    # Create context_for_query: each chunk's text plus its page number
    # and source link
    context_list = [
        d.page_content
        + "\n Page number: " + str(d.metadata['page'])
        + "\n ###Source: " + d.metadata['source']
        + "\n\n "
        for d in relevant_document_chunks
    ]
    context_for_query = ". ".join(context_list)

    # Create the messages
    prompt = [
        {'role': 'system', 'content': qna_system_message},
        {'role': 'user', 'content': qna_user_message_template.format(
            context=context_for_query,
            question=user_input,
        )},
    ]

    # Get the response from the LLM, handling errors with try-except,
    # and print the content of the response
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=prompt,
            temperature=0,
        )
        prediction = response.choices[0].message.content.strip()
    except Exception as e:
        prediction = f'Sorry, I encountered the following error: \n {e}'

    print(prediction)

    # Once the prediction is made, log both the inputs and the output to the
    # local log file. Hold the scheduler's lock while writing so the write
    # does not collide with a concurrent commit to the Hub.
    with scheduler.lock:
        with log_file.open("a") as f:
            f.write(json.dumps(
                {
                    'user_input': user_input,
                    'retrieved_context': context_for_query,
                    'model_response': prediction,
                }
            ))
            f.write("\n")

    return prediction


# Set up the Gradio UI.
# The radio button selects the company 10-K report from which the context
# is retrieved, the text box takes the question, and the Submit button
# runs the predict function.
textbox = gr.Textbox(label="Enter your question:")
company = gr.Radio(
    # Choices must match the 10-K PDF file names used at ingestion time
    choices=['aws', 'google', 'Meta', 'msft', 'IBM'],
    label="Select a company:",
)
model_output = gr.Textbox(label='Answer to your question')

# Create the interface:
# pass [textbox, company] as inputs, model_output as the output,
# and predict as the function to run
demo = gr.Interface(
    fn=predict,
    inputs=[textbox, company],
    outputs=model_output,
    title="AI-Powered Question Answering",
)

# Run the interface
demo.queue()
demo.launch()
```
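Because `predict` also runs when an API request is made, the running app can be queried programmatically. Below is a minimal sketch using `gradio_client`; the URL is a placeholder for whatever `demo.launch()` prints, and `api_name="/predict"` is Gradio's default endpoint name for a single-function `Interface`.

```python
# A minimal sketch of calling the app's API from another process.
# Replace the URL with the local or public URL printed by demo.launch().
from gradio_client import Client

api = Client("http://127.0.0.1:7860/")
result = api.predict(
    "What was the total revenue in fiscal year 2023?",  # question textbox
    "google",                                           # company radio choice
    api_name="/predict",
)
print(result)
```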