## Setup
# Import the necessary libraries
import gradio as gr
import os
import json
import uuid
from openai import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain_community.embeddings.sentence_transformer import (
    SentenceTransformerEmbeddings
)
from langchain_community.vectorstores import Chroma
from huggingface_hub import CommitScheduler
from pathlib import Path

# Create the OpenAI client, pointed at Anyscale's OpenAI-compatible endpoint
anyscale_api_key = os.getenv('anyscale_apiKey')

client = OpenAI(
    base_url="https://api.endpoints.anyscale.com/v1",
    api_key=anyscale_api_key
)
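
# Optional sanity check of the key and endpoint -- a minimal sketch with a
# throwaway prompt (hypothetical; uncomment to run once before wiring up
# the full app):
# ping = client.chat.completions.create(
#     model="mlabonne/NeuralHermes-2.5-Mistral-7B",
#     messages=[{'role': 'user', 'content': 'Reply with OK.'}],
#     max_tokens=5
# )
# print(ping.choices[0].message.content)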

# Define the LLM used for chat completions and the embedding model
model_name = "mlabonne/NeuralHermes-2.5-Mistral-7B"
embedding_model = SentenceTransformerEmbeddings(model_name="thenlper/gte-large")

persisted_vectordb_location = './finsightsdb'
collection_name = 'finsights_grey-10k'


# Load the persisted vectorDB
vectorstore_persisted = Chroma(
    collection_name=collection_name,
    persist_directory=persisted_vectordb_location,
    embedding_function=embedding_model
)
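
# The collection above is assumed to already exist on disk; nothing in this
# app builds it. For reference, here is a minimal sketch of how it could have
# been created from the 10-K PDFs -- the directory path and chunking
# parameters are assumptions, not taken from the original pipeline:
def build_vectorstore(pdf_dir="/content/drive/MyDrive/Dataset-10k/"):
    # One Document per PDF page, with the 'source' and 'page' metadata
    # that the metadata filter in predict() relies on
    pages = PyPDFDirectoryLoader(pdf_dir).load()

    # Split pages into overlapping chunks sized for retrieval
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_documents(pages)

    # Embed and persist to the same location this app reads from
    return Chroma.from_documents(
        documents=chunks,
        embedding=embedding_model,
        collection_name=collection_name,
        persist_directory=persisted_vectordb_location
    )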

# Prepare the logging functionality
log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent

# CommitScheduler pushes the contents of log_folder to a Hugging Face
# dataset repo in the background, every 2 minutes
scheduler = CommitScheduler(
    repo_id="MJackman-Project3-logs",
    repo_type="dataset",
    folder_path=log_folder,
    path_in_repo="data",
    every=2
)

# Define the Q&A system message
qna_system_message = """
You are an assistant to a financial services firm. Your task is to answer user questions about company annual (10-K) reports, which the firm uses to generate advanced analytics and insights for investment management and financial planning.

User input will include the necessary context for you to answer their questions. This context will begin with the token: ###Context.
The context contains references to specific portions of documents relevant to the user's query, along with source links.
The source for a piece of context will begin with the token: ###Source.
User questions will begin with the token: ###Question.

When crafting your response:
1. Select only the context relevant to answering the question.
2. Include the source links in your response.
3. Include the page number(s) of the source links where the answer was found.
4. If the question is irrelevant to the firm's business, respond with: "I am an AI assistant for Finsights Grey Inc. I can only help you with questions related to financial analytics."

Please adhere to the following guidelines:
- Your response should address only the question asked and nothing else.
- Answer only using the context provided.
- Do not mention anything about the context in your final answer.
- If the answer is not found in the context, it is very important that you respond with "I don't know. Please check the docs @ 'https://docs.finsights.io/'"
- Always quote the source when you use the context. Cite the relevant source at the end of your response under the section - Source:
- Always provide the relevant page number(s) where the answer was found in the cited source, under the section - Page Number(s):
- Do not make up sources. Use only the links provided in the sources section of the context. You are prohibited from providing other links/sources.

Here is an example of how to structure your response:

Answer:
[Answer]

Source:
[Source]

Page Number(s):
[Page Number]
"""


# Define the user message template
qna_user_message_template = """
###Context
Here are some documents and their source links that are relevant to the question mentioned below.
{context}

###Question
{question}
"""

# Define the predict function that runs when 'Submit' is clicked or when an API request is made
def predict(user_input, company):

    # Map the selected company to the source path stored in the chunk metadata
    company_filter = "/content/drive/MyDrive/Dataset-10k/" + str(company) + "-10-k-2023.pdf"

    # Retrieve the 5 most similar chunks, restricted to the selected company's 10-K
    relevant_document_chunks = vectorstore_persisted.similarity_search(
        user_input,
        k=5,
        filter={"source": company_filter}
    )

    # Create context_for_query: each chunk carries its page number and source
    # so the model can cite them, as the system message requires
    context_list = [
        d.page_content +
        "\n Page number: " + str(d.metadata['page']) +
        "\n ###Source: " + d.metadata['source'] + "\n\n "
        for d in relevant_document_chunks
    ]
    context_for_query = "".join(context_list)

    # Create the messages
    prompt = [
        {'role': 'system', 'content': qna_system_message},
        {'role': 'user', 'content': qna_user_message_template.format(
            context=context_for_query,
            question=user_input
        )}
    ]

    # Get the response from the LLM, handling errors with try-except
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=prompt,
            temperature=0
        )
        prediction = response.choices[0].message.content.strip()

    except Exception as e:
        prediction = f'Sorry, I encountered the following error: \n {e}'

    print(prediction)

    # Log both the inputs and outputs to a local log file.
    # Hold the commit scheduler's lock while writing so a background commit
    # does not upload a partially written record.
    with scheduler.lock:
        with log_file.open("a") as f:
            f.write(json.dumps(
                {
                    'user_input': user_input,
                    'retrieved_context': context_for_query,
                    'model_response': prediction
                }
            ))
            f.write("\n")

    return prediction
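
# A quick local smoke test (hypothetical question; assumes the persisted
# vector store and the Anyscale key are available). Uncomment to try it
# before launching the UI:
# print(predict("What were the main revenue drivers in 2023?", "Meta"))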

# Set up the Gradio UI
# Add a text box and a radio button to the interface:
# - the radio button selects the company 10-K report from which context is retrieved
#   (the choices must match the PDF filenames used in the metadata filter)
# - the text box is used to enter the question
# The Submit button runs the predict function

textbox = gr.Textbox(label="Enter your question:")
company = gr.Radio(choices=['aws', 'google', 'Meta', 'msft', 'IBM'], label="Select a company:")

# A Textbox suits a free-form answer better than gr.Label, which is meant
# for classification outputs
model_output = gr.Textbox(label='Answer to your question')

# Create the interface
# For the inputs parameter of Interface provide [textbox, company]
# For the outputs parameter of Interface provide model_output
demo = gr.Interface(
    fn=predict,
    inputs=[textbox, company],
    outputs=model_output,
    title="AI-Powered Question Answering"
)

# Run the interface

demo.queue()
demo.launch()
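
# Once launched, the app also serves predict() over Gradio's HTTP API.
# A minimal client-side sketch -- the URL and question are assumptions;
# point the client at wherever this app is actually hosted:
#
# from gradio_client import Client
#
# api = Client("http://127.0.0.1:7860/")
# answer = api.predict(
#     "What risk factors does the filing highlight?",  # user_input
#     "msft",                                          # company
#     api_name="/predict"
# )
# print(answer)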