|
|
|
import os
import uuid
import json

import gradio as gr

from openai import OpenAI

from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain_community.vectorstores import Chroma

from huggingface_hub import CommitScheduler
from pathlib import Path
from dotenv import load_dotenv
|
load_dotenv() |
|
|
|
os.environ["ANYSCALE_API_KEY"] = os.getenv("ANYSCALE_API_KEY")
|
|
|
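# Anyscale Endpoints serves an OpenAI-compatible API, so the standard OpenAI client
# can be used by pointing it at the Anyscale base URL.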
client = OpenAI(
    base_url="https://api.endpoints.anyscale.com/v1",
    api_key=os.environ['ANYSCALE_API_KEY']
)
|
|
|
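# Embedding model used to encode queries; it should match the model used to embed
# the 10-K chunks when the Chroma collection was built.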
embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-large') |
|
|
|
|
|
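# Load the persisted Chroma collection of chunked 10-K filings from local disk.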
collection_name = 'report-10k-2024'

vectorstore_persisted = Chroma(
    collection_name=collection_name,
    persist_directory='./dataset-10k',
    embedding_function=embedding_model
)
|
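# Top-5 similarity retriever over the vector store. Note that predict() below queries
# the vector store directly so that results can be filtered by source document.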
retriever = vectorstore_persisted.as_retriever(
    search_type='similarity',
    search_kwargs={'k': 5}
)
|
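# Each run of the app writes to its own JSON Lines log file under logs/.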
log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent
|
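# Push the contents of the log folder to a Hugging Face dataset repo every 2 minutes.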
scheduler = CommitScheduler(
    repo_id="RAG-investment-recommendation-log",
    repo_type="dataset",
    folder_path=log_folder,
    path_in_repo="data",
    every=2
)
|
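# System prompt: the assistant must answer strictly from the retrieved 10-K context
# and fall back to "I don't know" when the context does not contain the answer.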
qna_system_message = """
You are an AI assistant to help Finsights Grey Inc., an innovative financial technology firm, develop a Retrieval-Augmented Generation (RAG) system to automate the extraction, summarization, and analysis of information from 10-K reports. Your knowledge base was last updated in August 2023.

User input will include the context required to answer user questions. This context will begin with the token: ###Context.
The context contains references to specific portions of a 10-K report relevant to the user query.

User questions will begin with the token: ###Question.

Please answer only using the context provided in the input. Do not mention anything about the context in your final answer.

If the answer is not found in the context, respond with "I don't know".

Here is an example of how to structure your response:

Answer:
[Answer]

Source:
[Source]
"""
|
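# User message template: the retrieved context followed by the user's question.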
qna_user_message_template = """
###Context
Here are some documents that are relevant to the question.
{context}

###Question
{question}
"""
|
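# Retrieve relevant chunks for the selected company, build the prompt, query the LLM,
# and log the interaction.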
def predict(user_input, company):
|
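    # Restrict retrieval to the selected company's 10-K filing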
    source_doc = "dataset/" + company + "-10-k-2023.pdf"
    relevant_document_chunks = vectorstore_persisted.similarity_search(
        user_input, k=5, filter={"source": source_doc}
    )
|
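    # Combine the retrieved chunks into a single context string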
    context_list = [d.page_content for d in relevant_document_chunks]
    context_for_query = "\n\n".join(context_list)
|
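    # Assemble the chat messages from the system prompt and the user template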
    prompt = [
        {'role': 'system', 'content': qna_system_message},
        {'role': 'user', 'content': qna_user_message_template.format(
            context=context_for_query,
            question=user_input
        )}
    ]
|
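    # Query the hosted Mixtral model; temperature=0 keeps the answers deterministic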
    try:
        response = client.chat.completions.create(
            model='mistralai/Mixtral-8x7B-Instruct-v0.1',
            messages=prompt,
            temperature=0
        )
        prediction = response.choices[0].message.content
    except Exception as e:
        prediction = f"Sorry, I encountered the following error:\n{e}"
|
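    # Log the query, retrieved context, and response; the scheduler lock prevents a
    # scheduled commit from uploading the file while it is being written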
    with scheduler.lock:
        with log_file.open("a") as f:
            f.write(json.dumps(
                {
                    'user_input': user_input,
                    'retrieved_context': context_for_query,
                    'model_response': prediction
                }
            ))
            f.write("\n")

    return prediction
|
|
|
|
|
|
|
|
|
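# Gradio UI: a question box and a company selector wired to predict() via the Submit button.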
with gr.Blocks() as demo:
    with gr.Row():
        question = gr.Textbox(label="Enter your question")
        company = gr.Radio(["aws", "IBM", "google", "meta", "msft"], label="Select a company")

    submit = gr.Button("Submit")
    output = gr.Textbox(label="Output")

    submit.click(
        fn=predict,
        inputs=[question, company],
        outputs=output
    )
|
|
|
|
|
|
|
demo.queue()
demo.launch(auth=("demouser", os.getenv('PASSWD')))