## Setup
# Import the necessary libraries
import json
import gradio as gr
import uuid
import os
import pandas as pd
from openai import OpenAI
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings
)
from langchain_community.vectorstores import Chroma
from huggingface_hub import CommitScheduler
from dotenv import load_dotenv
from pathlib import Path
# Create Client
load_dotenv()
os.environ["anyscale_api_key"] = os.getenv("anyscale_api_key")
client = OpenAI(
base_url="https://api.endpoints.anyscale.com/v1",
api_key=os.environ['anyscale_api_key']
)
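
# Optional connectivity check (a minimal sketch; assumes the Anyscale
# endpoint serves the Mixtral model used further below). Uncomment to test:
# response = client.chat.completions.create(
#     model="mistralai/Mixtral-8x7B-Instruct-v0.1",
#     messages=[{'role': 'user', 'content': 'Say hello'}],
#     max_tokens=8
# )
# print(response.choices[0].message.content)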
# Define the embedding model and the vectorstore
embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-large')
# Load the persisted vectorDB
collection_name = 'Dataset-10k'
dataset_db = Chroma(
collection_name=collection_name,
persist_directory='./dataset_db',
embedding_function=embedding_model
)
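
# Optional sanity check (a sketch; assumes the persisted DB already contains
# the 10-K chunks with a 'source' metadata field, as relied on in predict()):
# docs = dataset_db.similarity_search("total revenue", k=1)
# print(docs[0].metadata if docs else "vector store is empty")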
# Prepare the logging functionality
log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent
scheduler = CommitScheduler(
repo_id="RAG-10K-log",
repo_type="dataset",
folder_path=log_folder,
path_in_repo="data",
every=2
)
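# Note: CommitScheduler pushes the contents of log_folder to the Hub dataset
# repo every `every` minutes (here, every 2 minutes) from a background
# thread, which is why writes below are guarded with scheduler.lock.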
# Define the Q&A system message
qna_system_message = """
You are an assistant to a financial services firm who answers user queries on annual reports.
User input will have the context required by you to answer user questions.
This context will begin with the token: ###Context.
The context contains references to specific portions of a document relevant to the user query.
User questions will begin with the token: ###Question.
Please answer only using the context provided in the input. Do not mention anything about the context in your final answer.
If the answer is not found in the context, respond "I don't know".
"""
# Define the user message template
qna_user_message_template = """
###Context
Here are some documents that are relevant to the question mentioned below.
{context}
###Question
{question}
"""
# Define the predict function that runs when 'Submit' is clicked or when an API request is made
def predict(user_input, company):
    # Capture the inputs for logging
    sample = {
        'user_input': user_input,
        'company': company
    }

    # Restrict retrieval to the selected company's 10-K report
    source_filter = "dataset/" + company + "-10-k-2023.pdf"

    # Create context_for_query from the most relevant document chunks
    relevant_document_chunks = dataset_db.similarity_search(
        user_input,
        k=5,
        filter={"source": source_filter}
    )
    context_list = [d.page_content for d in relevant_document_chunks]
    context_for_query = ". ".join(context_list)

    # Create the messages for the LLM
    prompt = [
        {'role': 'system', 'content': qna_system_message},
        {'role': 'user', 'content': qna_user_message_template.format(
            context=context_for_query,
            question=user_input
        )}
    ]
    # Get a response from the LLM
    try:
        model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
        response = client.chat.completions.create(
            model=model_name,
            messages=prompt,
            temperature=0
        )
        prediction = response.choices[0].message.content.strip()
    except Exception as e:
        prediction = f'Sorry, I encountered the following error: \n {e}'
    # Log both the inputs and outputs to a local log file.
    # While writing to the log file, ensure that the commit scheduler is
    # locked to avoid parallel access from the background commit thread.
    with scheduler.lock:
        with log_file.open("a") as f:
            f.write(json.dumps(
                {
                    **sample,
                    'retrieved_context': context_for_query,
                    'model_response': prediction
                }
            ))
            f.write("\n")

    return prediction
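
# Example of calling predict() directly (a sketch; assumes the vector store
# contains chunks whose source is 'dataset/aws-10-k-2023.pdf', per the
# filter constructed above):
# print(predict("What was the company's total revenue?", "aws"))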
# Set up the Gradio UI
user_input = gr.Textbox(label='Query')
company_input = gr.Radio(
    ['aws', 'google', 'IBM', 'Meta', 'msft'],
    label='Company'
)
model_output = gr.Text(label='Response')
# Add the text box and radio button to the interface
# The radio button selects the company 10-K report from which the context is retrieved
# Create the interface
# For the inputs parameter of Interface, provide [user_input, company_input]
demo = gr.Interface(
    fn=predict,
    inputs=[user_input, company_input],
    outputs=model_output,
    title="RAG on 10k-reports",
    description="This API allows you to query annual reports",
    concurrency_limit=16
)
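# queue() enables Gradio's request queue so concurrent requests are handled
# up to the concurrency_limit set on the interface above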
demo.queue()
demo.launch()