PierreBrunelle committed
Commit: ef89dbb
Parent(s): 74672af

Create app.py
app.py ADDED
@@ -0,0 +1,155 @@
import gradio as gr
import pandas as pd
import pixeltable as pxt
from pixeltable.iterators import DocumentSplitter
import numpy as np
from pixeltable.functions.huggingface import sentence_transformer
from pixeltable.functions import openai
import os
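
# NOTE: the OpenAI calls below assume OPENAI_API_KEY is set in the environment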
# Ensure a clean slate for the demo
pxt.drop_dir('rag_demo', force=True)
pxt.create_dir('rag_demo')

# Set up embedding function
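# e5_embed is used both to build the chunk index and to embed lookup strings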
@pxt.expr_udf
def e5_embed(text: str) -> np.ndarray:
    return sentence_transformer(text, model_id='intfloat/e5-large-v2')

# Create prompt function
@pxt.udf
def create_prompt(top_k_list: list[dict], question: str) -> str:
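    # reversed() puts the most similar passage last, i.e. closest to the question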
    concat_top_k = '\n\n'.join(
        elt['text'] for elt in reversed(top_k_list)
    )
    return f'''
    PASSAGES:

    {concat_top_k}

    QUESTION:

    {question}'''

def process_files(ground_truth_file, pdf_files):
    # Process ground truth file
    if ground_truth_file.name.endswith('.csv'):
        df = pd.read_csv(ground_truth_file.name)
    else:
        df = pd.read_excel(ground_truth_file.name)

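    # The ground-truth sheet is expected to contain a 'Question' column,
    # which the computed columns below refer to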
    queries_t = pxt.create_table('rag_demo.queries', df)

    # Process PDF files
    documents_t = pxt.create_table(
        'rag_demo.documents',
        {'document': pxt.DocumentType()}
    )

    for pdf_file in pdf_files:
        # insert() expects a list of row dicts
        documents_t.insert([{'document': pdf_file.name}])

    # Create chunks view
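    # Each document is split into chunks of at most 300 tokens;
    # every chunk becomes one row of the view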
    chunks_t = pxt.create_view(
        'rag_demo.chunks',
        documents_t,
        iterator=DocumentSplitter.create(
            document=documents_t.document,
            separators='token_limit',
            limit=300
        )
    )

    # Add embedding index
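    # Indexing the chunk text with e5_embed enables the .similarity() lookups below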
    chunks_t.add_embedding_index('text', string_embed=e5_embed)

    # Create top_k query
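    # @chunks_t.query registers a parameterized query: here, the 5 chunks
    # most similar to query_text, ranked by embedding similarity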
    @chunks_t.query
    def top_k(query_text: str):
        sim = chunks_t.text.similarity(query_text)
        return (
            chunks_t.order_by(sim, asc=False)
            .select(chunks_t.text, sim=sim)
            .limit(5)
        )

    # Add computed columns to queries_t
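    # Assigning an expression creates a computed column: it is evaluated for
    # all existing rows and automatically for every row inserted later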
    queries_t['question_context'] = chunks_t.top_k(queries_t.Question)
    queries_t['prompt'] = create_prompt(
        queries_t.question_context, queries_t.Question
    )

    # Prepare messages for OpenAI
    messages = [
        {
            'role': 'system',
            'content': 'Please read the following passages and answer the question based on their contents.'
        },
        {
            'role': 'user',
            'content': queries_t.prompt
        }
    ]

    # Add OpenAI response column
    queries_t['response'] = openai.chat_completions(
        model='gpt-4-0125-preview', messages=messages
    )
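    # The full API response is stored as JSON; extract just the completion text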
    queries_t['answer'] = queries_t.response.choices[0].message.content

    return "Files processed successfully!"

def query_llm(question):
    queries_t = pxt.get_table('rag_demo.queries')

    # Inserting the question is all that is needed: the computed columns
    # defined in process_files (context lookup, prompt, OpenAI response,
    # answer) are evaluated automatically for the new row. Computed columns
    # cannot be written to directly, so the answer is not inserted by hand.
    queries_t.insert([{'Question': question}])

    # Return the updated set of question/answer pairs
    return queries_t.select(queries_t.Question, queries_t.answer).collect()

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# RAG Demo App")

    with gr.Row():
        ground_truth_file = gr.File(label="Upload Ground Truth (CSV or XLSX)")
        pdf_files = gr.File(label="Upload PDF Documents", file_count="multiple")

    process_button = gr.Button("Process Files")
    process_output = gr.Textbox(label="Processing Output")

    question_input = gr.Textbox(label="Enter your question")
    query_button = gr.Button("Query LLM")

    output_dataframe = gr.Dataframe(label="LLM Outputs")

    process_button.click(process_files, inputs=[ground_truth_file, pdf_files], outputs=process_output)
    query_button.click(query_llm, inputs=question_input, outputs=output_dataframe)

if __name__ == "__main__":
    demo.launch()