improved app.py
app.py CHANGED
@@ -13,6 +13,7 @@ import chardet
 import gradio as gr
 import pandas as pd
 import json
+import re
 
 # Enable logging for debugging
 logging.basicConfig(level=logging.DEBUG)
@@ -70,13 +71,27 @@ def load_documents(file_paths):
             logger.error(f"Error processing file {file_path}: {e}")
     return docs
 
+# Function to ensure the response ends with a complete sentence
+def ensure_complete_sentences(text):
+    # Use regex to find all complete sentences
+    sentences = re.findall(r'[^.!?]*[.!?]', text)
+    if sentences:
+        return ''.join(sentences).strip()  # keep every complete sentence, dropping any trailing fragment
+    return text  # Return as is if no complete sentence is found
+
 # Initialize the LLM using ChatGroq with GROQ's API
 def initialize_llm(model, temperature, max_tokens):
     try:
+        # Allocate some tokens for the prompt (e.g., 50 tokens)
+        prompt_tokens = 50
+        response_max_tokens = max_tokens - prompt_tokens
+        if response_max_tokens <= 0:
+            raise ValueError("max_tokens is too small to allocate for the response.")
+
         llm = ChatGroq(
             model=model,
             temperature=temperature,
-            max_tokens=max_tokens
+            max_tokens=response_max_tokens,  # Adjusted max_tokens
             api_key=api_key  # Ensure the API key is passed correctly
         )
         logger.debug("LLM initialized successfully.")
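The trimming helper can be checked on its own. The following minimal, self-contained sketch repeats the helper from the hunk above and runs it on a made-up truncated response; the sample strings are purely illustrative and not from the source:

import re

def ensure_complete_sentences(text):
    # Collect every span that ends with ., ! or ?
    sentences = re.findall(r'[^.!?]*[.!?]', text)
    if sentences:
        return ''.join(sentences).strip()
    return text  # no sentence terminator found; return unchanged

truncated = "Drink water regularly. Aim for eight glasses a day. Also try to"
print(ensure_complete_sentences(truncated))
# Prints: Drink water regularly. Aim for eight glasses a day.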
@@ -114,7 +129,7 @@ def create_rag_pipeline(file_paths, model, temperature, max_tokens):
     custom_prompt_template = PromptTemplate(
         input_variables=["context", "question"],
         template="""
-        You are an AI assistant with expertise in daily wellness
+        You are an AI assistant with expertise in daily wellness. Your aim is to provide detailed yet concise solutions regarding daily wellness topics.
 
         Context:
         {context}
@@ -122,7 +137,7 @@ def create_rag_pipeline(file_paths, model, temperature, max_tokens):
         Question:
         {question}
 
-        Provide a detailed answer,
+        Provide a detailed but concise answer, ensuring that it is complete and does not end abruptly. Include relevant examples and a suggested schedule.
         """
     )
 
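For context, rag_chain.run(question) later in this diff implies the prompt above is attached to a retrieval chain. Below is a hedged sketch of that wiring, assuming the unseen part of create_rag_pipeline uses LangChain's RetrievalQA with a vector-store retriever; the function name build_rag_chain and its arguments are illustrative, not code from this commit:

from langchain.chains import RetrievalQA

def build_rag_chain(llm, retriever, prompt):
    # Assumed wiring: retrieved documents are stuffed into the {context}
    # variable of the custom prompt, and {question} receives the user query.
    return RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
        chain_type_kwargs={"prompt": prompt},
    )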
@@ -138,7 +153,7 @@ def create_rag_pipeline(file_paths, model, temperature, max_tokens):
         logger.error(f"Error creating RAG pipeline: {e}")
         return None, f"Error creating RAG pipeline: {e}"
 
-# Function to answer questions
+# Function to answer questions with post-processing
 def answer_question(file_paths, model, temperature, max_tokens, question):
     rag_chain, message = create_rag_pipeline(file_paths, model, temperature, max_tokens)
     if rag_chain is None:
@@ -146,7 +161,9 @@ def answer_question(file_paths, model, temperature, max_tokens, question):
     try:
         answer = rag_chain.run(question)
         logger.debug("Question answered successfully.")
-        return answer
+        # Post-process to ensure the answer ends with a complete sentence
+        complete_answer = ensure_complete_sentences(answer)
+        return complete_answer
     except Exception as e:
         logger.error(f"Error during RAG pipeline execution: {e}")
         return f"Error during RAG pipeline execution: {e}"
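A quick way to smoke-test the post-processed flow outside the UI; the document path and question below are hypothetical placeholders:

if __name__ == "__main__":
    reply = answer_question(
        file_paths=["data/wellness_notes.txt"],  # hypothetical sample document
        model="llama3-8b-8192",
        temperature=0.7,
        max_tokens=500,
        question="How can I build a simple morning routine?",
    )
    print(reply)  # should end on a complete sentence via ensure_complete_sentences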
@@ -162,7 +179,7 @@ interface = gr.Interface(
     inputs=[
         gr.Textbox(label="Model Name", value="llama3-8b-8192"),
         gr.Slider(label="Temperature", minimum=0, maximum=1, step=0.01, value=0.7),
-        gr.Slider(label="Max Tokens", minimum=
+        gr.Slider(label="Max Tokens", minimum=100, maximum=1024, step=1, value=500),
         gr.Textbox(label="Question")
     ],
     outputs="text",
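Only the inputs list and outputs are visible in this hunk. The sketch below shows how the interface might be wired end to end; the ask wrapper, the fixed DEFAULT_DOCS list, and the launch() call are assumptions for illustration rather than code from this commit:

DEFAULT_DOCS = ["docs/wellness_guide.txt"]  # assumed sample corpus, not from the source

def ask(model, temperature, max_tokens, question):
    # Bridge the visible Gradio inputs to answer_question's signature
    return answer_question(DEFAULT_DOCS, model, temperature, max_tokens, question)

interface = gr.Interface(
    fn=ask,
    inputs=[
        gr.Textbox(label="Model Name", value="llama3-8b-8192"),
        gr.Slider(label="Temperature", minimum=0, maximum=1, step=0.01, value=0.7),
        gr.Slider(label="Max Tokens", minimum=100, maximum=1024, step=1, value=500),
        gr.Textbox(label="Question"),
    ],
    outputs="text",
)

interface.launch()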
|