Upload 5 files
- agent_workflow.py +317 -0
- llm_providers.py +133 -0
- requirements.txt +13 -0
- streamlit_app.py +275 -0
agent_workflow.py
ADDED
@@ -0,0 +1,317 @@
import logging
from typing import List, Dict, Any, Optional, Callable

from dotenv import load_dotenv
from langchain.schema import HumanMessage

from llm_providers import LLMProvider
from tantivy_search_agent import TantivySearchAgent

load_dotenv()

class SearchAgent:
    def __init__(self, tantivy_agent: TantivySearchAgent, provider_name: str = "Claude"):
        """Initialize the search agent with a Tantivy agent and an LLM client."""
        self.tantivy_agent = tantivy_agent
        self.logger = logging.getLogger(__name__)

        # Initialize the LLM provider
        self.llm_provider = LLMProvider()
        self.llm = None
        self.set_provider(provider_name)

        self.min_confidence_threshold = 0.5

    def set_provider(self, provider_name: str) -> None:
        self.llm = self.llm_provider.get_provider(provider_name)
        if not self.llm:
            raise ValueError(f"Provider {provider_name} not available")
        self.current_provider = provider_name

    def get_available_providers(self) -> list[str]:
        return self.llm_provider.get_available_providers()

    def get_query(self, query: str, failed_queries: Optional[List[Dict[str, str]]] = None) -> str:
        """Generate a Tantivy query with the LLM, taking previously failed queries into account."""
        failed_queries = failed_queries or []  # avoid a shared mutable default argument
        try:
            if not self.llm:
                raise ValueError("LLM provider not initialized")

            prompt = (
                "Create a query for this search request with the following restrictions:\n" +
                self.tantivy_agent.get_query_instructions() +
                "\n\nAdditional instructions:\n"
                "1. Return only the search query, without any other text\n"
                "2. Use only Hebrew terms for the search query\n"
                "3. The corpus is ancient Hebrew (Torah and Talmud), so prefer ancient Hebrew terms and Talmudic expressions\n"
                "4. Avoid modern words that are not common in Talmudic texts\n"
                f"The search request: {query}"
            )

            if failed_queries:
                prompt += (
                    "\n\nPrevious failed queries:\n"
                    "------------------------\n" +
                    '\n'.join(f"Query: {q['query']}, Reason: {q['reason']}" for q in failed_queries) +
                    "\n\n"
                    "Please generate an alternative query that:\n"
                    "1. Uses different Hebrew synonyms or related terms\n"
                    "2. Tries broader or more general terms\n"
                    "3. Adjusts proximity values or uses wildcards\n"
                    "4. Avoids modern words that are not common in ancient Hebrew and Talmudic texts\n"
                )

            response = self.llm.invoke([HumanMessage(content=prompt)])
            tantivy_query = response.content.strip()
            self.logger.info(f"Generated Tantivy query: {tantivy_query}")
            return tantivy_query

        except Exception as e:
            self.logger.error(f"Error generating query: {e}")
            # Fall back to a basic quoted search
            return f'"{query}"'

    def _evaluate_results(self, results: List[Dict[str, Any]], query: str) -> Dict[str, Any]:
        """Evaluate search results with the LLM, producing a confidence score."""
        if not self.llm:
            raise ValueError("LLM provider not initialized")

        # Prepare context from the results
        context = "\n".join(
            f"Result {i + 1}. Source: {r.get('reference', '')}\n Text: {r.get('text', '')}"
            for i, r in enumerate(results)
        )

        try:
            message = self.llm.invoke([HumanMessage(content=f"""Evaluate the search results for answering this question:
Question: {query}

Search Results:
{context}

Provide the evaluation in this format, exactly 3 lines with no labels:
Confidence score (0.0 to 1.0) indicating how well the results can answer the question; this line must contain only the number.
ACCEPT if the score >= {self.min_confidence_threshold}, REFINE otherwise; return only the word ACCEPT or REFINE.
Detailed explanation of what information is present or missing, in Hebrew only.
""")])
            lines = message.content.strip().replace('\n\n', '\n').split('\n')
            confidence = float(lines[0])
            decision = lines[1].upper()
            explanation = lines[2]

            is_good = decision == 'ACCEPT'

            self.logger.info(f"Evaluation: Confidence={confidence}, Decision={decision}")
            self.logger.info(f"Explanation: {explanation}")

            return {
                "confidence": confidence,
                "is_sufficient": is_good,
                "explanation": explanation,
            }

        except Exception as e:
            self.logger.error(f"Error evaluating results: {e}")
            # Fall back to a conservative evaluation
            return {
                "confidence": 0.0,
                "is_sufficient": False,
                "explanation": "",
            }

    def _generate_answer(self, query: str, results: List[Dict[str, Any]]) -> str:
        """Generate the final answer with the LLM, making full use of the retrieved context."""
        if not self.llm:
            raise ValueError("LLM provider not initialized")

        if not results:
            return "לא נמצאו תוצאות"  # "No results were found"

        # Prepare context from the results
        context = "\n".join(
            f"Result {i + 1}. Source: {r.get('reference', '')}\n Text: {r.get('text', '')}"
            for i, r in enumerate(results)
        )

        try:
            message = self.llm.invoke([HumanMessage(content=f"""Based on these search results, answer this question:
Question: {query}

Search Results:
{context}

Requirements for your answer:
1. Use only information from the search results
2. Be comprehensive but concise
3. Structure the answer clearly
4. If any aspect of the question cannot be fully answered, acknowledge this
5. Cite sources for each fact you use
6. The answer should be in Hebrew only
""")])
            return message.content.strip()

        except Exception as e:
            self.logger.error(f"Error generating answer: {e}")
            return f"I encountered an error generating the answer: {str(e)}"

    def search_and_answer(self, query: str, num_results: int = 10, max_iterations: int = 3,
                          on_step: Optional[Callable[[Dict[str, Any]], None]] = None) -> Dict[str, Any]:
        """Execute the multi-step search process over Tantivy, streaming progress updates."""
        steps = []
        all_results = []

        # Step 1: Generate the Tantivy query
        initial_query = self.get_query(query)
        step = {
            'action': 'יצירת שאילתת חיפוש',  # "Creating a search query"
            'description': 'נוצרה שאילתת חיפוש עבור מנוע החיפוש',  # "A query was generated for the search engine"
            'results': [{'type': 'query', 'content': initial_query}]
        }
        steps.append(step)
        if on_step:
            on_step(step)

        # Step 2: Initial search with the Tantivy query
        results = self.tantivy_agent.search(initial_query, num_results)

        step = {
            'action': 'חיפוש במאגר',  # "Searching the corpus"
            'description': f'חיפוש במאגר עבור שאילתת חיפוש: {initial_query}',
            'results': [{'type': 'document', 'content': {
                'title': r['title'],
                'reference': r['reference'],
                'topics': r['topics'],
                'highlights': r['highlights'],
                'score': r['score']
            }} for r in results]
        }
        steps.append(step)
        if on_step:
            on_step(step)

        failed_queries = []

        if not results:
            failed_queries.append({'query': initial_query, 'reason': 'no results'})
            is_sufficient = False
        else:
            all_results.extend(results)

            # Step 3: Evaluate the results
            evaluation = self._evaluate_results(results, query)
            confidence = evaluation['confidence']
            is_sufficient = evaluation['is_sufficient']
            explanation = evaluation['explanation']

            step = {
                'action': 'דירוג תוצאות',  # "Scoring the results"
                'description': 'דירוג תוצאות חיפוש',
                'results': [{
                    'type': 'evaluation',
                    'content': {
                        'status': 'accepted' if is_sufficient else 'insufficient',
                        'confidence': confidence,
                        'explanation': explanation,
                    }
                }]
            }
            steps.append(step)
            if on_step:
                on_step(step)

            if not is_sufficient:
                failed_queries.append({'query': initial_query, 'reason': explanation})

        # Step 4: Additional searches if needed.
        # Attempts run from 2 through max_iterations, so the total number of
        # searches (including the initial one) matches max_iterations.
        attempt = 2
        while not is_sufficient and attempt <= max_iterations:
            # Generate a new query informed by the failed ones
            new_query = self.get_query(query, failed_queries)

            step = {
                'action': f'יצירת שאילתה מחדש (ניסיון {attempt})',  # "Regenerating the query (attempt N)"
                'description': 'נוצרה שאילתת חיפוש נוספת עבור מנוע החיפוש',
                'results': [
                    {'type': 'new_query', 'content': new_query}
                ]
            }
            steps.append(step)
            if on_step:
                on_step(step)

            # Search with the new query
            results = self.tantivy_agent.search(new_query, num_results)

            step = {
                'action': f'חיפוש נוסף (ניסיון {attempt})',  # "Additional search (attempt N)"
                'description': f'מחפש במאגר עבור שאילתת חיפוש: {new_query}',
                'results': [{'type': 'document', 'content': {
                    'title': r['title'],
                    'reference': r['reference'],
                    'topics': r['topics'],
                    'highlights': r['highlights'],
                    'score': r['score']
                }} for r in results]
            }
            steps.append(step)
            if on_step:
                on_step(step)

            if not results:
                failed_queries.append({'query': new_query, 'reason': 'no results'})
            else:
                all_results.extend(results)

                # Re-evaluate with the current results
                evaluation = self._evaluate_results(results, query)
                confidence = evaluation['confidence']
                is_sufficient = evaluation['is_sufficient']
                explanation = evaluation['explanation']

                step = {
                    'action': f'דירוג תוצאות (ניסיון {attempt})',
                    'description': 'דירוג תוצאות חיפוש לניסיון זה',
                    'results': [{
                        'type': 'evaluation',
                        'content': {
                            'status': 'accepted' if is_sufficient else 'insufficient',
                            'confidence': confidence,
                            'explanation': explanation,
                        }
                    }]
                }
                steps.append(step)
                if on_step:
                    on_step(step)

                if not is_sufficient:
                    failed_queries.append({'query': new_query, 'reason': explanation})

            attempt += 1

        # Step 5: Generate the final answer
        answer = self._generate_answer(query, all_results)

        final_result = {
            'steps': steps,
            'answer': answer,
            'sources': [{
                'title': r['title'],
                'reference': r['reference'],
                'topics': r['topics'],
                'path': r['file_path'],
                'highlights': r['highlights'],
                'text': r['text'],
                'score': r['score']
            } for r in all_results]
        }

        # Send the final result through the callback
        if on_step:
            on_step({
                'action': 'סיום',  # "Finished"
                'description': 'החיפוש הושלם',  # "The search is complete"
                'final_result': final_result
            })

        return final_result
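
A minimal driver for this workflow might look like the sketch below (hypothetical, not part of the upload; it assumes a built index at ./index and an ANTHROPIC_API_KEY in .env):

# hypothetical usage sketch, not part of the upload
from tantivy_search_agent import TantivySearchAgent
from agent_workflow import SearchAgent

def print_step(step):
    # each streamed step carries a Hebrew 'action' label plus its results
    print(step['action'], '-', step.get('description', ''))

tantivy = TantivySearchAgent("./index")               # assumes the index is already built
agent = SearchAgent(tantivy, provider_name="Claude")  # assumes ANTHROPIC_API_KEY is set
result = agent.search_and_answer("your Hebrew query here", num_results=5,
                                 max_iterations=3, on_step=print_step)
print(result['answer'])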
llm_providers.py
ADDED
@@ -0,0 +1,133 @@
import os
from dataclasses import dataclass
from typing import Optional, Dict, List, Any

import requests
from dotenv import load_dotenv
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI
from langchain_ollama import ChatOllama
from langchain_core.messages import BaseMessage, HumanMessage

load_dotenv()

# Optional local CA bundle (the NetFree filter); used only when it actually exists,
# so the module also works on machines without it.
CA_BUNDLE = 'C:\\ProgramData\\NetFree\\CA\\netfree-ca-bundle-curl.crt'


@dataclass
class GeminiResponse:
    content: str


class GeminiProvider:
    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent"

    def chat(self, messages: List[Any]) -> GeminiResponse:
        # Convert messages to the Gemini request format
        gemini_messages = []
        for msg in messages:
            # Handle both dicts and LangChain message objects
            if isinstance(msg, BaseMessage):
                role = "user" if isinstance(msg, HumanMessage) else "model"
                content = msg.content
            else:
                role = "user" if msg["role"] == "human" else "model"
                content = msg["content"]

            gemini_messages.append({
                "role": role,
                "parts": [{"text": content}]
            })

        # Prepare the request
        headers = {
            "Content-Type": "application/json"
        }

        params = {
            "key": self.api_key
        }

        data = {
            "contents": gemini_messages,
            "generationConfig": {
                "temperature": 0.7,
                "topP": 0.8,
                "topK": 40,
                "maxOutputTokens": 2048,
            }
        }

        try:
            response = requests.post(
                self.base_url,
                headers=headers,
                params=params,
                json=data,
                # Verify against the local CA bundle when present, otherwise use the defaults
                verify=CA_BUNDLE if os.path.exists(CA_BUNDLE) else True
            )
            response.raise_for_status()

            result = response.json()
            if "candidates" in result and len(result["candidates"]) > 0:
                return GeminiResponse(content=result["candidates"][0]["content"]["parts"][0]["text"])
            else:
                raise Exception("No response generated")

        except Exception as e:
            raise Exception(f"Error calling Gemini API: {str(e)}") from e

    def invoke(self, messages: List[BaseMessage], **kwargs) -> GeminiResponse:
        return self.chat(messages)

    def generate(self, prompts, **kwargs) -> GeminiResponse:
        if isinstance(prompts, str):
            return self.invoke([HumanMessage(content=prompts)])
        elif isinstance(prompts, list):
            # Only the first prompt is used
            return self.invoke([HumanMessage(content=prompts[0])])
        raise ValueError("Unsupported prompt format")

class LLMProvider:
    def __init__(self):
        self.providers: Dict[str, Any] = {}
        self._setup_providers()

    def _setup_providers(self):
        # Point requests at the local CA bundle only when it exists
        if os.path.exists(CA_BUNDLE):
            os.environ['REQUESTS_CA_BUNDLE'] = CA_BUNDLE

        # Google Gemini
        if google_key := os.getenv('GOOGLE_API_KEY'):
            self.providers['Gemini'] = GeminiProvider(api_key=google_key)

        # Anthropic
        if anthropic_key := os.getenv('ANTHROPIC_API_KEY'):
            self.providers['Claude'] = ChatAnthropic(
                api_key=anthropic_key,
                model_name="claude-3-5-sonnet-20241022",
            )

        # OpenAI
        if openai_key := os.getenv('OPENAI_API_KEY'):
            self.providers['ChatGPT'] = ChatOpenAI(
                api_key=openai_key,
                model_name="gpt-4o-2024-11-20"
            )

        # Ollama (local). Constructing the client does not contact the server,
        # so an unavailable Ollama usually surfaces at call time, not here.
        try:
            self.providers['Ollama-dictalm2.0'] = ChatOllama(model="dictaLM")
        except Exception:
            pass  # Ollama not available

    def get_available_providers(self) -> list[str]:
        """Return the list of available provider names"""
        return list(self.providers.keys())

    def get_provider(self, name: str) -> Optional[Any]:
        """Get an LLM provider by name"""
        return self.providers.get(name)
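
A short usage sketch for the provider registry (hypothetical; it assumes at least one of the API keys above is present in the environment):

# hypothetical usage sketch, not part of the upload
from llm_providers import LLMProvider
from langchain_core.messages import HumanMessage

provider = LLMProvider()
print(provider.get_available_providers())  # e.g. ['Gemini', 'Claude', 'ChatGPT', 'Ollama-dictalm2.0']

llm = provider.get_provider('Claude')      # None when ANTHROPIC_API_KEY is not set
if llm:
    reply = llm.invoke([HumanMessage(content="שלום")])
    print(reply.content)                   # LangChain models and GeminiProvider both expose .content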
requirements.txt
ADDED
@@ -0,0 +1,13 @@
langchain
python-dotenv
flet
langchain-community
langchain-core
langchain-openai
langchain-anthropic
langchain-ollama
ollama
requests
tantivy
streamlit
gdown
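
The list is unpinned, so a fresh virtual environment and pip install -r requirements.txt will pull the current releases of the packages above; pinning versions is worth considering if reproducibility matters.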
streamlit_app.py
ADDED
@@ -0,0 +1,275 @@
import os
import zipfile
from typing import Optional, List

import gdown
import streamlit as st
from dotenv import load_dotenv

from agent_workflow import SearchAgent
from tantivy_search_agent import TantivySearchAgent

# Load environment variables
load_dotenv()

class SearchAgentUI:
    def __init__(self):
        self.tantivy_agent: Optional[TantivySearchAgent] = None
        self.agent: Optional[SearchAgent] = None
        self.index_path = "./index"  # os.getenv("INDEX_PATH", "./index")
        # Google Drive file ID for the zipped index
        self.gdrive_index_id = os.getenv("GDRIVE_INDEX_ID", "1lpbBCPimwcNfC0VZOlQueA4SHNGIp5_t")

    def download_index_from_gdrive(self) -> bool:
        """Download the zipped index from Google Drive and unpack it."""
        try:
            # Temporary zip file path
            zip_path = "index.zip"
            url = f"https://drive.google.com/uc?id={self.gdrive_index_id}"

            # Show a status message; gdown.download has no progress-callback
            # parameter, so download progress is reported on stdout (quiet=False)
            progress_text = st.empty()
            progress_text.text("מוריד את האינדקס...")  # "Downloading the index..."
            gdown.download(url, zip_path, quiet=False)

            # Update the status for extraction
            progress_text.text("מחלץ קבצים...")  # "Extracting files..."
            with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                zip_ref.extractall(".")

            # Remove the zip file and clear the status message
            os.remove(zip_path)
            progress_text.empty()

            return True
        except Exception as e:
            st.error(f"Failed to download index: {str(e)}")
            return False

    def get_available_providers(self) -> List[str]:
        """List the available LLM providers via a throwaway agent."""
        temp_tantivy = TantivySearchAgent(self.index_path)
        temp_agent = SearchAgent(temp_tantivy)
        return temp_agent.get_available_providers()

    def initialize_system(self):
        try:
            # Download the index from Google Drive if it is missing locally
            if not os.path.exists(self.index_path):
                st.warning("Index folder not found. Attempting to download from Google Drive...")
                if not self.download_index_from_gdrive():
                    return False, "שגיאה: לא ניתן להוריד את האינדקס", []  # "Error: the index could not be downloaded"
                st.success("Index downloaded successfully!")

            self.tantivy_agent = TantivySearchAgent(self.index_path)
            if self.tantivy_agent.validate_index():
                available_providers = self.get_available_providers()
                self.agent = SearchAgent(
                    self.tantivy_agent,
                    provider_name=st.session_state.get('provider', available_providers[0])
                )
                return True, "המערכת מוכנה לחיפוש", available_providers  # "The system is ready to search"
            else:
                return False, "שגיאה: אינדקס לא תקין", []  # "Error: invalid index"
        except Exception as ex:
            return False, f"שגיאה באתחול המערכת: {str(ex)}", []  # "Error initializing the system"

    def main(self):
        st.set_page_config(
            page_title="איתוריא",
            layout="wide",
            initial_sidebar_state="collapsed"
        )

        # Enhanced RTL support and styling (the UI copy is Hebrew, right-to-left)
        st.markdown("""
            <style>
            .stApp {
                direction: rtl;
            }
            .stTextInput > div > div > input {
                direction: rtl;
            }
            .stSelectbox > div > div > div {
                direction: rtl;
            }
            .stNumberInput > div > div > input {
                direction: rtl;
            }
            .search-step {
                border: 1px solid #e0e0e0;
                border-radius: 5px;
                padding: 10px;
                margin: 5px 0;
                background-color: #f8f9fa;
            }
            .document-group {
                border: 1px solid #e3f2fd;
                border-radius: 5px;
                padding: 10px;
                margin: 5px 0;
                background-color: #f5f9ff;
            }
            .document-item {
                border: 1px solid #e0e0e0;
                border-radius: 5px;
                padding: 10px;
                margin: 5px 0;
                background-color: white;
            }
            </style>
        """, unsafe_allow_html=True)

        # Initialize the system
        success, status_msg, available_providers = self.initialize_system()

        # Header layout
        col1, col2, col3 = st.columns([2, 1, 1])

        with col1:
            if success:
                st.success(status_msg)
            else:
                st.error(status_msg)

        with col2:
            if 'provider' not in st.session_state:
                st.session_state.provider = available_providers[0] if available_providers else None

            if available_providers:
                provider = st.selectbox(
                    "ספק בינה מלאכותית",  # "AI provider"
                    options=available_providers,
                    key='provider'
                )
                if self.agent:
                    self.agent.set_provider(provider)

        with col3:
            col3_1, col3_2 = st.columns(2)
            with col3_1:
                max_iterations = st.number_input(
                    "מספר נסיונות מקסימלי",  # "Maximum number of attempts"
                    min_value=1,
                    value=3,
                    key='max_iterations'
                )
            with col3_2:
                results_per_search = st.number_input(
                    "תוצאות לכל חיפוש",  # "Results per search"
                    min_value=1,
                    value=5,
                    key='results_per_search'
                )

        # Search input
        query = st.text_input(
            "הכנס שאילתת חיפוש",  # "Enter a search query"
            disabled=not success,
            placeholder="הקלד את שאילתת החיפוש שלך כאן...",  # "Type your search query here..."
            key='search_query'
        )

        # Search button (pressing Enter in the text input also triggers a search)
        if (st.button('חפש', disabled=not success) or query) and query != "" and self.agent:
            try:
                if 'steps' not in st.session_state:
                    st.session_state.steps = []

                steps_container = st.container()
                answer_container = st.container()
                sources_container = st.container()

                with steps_container:
                    st.subheader("צעדי תהליך החיפוש")  # "Search process steps"

                def handle_step_update(step):
                    if 'final_result' in step:
                        final_result = step['final_result']

                        with answer_container:
                            st.subheader("תשובה סופית")  # "Final answer"
                            st.info(final_result['answer'])

                        if final_result['sources']:
                            with sources_container:
                                st.subheader("מסמכי מקור")  # "Source documents"
                                st.markdown(f"נמצאו {len(final_result['sources'])} תוצאות")  # "N results were found"

                                for i, source in enumerate(final_result['sources']):
                                    with st.expander(f"תוצאה {i+1}: {source['reference']} (ציון: {source['score']:.2f})"):
                                        st.write(source['text'])

                    else:
                        with steps_container:
                            step_number = len(st.session_state.steps) + 1
                            st.markdown(f"""
                                <div class='search-step'>
                                    <strong>צעד {step_number}. {step['action']}</strong>
                                </div>
                            """, unsafe_allow_html=True)
                            st.markdown(f"**{step['description']}**")

                            if 'results' in step:
                                documents = []

                                for r in step['results']:
                                    if r['type'] == 'query':
                                        st.markdown("**שאילתת חיפוש:**")  # "Search query:"
                                        st.code(r['content'])

                                    elif r['type'] == 'document':
                                        documents.append(r['content'])

                                    elif r['type'] == 'evaluation':
                                        content = r['content']
                                        status = "✓" if content['status'] == 'accepted' else "↻"
                                        confidence = f"ביטחון: {content['confidence']}"  # "Confidence: ..."
                                        if content['status'] == 'accepted':
                                            st.success(f"{status} {confidence}")
                                        else:
                                            st.warning(f"{status} {confidence}")
                                        if content['explanation']:
                                            st.info(content['explanation'])

                                    elif r['type'] == 'new_query':
                                        st.markdown("**ניסיון הבא:**")  # "Next attempt:"
                                        st.code(r['content'])

                                # Display any documents that were found
                                if documents:
                                    for doc in documents:
                                        with st.expander(f"{doc['reference']} (ציון: {doc['score']:.2f})"):
                                            st.write(doc['highlights'][0] if doc['highlights'] else '')

                            st.markdown("---")
                            st.session_state.steps.append(step)

                # Clear previous steps before starting a new search
                st.session_state.steps = []

                # Start the search process
                self.agent.search_and_answer(
                    query=query,
                    num_results=results_per_search,
                    max_iterations=max_iterations,
                    on_step=handle_step_update
                )

            except Exception as ex:
                st.error(f"שגיאת חיפוש: {str(ex)}")  # "Search error"

if __name__ == "__main__":
    app = SearchAgentUI()
    app.main()
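
With the dependencies installed and at least one provider key in .env, the app starts with: streamlit run streamlit_app.py. On first launch it downloads and unpacks the index from Google Drive before the search box is enabled.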