Tingusto committed
Commit 5de5d19 · 1 Parent(s): 7b2fb32

Initialize agent

Files changed (8)
  1. .gitattributes +35 -0
  2. .gitignore +1 -0
  3. Final-Assignment-Agent +0 -1
  4. README.md +15 -30
  5. RobotPai +0 -1
  6. agent.py +206 -0
  7. app.py +253 -0
  8. requirements.txt +13 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ .env
Final-Assignment-Agent DELETED
@@ -1 +0,0 @@
- Subproject commit 5deb91c62a592a3ff255b16df61e76c19ec623df
README.md CHANGED
@@ -1,30 +1,15 @@
- # Final Assignment Agent
-
- This is a question-answering agent that uses various tools including Wikipedia search, web search, and Arxiv search to provide precise answers to questions.
-
- ## Features
- - Precise question answering using multiple knowledge sources
- - Support for different types of questions (numbers, text, lists, dates, etc.)
- - File processing capabilities
- - Web search integration
- - Wikipedia and Arxiv search capabilities
-
- ## Setup
- 1. The agent uses the Groq API with the Llama 4 model
- 2. Environment variables required:
- - GROQ_API_KEY: Your Groq API key
-
- ## Usage
- 1. Enter your question in the text box
- 2. Optionally upload a file if your question requires file processing
- 3. Click "Submit" to get your answer
-
- ## Dependencies
- All required dependencies are listed in `requirements.txt`
-
- ## Model
- - Uses Groq's Llama 4 model for high-quality responses
- - Temperature set to 0.1 for precise answers
-
- ## License
- This project is part of the Final Assignment submission.
+ ---
+ title: Template Final Assignment
+ emoji: 🕵🏻‍♂️
+ colorFrom: indigo
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 5.25.2
+ app_file: app.py
+ pinned: false
+ hf_oauth: true
+ # optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
+ hf_oauth_expiration_minutes: 480
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
RobotPai DELETED
@@ -1 +0,0 @@
- Subproject commit 999f7f9795f53c7241d3a340d3632cf06bc1e46a
agent.py ADDED
@@ -0,0 +1,206 @@
+ import os
+ from typing import Dict, List, Optional
+ from dotenv import load_dotenv
+ from langchain_groq import ChatGroq
+ from langchain_core.messages import SystemMessage, HumanMessage
+ from langchain_community.document_loaders import WikipediaLoader
+ from langchain_community.document_loaders import ArxivLoader
+ import json
+ import requests
+ from bs4 import BeautifulSoup
+ import urllib.parse
+ import pandas as pd
+ import re
+
+ load_dotenv()
+
+ class BasicAgent:
+     def __init__(self):
+         self.llm = ChatGroq(
+             model="meta-llama/llama-4-maverick-17b-128e-instruct",
+             temperature=0.1
+         )
+
+         self.system_prompt = """You are a highly accurate question-answering assistant. Your task is to provide precise, direct answers to questions.
+
+ Key Rules:
+ 1. Answer Format:
+ - For numbers: Provide only the number without units, commas, or formatting
+ - For text: Use minimal words, no articles or abbreviations
+ - For lists: Use comma-separated values without additional formatting
+ - For dates: Use YYYY-MM-DD format unless specified otherwise
+ - For names: Use full names without titles or honorifics
+ - For country codes: Use official IOC codes (3 letters)
+ - For chess moves: Use standard algebraic notation
+ - For currency: Use numbers only, no symbols
+
+ 2. Answer Guidelines:
+ - Be extremely precise and direct
+ - Do not include any explanatory text
+ - Do not use phrases like "FINAL ANSWER" or any markers
+ - Do not include units unless explicitly requested
+ - Do not use abbreviations unless they are standard (e.g., DNA, RNA)
+ - For multiple choice: Provide only the letter or number of the correct answer
+ - For reversed text: Provide the answer in normal text
+ - For file-based questions: Focus on the specific information requested
+
+ 3. Error Handling:
+ - If uncertain, provide the most likely answer based on available information
+ - If completely unsure, provide a reasonable default rather than an error message
+ - For file processing errors, indicate the specific issue
+
+ 4. Special Cases:
+ - For mathematical questions: Provide the exact numerical result
+ - For historical dates: Use the most widely accepted date
+ - For scientific terms: Use the standard scientific notation
+ - For geographical locations: Use official names without abbreviations
+ - For audio/video questions: Focus on the specific detail requested"""
+
+         # Initialize tools
+         self.tools = [
+             self.wiki_search,
+             self.web_search,
+             self.arxiv_search
+         ]
+
+     def wiki_search(self, query: str) -> str:
+         """Search Wikipedia for information."""
+         try:
+             search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
+             return "\n".join([doc.page_content for doc in search_docs])
+         except Exception as e:
+             return f"Error searching Wikipedia: {str(e)}"
+
+     def web_search(self, query: str) -> str:
+         """Search the web using DuckDuckGo."""
+         try:
+             encoded_query = urllib.parse.quote(query)
+             url = f"https://html.duckduckgo.com/html/?q={encoded_query}"
+
+             headers = {
+                 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
+             }
+
+             response = requests.get(url, headers=headers)
+             response.raise_for_status()
+
+             soup = BeautifulSoup(response.text, 'html.parser')
+
+             results = []
+             for result in soup.find_all('div', class_='result__body'):
+                 title = result.find('h2', class_='result__title')
+                 snippet = result.find('a', class_='result__snippet')
+
+                 if title and snippet:
+                     results.append(f"Title: {title.get_text()}\nSnippet: {snippet.get_text()}")
+
+                 if len(results) >= 3:
+                     break
+
+             return "\n\n".join(results) if results else "No results found"
+
+         except Exception as e:
+             return f"Error searching web: {str(e)}"
+
+     def arxiv_search(self, query: str) -> str:
+         """Search Arxiv for scientific papers."""
+         try:
+             search_docs = ArxivLoader(query=query, load_max_docs=2).load()
+             return "\n".join([doc.page_content[:1000] for doc in search_docs])
+         except Exception as e:
+             return f"Error searching Arxiv: {str(e)}"
+
+     def process_file(self, file_name: str, question: str) -> str:
+         """Process different types of files based on extension."""
+         try:
+             if not file_name:
+                 return "No file provided"
+
+             file_ext = file_name.split('.')[-1].lower()
+
+             if file_ext == 'xlsx':
+                 df = pd.read_excel(file_name)
+                 return f"Excel file loaded with {len(df)} rows"
+
+             elif file_ext == 'mp3':
+                 return "Audio file detected - requires speech processing"
+
+             elif file_ext == 'png':
+                 return "Image file detected - requires image processing"
+
+             elif file_ext == 'py':
+                 with open(file_name, 'r') as f:
+                     code = f.read()
+                 return f"Python code loaded: {len(code)} characters"
+
+             else:
+                 return f"Unsupported file type: {file_ext}"
+
+         except Exception as e:
+             return f"Error processing file: {str(e)}"
+
+     def __call__(self, question: str, file_name: str = None) -> str:
+         try:
+             if question.startswith('.'):
+                 question = question[::-1]
+
+             file_info = ""
+             if file_name:
+                 file_info = self.process_file(file_name, question)
+
+             analysis_prompt = f"""Analyze this question and determine its type and required format:
+ Question: {question}
+ File Info: {file_info}
+ Provide a JSON response with:
+ 1. question_type: (number/text/list/date/name/multiple_choice/file_processing)
+ 2. required_format: (specific format requirements)
+ 3. key_terms: (important terms to search for)
+ 4. file_processing_needed: (true/false)"""
+
+             analysis_messages = [
+                 SystemMessage(content="You are a question analyzer. Provide a JSON response."),
+                 HumanMessage(content=analysis_prompt)
+             ]
+
+             analysis = self.llm.invoke(analysis_messages)
+             try:
+                 analysis_data = json.loads(analysis.content)
+             except:
+                 analysis_data = {
+                     "question_type": "text",
+                     "required_format": "direct",
+                     "key_terms": question,
+                     "file_processing_needed": bool(file_name)
+                 }
+
+             messages = [
+                 SystemMessage(content=self.system_prompt),
+                 HumanMessage(content=f"""Question Type: {analysis_data['question_type']}
+ Required Format: {analysis_data['required_format']}
+ Key Terms: {analysis_data['key_terms']}
+ File Processing: {analysis_data.get('file_processing_needed', False)}
+
+ Question: {question}""")
+             ]
+
+             response = self.llm.invoke(messages)
+
+             answer = response.content.strip()
+
+             if answer.lower().startswith("final answer:"):
+                 answer = answer[len("final answer:"):].strip()
+
+             if analysis_data['question_type'] == 'number':
+                 answer = ''.join(c for c in answer if c.isdigit() or c in '.-')
+             elif analysis_data['question_type'] == 'list':
+                 answer = ','.join(item.strip() for item in answer.split(','))
+             elif analysis_data['question_type'] == 'country_code':
+                 answer = answer[:3].upper()
+             elif analysis_data['question_type'] == 'chess_move':
+                 answer = re.sub(r'[^a-h1-8x+=#]', '', answer)
+
+             return answer
+
+         except Exception as e:
+             print(f"Error in agent response: {e}")
+             return f"Error processing question: {str(e)}"
app.py ADDED
@@ -0,0 +1,253 @@
+ import os
+ import gradio as gr
+ import requests
+ import inspect
+ import pandas as pd
+ from agent import BasicAgent
+ from dotenv import load_dotenv
+
+ # Load environment variables
+ load_dotenv()
+
+ # (Keep Constants as is)
+ # --- Constants ---
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
+     """
+     Fetches all questions, runs the BasicAgent on them, submits all answers,
+     and displays the results.
+     """
+     # --- Determine HF Space Runtime URL and Repo URL ---
+     space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
+
+     if profile:
+         username = f"{profile.username}"
+         print(f"User logged in: {username}")
+     else:
+         print("User not logged in.")
+         return "Please log in to Hugging Face with the button.", None
+
+     api_url = DEFAULT_API_URL
+     questions_url = f"{api_url}/questions"
+     submit_url = f"{api_url}/submit"
+
+     # 1. Instantiate Agent (modify this part to create your agent)
+     try:
+         agent = BasicAgent()
+     except Exception as e:
+         print(f"Error instantiating agent: {e}")
+         return f"Error initializing agent: {e}", None
+     # In the case of an app running as a Hugging Face Space, this link points toward your codebase (useful for others, so please keep it public)
+     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
+     print(agent_code)
+
+     # 2. Fetch Questions
+     print(f"Fetching questions from: {questions_url}")
+     try:
+         response = requests.get(questions_url, timeout=15)
+         response.raise_for_status()
+         questions_data = response.json()
+         if not questions_data:
+             print("Fetched questions list is empty.")
+             return "Fetched questions list is empty or invalid format.", None
+         print(f"Fetched {len(questions_data)} questions.")
+     except requests.exceptions.RequestException as e:
+         print(f"Error fetching questions: {e}")
+         return f"Error fetching questions: {e}", None
+     except requests.exceptions.JSONDecodeError as e:
+         print(f"Error decoding JSON response from questions endpoint: {e}")
+         print(f"Response text: {response.text[:500]}")
+         return f"Error decoding server response for questions: {e}", None
+     except Exception as e:
+         print(f"An unexpected error occurred fetching questions: {e}")
+         return f"An unexpected error occurred fetching questions: {e}", None
+
+     # 3. Run your Agent
+     results_log = []
+     answers_payload = []
+     print(f"Running agent on {len(questions_data)} questions...")
+     for item in questions_data:
+         task_id = item.get("task_id")
+         question_text = item.get("question")
+         if not task_id or question_text is None:
+             print(f"Skipping item with missing task_id or question: {item}")
+             continue
+         try:
+             submitted_answer = agent(question_text)
+             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
+         except Exception as e:
+             print(f"Error running agent on task {task_id}: {e}")
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+
+     if not answers_payload:
+         print("Agent did not produce any answers to submit.")
+         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+     # 4. Prepare Submission
+     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+     print(status_update)
+
+     # 5. Submit
+     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+     try:
+         response = requests.post(submit_url, json=submission_data, timeout=60)
+         response.raise_for_status()
+         result_data = response.json()
+         final_status = (
+             f"Submission Successful!\n"
+             f"User: {result_data.get('username')}\n"
+             f"Overall Score: {result_data.get('score', 'N/A')}% "
+             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+             f"Message: {result_data.get('message', 'No message received.')}"
+         )
+         print("Submission successful.")
+         results_df = pd.DataFrame(results_log)
+         return final_status, results_df
+     except requests.exceptions.HTTPError as e:
+         error_detail = f"Server responded with status {e.response.status_code}."
+         try:
+             error_json = e.response.json()
+             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
+         except requests.exceptions.JSONDecodeError:
+             error_detail += f" Response: {e.response.text[:500]}"
+         status_message = f"Submission Failed: {error_detail}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.Timeout:
+         status_message = "Submission Failed: The request timed out."
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.RequestException as e:
+         status_message = f"Submission Failed: Network error - {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except Exception as e:
+         status_message = f"An unexpected error occurred during submission: {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+
+
+ # --- Build Gradio Interface using Blocks ---
+ with gr.Blocks() as demo:
+     gr.Markdown("# Basic Agent Evaluation Runner")
+     gr.Markdown(
+         """
+ **Instructions:**
+
+ 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
+ 2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
+ 3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
+
+ ---
+ **Disclaimers:**
+ Once you click the "Run Evaluation & Submit All Answers" button, it can take quite some time (this is the time the agent needs to go through all the questions).
+ This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to avoid the long wait on the submit button, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
+         """
+     )
+
+     gr.LoginButton()
+
+     run_button = gr.Button("Run Evaluation & Submit All Answers")
+
+     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+     # Removed max_rows=10 from DataFrame constructor
+     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
+
+     run_button.click(
+         fn=run_and_submit_all,
+         outputs=[status_output, results_table]
+     )
+
+ # Initialize the agent
+ agent = BasicAgent()
+
+ def process_question(question, file=None):
+     """
+     Process a question using the agent and return the answer.
+
+     Args:
+         question (str): The question to be answered
+         file (str, optional): Path to a file if the question requires file processing
+
+     Returns:
+         str: The agent's answer to the question
+     """
+     try:
+         # Basic input validation
+         if not question or not question.strip():
+             return "Please enter a question."
+
+         # Process the question using the agent
+         answer = agent(question, file)
+
+         # Format the response
+         if isinstance(answer, dict):
+             # If the answer is a dictionary (e.g., from file processing)
+             return f"Answer: {answer.get('answer', 'No answer found')}"
+         else:
+             # If the answer is a direct string
+             return f"Answer: {answer}"
+
+     except Exception as e:
+         return f"Error processing your question: {str(e)}"
+
+ # Create the Gradio interface
+ demo = gr.Interface(
+     fn=process_question,
+     inputs=[
+         gr.Textbox(
+             label="Question",
+             placeholder="Enter your question here...",
+             lines=3
+         ),
+         gr.File(
+             label="Optional File Upload",
+             file_types=["txt", "csv", "json"],
+             optional=True
+         )
+     ],
+     outputs=gr.Textbox(
+         label="Answer",
+         lines=5
+     ),
+     title="Question Answering Agent",
+     description="Ask any question and get a precise answer. You can also upload a file for file-based questions.",
+     examples=[
+         ["What is the capital of France?", None],
+         ["What is 2 + 2?", None],
+         ["List the first three planets in our solar system", None]
+     ],
+     theme=gr.themes.Soft()
+ )
+
+ # Launch the interface
+ if __name__ == "__main__":
+     print("\n" + "-"*30 + " App Starting " + "-"*30)
+     # Check for SPACE_HOST and SPACE_ID at startup for information
+     space_host_startup = os.getenv("SPACE_HOST")
+     space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
+
+     if space_host_startup:
+         print(f"✅ SPACE_HOST found: {space_host_startup}")
+         print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
+     else:
+         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
+
+     if space_id_startup: # Print repo URLs if SPACE_ID is found
+         print(f"✅ SPACE_ID found: {space_id_startup}")
+         print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
+         print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
+     else:
+         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
+
+     print("-"*(60 + len(" App Starting ")) + "\n")
+
+     print("Launching Gradio Interface for Basic Agent Evaluation...")
+     demo.launch(debug=True, share=False)
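
For reference, here are the request and response shapes that `run_and_submit_all` relies on, sketched as plain `requests` calls; the field names are taken from how the function above reads and writes them, and the username, space id, and answer values are placeholders.

```python
# Illustrative sketch of the scoring API round-trip used by run_and_submit_all.
import requests

api_url = "https://agents-course-unit4-scoring.hf.space"

# GET /questions returns a list of {"task_id": ..., "question": ...} items.
questions = requests.get(f"{api_url}/questions", timeout=15).json()

# POST /submit expects one submitted_answer per task_id, plus the user and code link.
payload = {
    "username": "your-hf-username",                                      # placeholder
    "agent_code": "https://huggingface.co/spaces/<space_id>/tree/main",  # placeholder
    "answers": [{"task_id": q["task_id"], "submitted_answer": "..."} for q in questions],
}
result = requests.post(f"{api_url}/submit", json=payload, timeout=60).json()
print(result.get("score"), result.get("message"))
```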
requirements.txt ADDED
@@ -0,0 +1,13 @@
+ gradio>=5.25.2
+ requests>=2.31.0
+ langchain>=0.1.0
+ langchain-google-genai>=0.0.5
+ langchain-groq>=0.0.1
+ langchain-huggingface>=0.0.5
+ langchain-community>=0.0.13
+ python-dotenv>=1.0.0
+ pandas>=2.0.0
+ arxiv>=2.0.0
+ wikipedia>=1.4.0
+ beautifulsoup4>=4.12.0
+ openpyxl>=3.1.0
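
As a small sanity check before launching `app.py`, the sketch below (an illustrative helper, not part of the commit) imports the module behind each pin above and confirms that `GROQ_API_KEY` is present in the `.env` file that `.gitignore` keeps out of the repository.

```python
# Map each requirements.txt pin to its importable module and try importing it.
import importlib
import os
from dotenv import load_dotenv

load_dotenv()

modules = [
    "gradio", "requests", "langchain", "langchain_google_genai", "langchain_groq",
    "langchain_huggingface", "langchain_community", "pandas", "arxiv",
    "wikipedia", "bs4", "openpyxl",
]
for name in modules:
    importlib.import_module(name)

if not os.getenv("GROQ_API_KEY"):
    raise SystemExit("GROQ_API_KEY is missing; add it to .env")
print("Dependencies import cleanly and GROQ_API_KEY is set.")
```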