josondev commited on
Commit
2ba0de9
·
verified ·
1 Parent(s): c3e7222

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +157 -222
app.py CHANGED
@@ -1,242 +1,177 @@
1
  import os
2
  import gradio as gr
3
- import requests
4
- import pandas as pd
5
  from dotenv import load_dotenv
6
- from langchain_openai import ChatOpenAI
7
- from langchain_nvidia_ai_endpoints import ChatNVIDIA
8
- from langchain_groq import ChatGroq
9
  from langchain_google_genai import ChatGoogleGenerativeAI
 
 
 
 
 
 
 
 
 
10
 
11
  # Load environment variables
12
  load_dotenv()
13
 
14
- # --- Constants ---
15
- DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
16
-
17
- # --- Basic Agent Definition ---
18
- class BasicAgent:
19
- def __init__(self, provider="nvidia"):
20
- self.provider = provider.lower()
21
- if self.provider == "nvidia":
22
- self.llm = ChatNVIDIA(
23
- model="meta/llama-3.3-70b-instruct",
24
- nvidia_api_key=os.getenv("NVIDIA_API_KEY")
25
- )
26
- elif self.provider == "groq":
27
- self.llm = ChatGroq(
28
- model="llama3-70b-8192",
29
- api_key=os.getenv("GROQ_API_KEY")
30
- )
31
- elif self.provider == "google":
32
- self.llm = ChatGoogleGenerativeAI(
33
- model="gemini-2.0-flash",
34
- temperature=0.1,
35
- max_tokens=1024,
36
- api_key=os.getenv("GOOGLE_API_KEY"),
37
- streaming=False
38
- )
39
- elif self.provider == "openai":
40
- self.llm = ChatOpenAI(
41
- model="gpt-3.5-turbo",
42
- api_key=os.getenv("OPENAI_API_KEY")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  )
44
- else:
45
- raise ValueError("Unsupported provider. Choose from: nvidia, groq, google, openai.")
46
-
47
- self.instructions = (
48
- "You are a helpful assistant. For every question, reply with only the answer—no explanation, "
49
- "no units, and no extra words. If the answer is a number, just return the number. "
50
- "If it is a word or phrase, return only that. If it is a list, return a comma-separated list with no extra words. "
51
- "Do not include any prefix, suffix, or explanation."
52
  )
53
- print(f"BasicAgent initialized with provider: {self.provider}")
54
-
55
- def __call__(self, question: str) -> str:
56
- prompt = f"{self.instructions}\n\n{question}"
57
- print(f"Agent received question (first 50 chars): {question[:50]}...")
58
- response = self.llm.invoke(prompt)
59
- answer = response.content.strip() if hasattr(response, "content") else str(response)
60
- # Remove "FINAL ANSWER:" or similar prefixes if present
61
- for prefix in ["FINAL ANSWER:", "Final answer:", "final answer:"]:
62
- if answer.lower().startswith(prefix.lower()):
63
- answer = answer[len(prefix):].strip()
64
- print(f"Agent returning answer: {answer}")
65
- return answer
66
-
67
- def run_and_submit_all(profile: gr.OAuthProfile | None, provider="nvidia"):
68
- """
69
- Fetches all questions, runs the BasicAgent on them, submits all answers,
70
- and displays the results.
71
- """
72
- space_id = os.getenv("SPACE_ID") # For codebase link
73
-
74
- if profile:
75
- username = f"{profile.username}"
76
- print(f"User logged in: {username}")
77
  else:
78
- print("User not logged in.")
79
- return "Please Login to Hugging Face with the button.", None
80
-
81
- api_url = DEFAULT_API_URL
82
- questions_url = f"{api_url}/questions"
83
- submit_url = f"{api_url}/submit"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
- # 1. Instantiate Agent
 
86
  try:
87
- agent = BasicAgent(provider=provider)
 
 
 
 
88
  except Exception as e:
89
- print(f"Error instantiating agent: {e}")
90
- return f"Error initializing agent: {e}", None
91
-
92
- agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
93
- print(agent_code)
94
 
95
- # 2. Fetch Questions
96
- print(f"Fetching questions from: {questions_url}")
97
- try:
98
- response = requests.get(questions_url, timeout=15)
99
- response.raise_for_status()
100
- questions_data = response.json()
101
- if not questions_data:
102
- print("Fetched questions list is empty.")
103
- return "Fetched questions list is empty or invalid format.", None
104
- print(f"Fetched {len(questions_data)} questions.")
105
- except requests.exceptions.RequestException as e:
106
- print(f"Error fetching questions: {e}")
107
- return f"Error fetching questions: {e}", None
108
- except requests.exceptions.JSONDecodeError as e:
109
- print(f"Error decoding JSON response from questions endpoint: {e}")
110
- print(f"Response text: {response.text[:500]}")
111
- return f"Error decoding server response for questions: {e}", None
112
- except Exception as e:
113
- print(f"An unexpected error occurred fetching questions: {e}")
114
- return f"An unexpected error occurred fetching questions: {e}", None
115
-
116
- # 3. Run your Agent
117
- results_log = []
118
- answers_payload = []
119
- print(f"Running agent on {len(questions_data)} questions...")
120
- for item in questions_data:
121
- task_id = item.get("task_id")
122
- question_text = item.get("question")
123
- if not task_id or question_text is None:
124
- print(f"Skipping item with missing task_id or question: {item}")
125
- continue
126
- try:
127
- submitted_answer = agent(question_text)
128
- answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
129
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
130
- except Exception as e:
131
- print(f"Error running agent on task {task_id}: {e}")
132
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
133
-
134
- if not answers_payload:
135
- print("Agent did not produce any answers to submit.")
136
- return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
137
-
138
- # 4. Prepare Submission
139
- submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
140
- status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
141
- print(status_update)
142
- # 5. Submit
143
- print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
144
- try:
145
- response = requests.post(submit_url, json=submission_data, timeout=60)
146
- response.raise_for_status()
147
- result_data = response.json()
148
- final_status = (
149
- f"Submission Successful!\n"
150
- f"User: {result_data.get('username')}\n"
151
- f"Overall Score: {result_data.get('score', 'N/A')}% "
152
- f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
153
- f"Message: {result_data.get('message', 'No message received.')}"
154
- )
155
- print("Submission successful.")
156
- results_df = pd.DataFrame(results_log)
157
- return final_status, results_df
158
- except requests.exceptions.HTTPError as e:
159
- error_detail = f"Server responded with status {e.response.status_code}."
160
- try:
161
- error_json = e.response.json()
162
- error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
163
- except requests.exceptions.JSONDecodeError:
164
- error_detail += f" Response: {e.response.text[:500]}"
165
- status_message = f"Submission Failed: {error_detail}"
166
- print(status_message)
167
- results_df = pd.DataFrame(results_log)
168
- return status_message, results_df
169
- except requests.exceptions.Timeout:
170
- status_message = "Submission Failed: The request timed out."
171
- print(status_message)
172
- results_df = pd.DataFrame(results_log)
173
- return status_message, results_df
174
- except requests.exceptions.RequestException as e:
175
- status_message = f"Submission Failed: Network error - {e}"
176
- print(status_message)
177
- results_df = pd.DataFrame(results_log)
178
- return status_message, results_df
179
- except Exception as e:
180
- status_message = f"An unexpected error occurred during submission: {e}"
181
- print(status_message)
182
- results_df = pd.DataFrame(results_log)
183
- return status_message, results_df
184
-
185
- # --- Build Gradio Interface using Blocks ---
186
  with gr.Blocks() as demo:
187
- gr.Markdown("# Basic Agent Evaluation Runner")
188
- gr.Markdown(
189
- """
190
- **Instructions:**
191
- 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
192
- 2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
193
- 3. Select your preferred provider and click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
194
- ---
195
- **Disclaimers:**
196
- Once clicking on the "submit" button, it can take quite some time (this is the time for the agent to go through all the questions).
197
- This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, for the delay process of the submit button, a solution could be to cache the answers and submit in a separate action or even to answer the questions in async.
198
- """
199
- )
200
-
201
- gr.LoginButton()
202
-
203
- provider_dropdown = gr.Dropdown(
204
- choices=["nvidia", "groq", "google", "openai"],
205
- value="nvidia",
206
- label="Choose LLM Provider"
207
  )
208
-
209
- run_button = gr.Button("Run Evaluation & Submit All Answers")
210
-
211
- status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
212
- results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
213
-
214
- run_button.click(
215
- fn=lambda profile, provider: run_and_submit_all(profile, provider),
216
- inputs=[gr.OAuthProfile(), provider_dropdown],
217
- outputs=[status_output, results_table]
218
  )
219
 
220
  if __name__ == "__main__":
221
- print("\n" + "-"*30 + " App Starting " + "-"*30)
222
- space_host_startup = os.getenv("SPACE_HOST")
223
- space_id_startup = os.getenv("SPACE_ID")
224
-
225
- if space_host_startup:
226
- print(f"✅ SPACE_HOST found: {space_host_startup}")
227
- print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
228
- else:
229
- print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
230
-
231
- if space_id_startup:
232
- print(f"✅ SPACE_ID found: {space_id_startup}")
233
- print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
234
- print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
235
- else:
236
- print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
237
-
238
- print("-"*(60 + len(" App Starting ")) + "\n")
239
-
240
- print("Launching Gradio Interface for Basic Agent Evaluation...")
241
- demo.launch(debug=True, share=False)
242
-
 
1
  import os
2
  import gradio as gr
 
 
3
  from dotenv import load_dotenv
4
+ from langgraph.graph import START, StateGraph, MessagesState
5
+ from langgraph.prebuilt import tools_condition, ToolNode
 
6
  from langchain_google_genai import ChatGoogleGenerativeAI
7
+ from langchain_groq import ChatGroq
8
+ from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
9
+ from langchain_community.embeddings import HuggingFaceEmbeddings
10
+ from langchain_community.tools.tavily_search import TavilySearchResults
11
+ from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
12
+ from langchain_community.vectorstores import SupabaseVectorStore
13
+ from langchain_core.messages import SystemMessage, HumanMessage
14
+ from langchain_core.tools import tool
15
+ from supabase import create_client, Client
16
 
17
  # Load environment variables
18
  load_dotenv()
19
 
20
+ # Tool definitions remain unchanged
21
@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the product."""
    # @tool derives the tool description from the docstring and raises at
    # definition time if it is missing — the docstring is required, not cosmetic.
    return a * b
24
+
25
@tool
def add(a: int, b: int) -> int:
    """Add two integers and return the sum."""
    # Docstring doubles as the tool description required by @tool.
    return a + b
28
+
29
@tool
def subtract(a: int, b: int) -> int:
    """Subtract b from a and return the difference."""
    # Docstring doubles as the tool description required by @tool.
    return a - b
32
+
33
@tool
def divide(a: int, b: int) -> float:
    """Divide a by b and return the quotient.

    Raises:
        ValueError: if b is zero.
    """
    # Return annotation corrected to float: true division always yields float.
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b
38
+
39
@tool
def modulus(a: int, b: int) -> int:
    """Return a modulo b."""
    # Docstring doubles as the tool description required by @tool.
    return a % b
42
+
43
@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia and return up to 2 results as formatted document text."""
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc.metadata["source"]}"/>\n{doc.page_content}\n</Document>'
        for doc in search_docs
    )
    # Fix: the original returned {"wiki_results": ...}, contradicting the
    # declared -> str contract; return the formatted string directly.
    return formatted_search_docs
50
+
51
@tool
def web_search(query: str) -> str:
    """Search the web via Tavily and return up to 3 results as formatted text."""
    # Fix: TavilySearchResults.invoke returns a list of plain dicts with
    # "url" and "content" keys, NOT Document objects — the original's
    # doc.metadata["source"] / doc.page_content access raised AttributeError.
    search_results = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{res.get("url", "")}"/>\n{res.get("content", "")}\n</Document>'
        for res in search_results
    )
    # Return a plain string to honor the declared -> str contract
    # (original returned a dict).
    return formatted_search_docs
58
+
59
@tool
def arvix_search(query: str) -> str:
    """Search arXiv and return up to 3 results (first 1000 chars each) as formatted text."""
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    # NOTE(review): ArxivLoader document metadata typically exposes keys like
    # "Title"/"Published", not "source" — the original's metadata["source"]
    # would raise KeyError; fall back to the title. TODO confirm against the
    # installed langchain_community version.
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc.metadata.get("source", doc.metadata.get("Title", ""))}"/>\n'
        f'{doc.page_content[:1000]}\n</Document>'
        for doc in search_docs
    )
    # Return a plain string to honor the declared -> str contract
    # (original returned a dict). Name kept as-is ("arvix") since it is the
    # public tool identifier.
    return formatted_search_docs
66
+
67
+ # System prompt definition
68
+ SYSTEM_PROMPT = """You are a helpful assistant. For every question, reply with only the answer—no explanation,
69
+ no units, and no extra words. If the answer is a number, just return the number.
70
+ If it is a word or phrase, return only that. If it is a list, return a comma-separated list with no extra words.
71
+ Do not include any prefix, suffix, or explanation."""
72
+ sys_msg = SystemMessage(content=SYSTEM_PROMPT)
73
+
74
+ # Initialize vector store
75
+ embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
76
+ supabase: Client = create_client(
77
+ os.environ["SUPABASE_URL"],
78
+ os.environ["SUPABASE_SERVICE_KEY"]
79
+ )
80
+ vector_store = SupabaseVectorStore(
81
+ client=supabase,
82
+ embedding=embeddings,
83
+ table_name="documents",
84
+ query_name="match_documents_langchain",
85
+ )
86
+
87
# Every tool exposed to the agent: five arithmetic helpers plus the
# Wikipedia / Tavily web / arXiv search tools defined above.
tools = [multiply, add, subtract, divide, modulus,
         wiki_search, web_search, arvix_search]
89
+
90
# Build graph function with multi-provider support
def build_graph(provider: str = "groq"):
    """Compile the LangGraph agent: START -> retriever -> assistant <-> tools.

    Args:
        provider: LLM backend, one of "google", "groq", "huggingface".

    Returns:
        A compiled graph ready for .invoke({"messages": [...]}).

    Raises:
        ValueError: if the provider name is not recognized.
    """
    # Provider selection
    if provider == "google":
        llm = ChatGoogleGenerativeAI(
            model="gemini-2.0-flash",
            temperature=0,
            api_key=os.getenv("GOOGLE_API_KEY")
        )
    elif provider == "groq":
        llm = ChatGroq(
            model="llama3-70b-8192",
            temperature=0,
            api_key=os.getenv("GROQ_API_KEY")
        )
    elif provider == "huggingface":
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                endpoint_url="https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2",
                temperature=0,
                # NOTE(review): HuggingFaceEndpoint's token parameter is
                # `huggingfacehub_api_token`; `api_key` may be ignored or
                # rejected — confirm against the installed version.
                api_key=os.getenv("HF_API_KEY")
            )
        )
    else:
        raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")

    llm_with_tools = llm.bind_tools(tools)

    # Graph nodes
    def retriever(state: MessagesState):
        # Prepend one similar stored question (if any) as extra context for
        # the assistant. MessagesState merges returned messages by ID, so
        # returning the full history plus the new one does not duplicate it.
        similar_question = vector_store.similarity_search(state["messages"][-1].content, k=1)
        if similar_question:
            example_msg = HumanMessage(
                content=f"Similar reference: {similar_question[0].page_content[:200]}..."
            )
            return {"messages": state["messages"] + [example_msg]}
        return {"messages": state["messages"]}

    def assistant(state: MessagesState):
        # Fix: sys_msg (the terse answer-format system prompt defined at module
        # level) was never passed to the model; prepend it so the instructions
        # actually apply to every LLM call.
        return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}

    # Build graph
    builder = StateGraph(MessagesState)
    builder.add_node("retriever", retriever)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))

    builder.add_edge(START, "retriever")
    builder.add_edge("retriever", "assistant")
    # tools_condition routes to "tools" when the last AI message carries tool
    # calls, otherwise to END.
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    return builder.compile()
144
 
145
# Gradio interface
# Cache one compiled graph per provider: the original rebuilt and recompiled
# the entire graph (plus LLM client) on every button click.
_GRAPH_CACHE = {}

def run_agent(question, provider):
    """Run the agent graph on a single question.

    Args:
        question: the user's question text.
        provider: LLM backend name forwarded to build_graph.

    Returns:
        The final answer string, or "Error: ..." on any failure.
    """
    try:
        graph = _GRAPH_CACHE.get(provider)
        if graph is None:
            graph = build_graph(provider)
            _GRAPH_CACHE[provider] = graph
        result = graph.invoke({"messages": [HumanMessage(content=question)]})
        return result["messages"][-1].content
    except Exception as e:
        # Surface the failure in the UI textbox instead of crashing the worker.
        return f"Error: {str(e)}"
 
 
 
 
155
 
156
# Create Gradio interface
# Layout: provider picker, question box, run button, read-only answer box.
with gr.Blocks() as demo:
    gr.Markdown("## LangGraph Multi-Provider Agent")

    provider = gr.Dropdown(
        label="LLM Provider",
        choices=["groq", "google", "huggingface"],
        value="groq",
    )
    question = gr.Textbox(label="Your Question")
    submit_btn = gr.Button("Run Agent")
    output = gr.Textbox(label="Agent Response", interactive=False)

    # Wire the button to the agent runner.
    submit_btn.click(fn=run_agent, inputs=[question, provider], outputs=output)
175
 
176
if __name__ == "__main__":
    # Launch the Gradio app (blocking) when executed as a script.
    demo.launch()