techy-ai committed
Commit 1ea0be0 · 1 Parent(s): c949e67

build error for audio fixed

Files changed (4):
  1. Gradio_UI.py +79 -0
  2. README.md +1 -1
  3. app.py +163 -73
  4. evaluation_app.py +0 -169
Gradio_UI.py ADDED
@@ -0,0 +1,79 @@
+import gradio as gr
+import json
+from agent import build_graph
+from langchain_core.messages import HumanMessage
+
+# Load questions from metadata.jsonl
+def load_questions(jsonl_path):
+    questions = []
+    with open(jsonl_path, 'r', encoding='utf-8') as f:
+        for line in f:
+            try:
+                obj = json.loads(line)
+                if 'Question' in obj:
+                    questions.append(obj['Question'])
+            except Exception:
+                continue
+    return questions
+
+questions_list = load_questions('metadata.jsonl')
+# Basic filter for violence/intimate (simple keyword check)
+def is_safe_question(q):
+    unsafe_keywords = ['kill', 'murder', 'sex', 'intimate', 'violence', 'abuse']
+    q_lower = q.lower()
+    return not any(word in q_lower for word in unsafe_keywords)
+
+# Backend logic: send question to agent
+def get_answer(question, history):
+    if not is_safe_question(question):
+        return "Sorry, this question is not allowed.", history
+    try:
+        graph = build_graph(provider="groq")
+        messages = [HumanMessage(content=question)]
+        result = graph.invoke({"messages": messages})
+        # Get last message as answer
+        answer = result["messages"][-1].content if result["messages"] else "No answer."
+        history = history + [(question, answer)]
+        return answer, history
+    except Exception as e:
+        return f"Error: {str(e)}", history
+
+# Gradio UI
+with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple", secondary_hue="purple")) as demo:
+    gr.Markdown("# AI Agent Q&A", elem_id="title")
+    gr.Markdown("Ask your own question or select one from the list below.")
+    with gr.Row():
+        question_box = gr.Textbox(label="Type your question", lines=3)
+        question_list = gr.Dropdown(choices=questions_list, label="Or choose a question", interactive=True)
+    submit_btn = gr.Button("Submit", elem_id="submit-btn")
+    reset_btn = gr.Button("Reset", elem_id="reset-btn")
+    answer_box = gr.Textbox(label="Answer", interactive=False)
+    with gr.Accordion("Show previous Q&A", open=False):
+        history_box = gr.Dataframe(headers=["Question", "Answer"], datatype=["str", "str"], interactive=False)
+
+    state = gr.State([])
+    def submit_fn(q_text, q_list, history):
+        question = q_text if q_text else q_list
+        if not question:
+            return "Please enter or select a question.", history
+        return get_answer(question, history)
+
+    def reset_fn():
+        return "", []
+
+    submit_btn.click(
+        submit_fn,
+        inputs=[question_box, question_list, state],
+        outputs=[answer_box, state],
+        api_name="submit",
+    )
+    reset_btn.click(
+        reset_fn,
+        inputs=[],
+        outputs=[answer_box, state],
+        api_name="reset",
+    )
+    state.change(lambda h: h, inputs=state, outputs=history_box)
+
+if __name__ == "__main__":
+    demo.launch()
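`load_questions` expects `metadata.jsonl` to contain one JSON object per line, each carrying a `Question` key; malformed lines are skipped silently. A minimal, self-contained sketch of that format and of the parsing behaviour (the sample records below are invented for illustration):

```python
# Hypothetical illustration of the metadata.jsonl format assumed by load_questions:
# one JSON object per line; only the "Question" field is read, bad lines are skipped.
import json

sample_lines = [
    '{"task_id": "demo-1", "Question": "What is the capital of France?"}',
    '{"task_id": "demo-2", "Question": "Which year was the Eiffel Tower completed?"}',
    'not valid json -> skipped, mirroring the except/continue branch',
]

questions = []
for line in sample_lines:
    try:
        obj = json.loads(line)
        if "Question" in obj:
            questions.append(obj["Question"])
    except Exception:
        continue

print(questions)
# ['What is the capital of France?', 'Which year was the Eiffel Tower completed?']
```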
README.md CHANGED
@@ -6,7 +6,7 @@ colorTo: pink
 sdk: gradio
 sdk_version: 5.44.1
 hf_oauth: true
-app_file: app.py
+app_file: Gradio_UI.py
 pinned: false
 ---
 
app.py CHANGED
@@ -1,79 +1,169 @@
+import os
 import gradio as gr
-import json
+import requests
+import pandas as pd
+
+# --- Constants ---
+DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+
+# --- Import your custom agent graph from agent.py ---
 from agent import build_graph
 from langchain_core.messages import HumanMessage
 
-# Load questions from metadata.jsonl
-def load_questions(jsonl_path):
-    questions = []
-    with open(jsonl_path, 'r', encoding='utf-8') as f:
-        for line in f:
-            try:
-                obj = json.loads(line)
-                if 'Question' in obj:
-                    questions.append(obj['Question'])
-            except Exception:
-                continue
-    return questions
-
-questions_list = load_questions('metadata.jsonl')
-# Basic filter for violence/intimate (simple keyword check)
-def is_safe_question(q):
-    unsafe_keywords = ['kill', 'murder', 'sex', 'intimate', 'violence', 'abuse']
-    q_lower = q.lower()
-    return not any(word in q_lower for word in unsafe_keywords)
-
-# Backend logic: send question to agent
-def get_answer(question, history):
-    if not is_safe_question(question):
-        return "Sorry, this question is not allowed.", history
-    try:
-        graph = build_graph(provider="groq")
-        messages = [HumanMessage(content=question)]
-        result = graph.invoke({"messages": messages})
-        # Get last message as answer
-        answer = result["messages"][-1].content if result["messages"] else "No answer."
-        history = history + [(question, answer)]
-        return answer, history
-    except Exception as e:
-        return f"Error: {str(e)}", history
-
-# Gradio UI
-with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple", secondary_hue="purple")) as demo:
-    gr.Markdown("# AI Agent Q&A", elem_id="title")
-    gr.Markdown("Ask your own question or select one from the list below.")
-    with gr.Row():
-        question_box = gr.Textbox(label="Type your question", lines=3)
-        question_list = gr.Dropdown(choices=questions_list, label="Or choose a question", interactive=True)
-    submit_btn = gr.Button("Submit", elem_id="submit-btn")
-    reset_btn = gr.Button("Reset", elem_id="reset-btn")
-    answer_box = gr.Textbox(label="Answer", interactive=False)
-    with gr.Accordion("Show previous Q&A", open=False):
-        history_box = gr.Dataframe(headers=["Question", "Answer"], datatype=["str", "str"], interactive=False)
-
-    state = gr.State([])
-    def submit_fn(q_text, q_list, history):
-        question = q_text if q_text else q_list
-        if not question:
-            return "Please enter or select a question.", history
-        return get_answer(question, history)
-
-    def reset_fn():
-        return "", []
-
-    submit_btn.click(
-        submit_fn,
-        inputs=[question_box, question_list, state],
-        outputs=[answer_box, state],
-        api_name="submit",
-    )
-    reset_btn.click(
-        reset_fn,
-        inputs=[],
-        outputs=[answer_box, state],
-        api_name="reset",
-    )
-    state.change(lambda h: h, inputs=state, outputs=history_box)
+# --- Basic Agent Definition (wrapper around your graph) ---
+class BasicAgent:
+    def __init__(self):
+        print("Initializing BasicAgent with agent.py graph...")
+        self.graph = build_graph()
+
+    def __call__(self, question: str) -> str:
+        print(f"Agent received question: {question[:50]}...")
+        try:
+            messages = [HumanMessage(content=question)]
+            result = self.graph.invoke({"messages": messages})
+            answer = result["messages"][-1].content
+            if answer.lower().startswith("final answer"):
+                answer = answer.split(":", 1)[-1].strip()
+            print(f"Agent returning: {answer}")
+            return answer
+        except Exception as e:
+            print(f"Error inside agent: {e}")
+            return f"AGENT ERROR: {e}"
+
+
+def run_and_submit_all(profile: gr.OAuthProfile | None):
+    """
+    Fetches all questions, runs the BasicAgent on them, submits all answers,
+    and displays the results.
+    """
+    space_id = os.getenv("SPACE_ID")
+
+    if profile:
+        username = f"{profile.username}"
+        print(f"User logged in: {username}")
+    else:
+        print("User not logged in.")
+        return "Please Login to Hugging Face with the button.", None
+
+    api_url = DEFAULT_API_URL
+    questions_url = f"{api_url}/questions"
+    submit_url = f"{api_url}/submit"
+
+    # 1. Instantiate Agent
+    try:
+        agent = BasicAgent()
+    except Exception as e:
+        print(f"Error instantiating agent: {e}")
+        return f"Error initializing agent: {e}", None
+
+    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "N/A"
+    print(f"Agent code repo: {agent_code}")
+
+    # 2. Fetch Questions
+    print(f"Fetching questions from: {questions_url}")
+    try:
+        response = requests.get(questions_url, timeout=15)
+        response.raise_for_status()
+        questions_data = response.json()
+        if not questions_data:
+            return "Fetched questions list is empty or invalid format.", None
+        print(f"Fetched {len(questions_data)} questions.")
+    except Exception as e:
+        return f"Error fetching questions: {e}", None
+
+    # 3. Run your Agent
+    results_log = []
+    answers_payload = []
+    print(f"Running agent on {len(questions_data)} questions...")
+    for item in questions_data:
+        task_id = item.get("task_id")
+        question_text = item.get("question")
+        if not task_id or question_text is None:
+            continue
+        submitted_answer = agent(question_text)
+        answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
+        results_log.append({
+            "Task ID": task_id,
+            "Question": question_text,
+            "Submitted Answer": submitted_answer
+        })
+
+    if not answers_payload:
+        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+    # 4. Prepare Submission
+    submission_data = {
+        "username": username.strip(),
+        "agent_code": agent_code,
+        "answers": answers_payload,
+    }
+    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+    print(status_update)
+
+    # 5. Submit
+    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+    try:
+        response = requests.post(submit_url, json=submission_data, timeout=60)
+        response.raise_for_status()
+        result_data = response.json()
+        final_status = (
+            f"Submission Successful!\n"
+            f"User: {result_data.get('username')}\n"
+            f"Overall Score: {result_data.get('score', 'N/A')}% "
+            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+            f"Message: {result_data.get('message', 'No message received.')}"
+        )
+        results_df = pd.DataFrame(results_log)
+        return final_status, results_df
+    except Exception as e:
+        return f"Submission Failed: {e}", pd.DataFrame(results_log)
+
+
+# --- Build Gradio Interface using Blocks ---
+with gr.Blocks() as demo:
+    gr.Markdown("# Basic Agent Evaluation Runner")
+    gr.Markdown(
+        """
+        **Instructions:**
+
+        1. Clone this space and implement your logic in `agent.py`.
+        2. Log in with your Hugging Face account.
+        3. Click **Run Evaluation & Submit All Answers**.
+
+        ---
+        ⚠️ The process may take a while (agent needs to answer all questions).
+        """
+    )
+
+    gr.LoginButton()
+
+    run_button = gr.Button("Run Evaluation & Submit All Answers")
+    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
+
+    run_button.click(
+        fn=run_and_submit_all,
+        outputs=[status_output, results_table]
+    )
+
 
 if __name__ == "__main__":
-    demo.launch()
+    print("\n--- App Starting ---")
+    space_host_startup = os.getenv("SPACE_HOST")
+    space_id_startup = os.getenv("SPACE_ID")
+
+    if space_host_startup:
+        print(f"✅ SPACE_HOST: {space_host_startup}")
+        print(f" Runtime URL: https://{space_host_startup}.hf.space")
+    else:
+        print("ℹ️ SPACE_HOST not found (running locally?).")
+
+    if space_id_startup:
+        print(f"✅ SPACE_ID: {space_id_startup}")
+        print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
+    else:
+        print("ℹ️ SPACE_ID not found (running locally?).")
+
+    print("--------------------\n")
+    print("Launching Gradio Interface...")
+    demo.launch(debug=True, share=False)
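The payload that `run_and_submit_all` POSTs to the `/submit` endpoint of `DEFAULT_API_URL` is a plain JSON object: the username, a link to the Space's code, and one `{task_id, submitted_answer}` entry per question. A standalone sketch of that shape with placeholder values (no real agent run or network call):

```python
# Hypothetical dry run of the submission payload built in run_and_submit_all.
# The task ids, answers, username and space URL below are placeholders, not real data.
import json

answers_payload = [
    {"task_id": "task-001", "submitted_answer": "Paris"},
    {"task_id": "task-002", "submitted_answer": "42"},
]

submission_data = {
    "username": "your-hf-username",
    "agent_code": "https://huggingface.co/spaces/<your-space-id>/tree/main",
    "answers": answers_payload,
}

# This is the JSON that requests.post(submit_url, json=submission_data) would serialize:
print(json.dumps(submission_data, indent=2))
```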
evaluation_app.py DELETED
@@ -1,169 +0,0 @@
-import os
-import gradio as gr
-import requests
-import pandas as pd
-
-# --- Constants ---
-DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-
-# --- Import your custom agent graph from agent.py ---
-from agent import build_graph
-from langchain_core.messages import HumanMessage
-
-# --- Basic Agent Definition (wrapper around your graph) ---
-class BasicAgent:
-    def __init__(self):
-        print("Initializing BasicAgent with agent.py graph...")
-        self.graph = build_graph()
-
-    def __call__(self, question: str) -> str:
-        print(f"Agent received question: {question[:50]}...")
-        try:
-            messages = [HumanMessage(content=question)]
-            result = self.graph.invoke({"messages": messages})
-            answer = result["messages"][-1].content
-            if answer.lower().startswith("final answer"):
-                answer = answer.split(":", 1)[-1].strip()
-            print(f"Agent returning: {answer}")
-            return answer
-        except Exception as e:
-            print(f"Error inside agent: {e}")
-            return f"AGENT ERROR: {e}"
-
-
-def run_and_submit_all(profile: gr.OAuthProfile | None):
-    """
-    Fetches all questions, runs the BasicAgent on them, submits all answers,
-    and displays the results.
-    """
-    space_id = os.getenv("SPACE_ID")
-
-    if profile:
-        username = f"{profile.username}"
-        print(f"User logged in: {username}")
-    else:
-        print("User not logged in.")
-        return "Please Login to Hugging Face with the button.", None
-
-    api_url = DEFAULT_API_URL
-    questions_url = f"{api_url}/questions"
-    submit_url = f"{api_url}/submit"
-
-    # 1. Instantiate Agent
-    try:
-        agent = BasicAgent()
-    except Exception as e:
-        print(f"Error instantiating agent: {e}")
-        return f"Error initializing agent: {e}", None
-
-    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "N/A"
-    print(f"Agent code repo: {agent_code}")
-
-    # 2. Fetch Questions
-    print(f"Fetching questions from: {questions_url}")
-    try:
-        response = requests.get(questions_url, timeout=15)
-        response.raise_for_status()
-        questions_data = response.json()
-        if not questions_data:
-            return "Fetched questions list is empty or invalid format.", None
-        print(f"Fetched {len(questions_data)} questions.")
-    except Exception as e:
-        return f"Error fetching questions: {e}", None
-
-    # 3. Run your Agent
-    results_log = []
-    answers_payload = []
-    print(f"Running agent on {len(questions_data)} questions...")
-    for item in questions_data:
-        task_id = item.get("task_id")
-        question_text = item.get("question")
-        if not task_id or question_text is None:
-            continue
-        submitted_answer = agent(question_text)
-        answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
-        results_log.append({
-            "Task ID": task_id,
-            "Question": question_text,
-            "Submitted Answer": submitted_answer
-        })
-
-    if not answers_payload:
-        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
-
-    # 4. Prepare Submission
-    submission_data = {
-        "username": username.strip(),
-        "agent_code": agent_code,
-        "answers": answers_payload,
-    }
-    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
-    print(status_update)
-
-    # 5. Submit
-    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
-    try:
-        response = requests.post(submit_url, json=submission_data, timeout=60)
-        response.raise_for_status()
-        result_data = response.json()
-        final_status = (
-            f"Submission Successful!\n"
-            f"User: {result_data.get('username')}\n"
-            f"Overall Score: {result_data.get('score', 'N/A')}% "
-            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
-            f"Message: {result_data.get('message', 'No message received.')}"
-        )
-        results_df = pd.DataFrame(results_log)
-        return final_status, results_df
-    except Exception as e:
-        return f"Submission Failed: {e}", pd.DataFrame(results_log)
-
-
-# --- Build Gradio Interface using Blocks ---
-with gr.Blocks() as demo:
-    gr.Markdown("# Basic Agent Evaluation Runner")
-    gr.Markdown(
-        """
-        **Instructions:**
-
-        1. Clone this space and implement your logic in `agent.py`.
-        2. Log in with your Hugging Face account.
-        3. Click **Run Evaluation & Submit All Answers**.
-
-        ---
-        ⚠️ The process may take a while (agent needs to answer all questions).
-        """
-    )
-
-    gr.LoginButton()
-
-    run_button = gr.Button("Run Evaluation & Submit All Answers")
-    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
-    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
-
-    run_button.click(
-        fn=run_and_submit_all,
-        outputs=[status_output, results_table]
-    )
-
-
-if __name__ == "__main__":
-    print("\n--- App Starting ---")
-    space_host_startup = os.getenv("SPACE_HOST")
-    space_id_startup = os.getenv("SPACE_ID")
-
-    if space_host_startup:
-        print(f"✅ SPACE_HOST: {space_host_startup}")
-        print(f" Runtime URL: https://{space_host_startup}.hf.space")
-    else:
-        print("ℹ️ SPACE_HOST not found (running locally?).")
-
-    if space_id_startup:
-        print(f"✅ SPACE_ID: {space_id_startup}")
-        print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-    else:
-        print("ℹ️ SPACE_ID not found (running locally?).")
-
-    print("--------------------\n")
-    print("Launching Gradio Interface...")
-    demo.launch(debug=True, share=False)