asrarbw committed on
Commit
8e7badb
·
verified ·
1 Parent(s): d453564

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -30
app.py CHANGED
@@ -1,48 +1,51 @@
1
  import gradio as gr
2
  import os
 
3
  from huggingface_hub import InferenceClient
4
 
5
  # ===============================
6
- # LLM CLIENT
7
  # ===============================
8
  HF_TOKEN = os.getenv("HF")
9
-
10
- client = InferenceClient(model="Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN )
11
-
12
- # Problem Statement:
13
- # 1. Add your own HF token in the settings to get the LLM working.
14
- # 2. Update requirements.txt, app.py as needed.
15
- # 3. Develop a robust "Text-to-Code" analytical workflow hosted on a Hugging Face Space using the Qwen/Qwen2.5-7B-Instruct model.
16
- # The Workflow Requirements:
17
-
18
- # a.Code Generation (Planner): Transform natural language user queries into executable, sandboxed Python code (specifically using pandas).
19
-
20
- # b.Execution (Action): Securely execute the generated code on the Hugging Face Space server against the uploaded dataset.
21
-
22
- # c.Synthesis (Insight): Capture the raw output of the code execution and feed it back to the LLM to generate a natural language insight.
 
 
 
 
 
23
 
24
  # ===============================
25
- # UI
26
  # ===============================
27
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
28
- gr.Markdown("# 📊 Excel Analysis Agent (Code + Insight)")
29
-
30
- excel_file = gr.File(
31
- label="Upload Excel File",
32
- file_types=[".xlsx"]
33
- )
34
-
 
 
35
  gr.ChatInterface(
36
  fn=analyze_excel,
37
  additional_inputs=[excel_file],
38
  type="messages",
39
- examples=[
40
- ["Which provider has the highest average claim amount?", None],
41
- ["How many unique members are there?", None],
42
- ["Explain trends in processing time across regions.", None],
43
- ["Show python code to compute correlation between ClaimAmount and ProcessingCost.", None],
44
- ],
45
  )
46
 
47
  if __name__ == "__main__":
48
- demo.launch()
 
1
  import gradio as gr
2
  import os
3
+ import pandas as pd
4
  from huggingface_hub import InferenceClient
5
 
6
# ===============================
# LLM CLIENT SETUP
# ===============================
# The token comes from the Space secret named "HF"; the client targets the
# Qwen 2.5 7B instruct model on the Hugging Face inference API.
HF_TOKEN = os.getenv("HF")
client = InferenceClient(
    model="Qwen/Qwen2.5-7B-Instruct",
    token=HF_TOKEN,
)
11
+
12
def analyze_excel(message, history, file):
    """Chat handler for the assessment Space (placeholder scaffold).

    Problem Statement:
      1. Add your own HF token in the settings to get the LLM working.
      2. Update requirements.txt, app.py as needed.
      3. Develop a robust "Text-to-Code" analytical workflow.

    Requirements:
      a. Code Generation: transform natural-language user queries into
         executable, sandboxed Python code (specifically using pandas).
      b. Execution: securely execute the generated code on the Hugging Face
         Space server against the uploaded dataset.
      c. Synthesis: capture the raw output of the code execution and feed it
         back to the LLM to generate a natural-language insight.

    Args:
        message: The user's chat message (unused by this scaffold).
        history: Prior chat turns supplied by gr.ChatInterface (unused).
        file: The uploaded Excel file, or None when nothing is uploaded.

    Returns:
        A plain-text reply string — Gradio needs a string here, never None.
    """
    # Single conditional expression: prompt for a file, otherwise emit the
    # placeholder that candidates replace with the real workflow.
    missing = file is None
    return (
        "Please upload an Excel file to begin."
        if missing
        else "File received! Candidate: Implement the Planner-Action-Synthesis logic here."
    )
29
 
30
# ===============================
# UI CONFIGURATION
# ===============================
# Component creation order defines the on-screen layout: headers first,
# then the upload row, then the chat panel wired to analyze_excel.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 📊 Technical Assessment: Data Analysis Agent")
    gr.Markdown("### Objective: Build a Text-to-Code workflow using Qwen 2.5")

    with gr.Row():
        # The uploaded file is forwarded to the chat handler as an
        # additional input on every message.
        excel_file = gr.File(label="1. Upload Dataset (.xlsx)", file_types=[".xlsx"])

    gr.ChatInterface(
        fn=analyze_excel,
        additional_inputs=[excel_file],
        type="messages",
        description="2. Ask questions about your data (e.g., 'What is the average profit by region?')",
    )

if __name__ == "__main__":
    demo.launch()