triflix committed
Commit f9990dd · verified · 1 Parent(s): 93ab69f

Update app.py

Files changed (1)
  1. app.py +239 -95
app.py CHANGED
@@ -1,120 +1,264 @@
- from fastapi import FastAPI, File, UploadFile, Form
- from fastapi.responses import JSONResponse, StreamingResponse
- from fastapi.middleware.cors import CORSMiddleware
  import pandas as pd
  from google import genai
  from google.genai import types
- import os
- import json
- import asyncio
-
- # -------------------------------
- # 🔑 Configuration
- # -------------------------------
- API_KEY = os.getenv("GEMINI_API_KEY", "AIzaSyB1jgGCuzg7ELPwNEEwaluQZoZhxhgLmAs")
- MODEL = "gemini-2.5-flash-lite"
- client = genai.Client(api_key=API_KEY)
-
- # -------------------------------
- # FastAPI Setup
- # -------------------------------
- app = FastAPI()
-
- # Enable CORS
- app.add_middleware(
-     CORSMiddleware,
-     allow_origins=["*"],
-     allow_methods=["*"],
-     allow_headers=["*"],
  )

- # -------------------------------
- # 🛠️ Helper Functions
- # -------------------------------
- def get_metadata(df: pd.DataFrame):
-     # Convert all timestamps to string to avoid JSON serialization issues
-     df_serializable = df.copy()
-     for col in df_serializable.select_dtypes(include=['datetime64[ns]']).columns:
-         df_serializable[col] = df_serializable[col].astype(str)
      return {
-         "columns": list(df_serializable.columns),
-         "dtypes": df_serializable.dtypes.apply(lambda x: str(x)).to_dict(),
-         "num_rows": df_serializable.shape[0],
-         "num_cols": df_serializable.shape[1],
-         "null_counts": df_serializable.isnull().sum().to_dict(),
-         "unique_counts": df_serializable.nunique().to_dict(),
-         "sample_rows": df_serializable.head(3).to_dict(orient="records"),
      }

- async def stream_insights(user_query, metadata):
-     """Stream insights step by step."""
-     yield json.dumps({"status": "started", "message": "File received. Extracting metadata..."}) + "\n"
-     await asyncio.sleep(0.5)

-     yield json.dumps({"status": "metadata", "metadata": metadata}) + "\n"
-     await asyncio.sleep(0.5)

-     # Gemini system prompt
-     system_prompt = """
-     You are a data analysis assistant.
-     Always return JSON with this schema:
      {
-         "excel_info": {...},
-         "data_type_context": "...",
-         "auto_insights": {
-             "insights": [
-                 {... Efficiency Analysis ...},
-                 {... Cumulative Performance ...},
-                 {... Process Issues ...},
-                 {... Planning vs Projection ...},
-                 {... Loss Analysis ...}
-             ]
-         },
-         "query_insights": {...}
      }
      """
-     user_prompt = f"Dataset metadata: {metadata}\nUser request: {user_query}"

-     contents = [types.Content(role="user", parts=[types.Part.from_text(text=user_prompt)])]
-     config = types.GenerateContentConfig(
-         temperature=0,
-         max_output_tokens=2000,
-         system_instruction=[types.Part.from_text(text=system_prompt)],
-     )

-     result = ""
-     for chunk in client.models.generate_content_stream(model=MODEL, contents=contents, config=config):
-         if chunk.text:
-             result += chunk.text

      try:
-         parsed = json.loads(result)
-     except Exception:
-         yield json.dumps({"status": "error", "raw_output": result}) + "\n"
-         return

-     yield json.dumps({"status": "excel_info", "excel_info": parsed.get("excel_info", {})}) + "\n"
-     await asyncio.sleep(0.5)

-     yield json.dumps({"status": "context", "data_type_context": parsed.get("data_type_context", "")}) + "\n"
-     await asyncio.sleep(0.5)

-     for insight in parsed.get("auto_insights", {}).get("insights", []):
-         yield json.dumps({"status": "insight", "insight": insight}) + "\n"
-         await asyncio.sleep(0.5)

-     yield json.dumps({"status": "query", "query_insights": parsed.get("query_insights", {})}) + "\n"
-     yield json.dumps({"status": "completed", "message": "All insights generated"}) + "\n"

- # -------------------------------
- # 🌐 Routes
- # -------------------------------
- @app.post("/stream_insights")
- async def stream_insight_file(file: UploadFile = File(...), query: str = Form("Analyze the dataset")):
      try:
-         df = pd.read_excel(file.file)
      except Exception as e:
-         return JSONResponse({"success": False, "error": f"Failed to read file: {str(e)}"})

-     metadata = get_metadata(df)
-     return StreamingResponse(stream_insights(query, metadata), media_type="application/json")
+ # -----------------------------
+ # Imports
+ # -----------------------------
+ import os
+ import uuid
+ import json
+ import logging
+ import subprocess
+ import sys
+ from pathlib import Path
+
  import pandas as pd
+ from dotenv import load_dotenv
+ from fastapi import FastAPI, UploadFile, File, HTTPException, Body
+ from fastapi.responses import JSONResponse
+ from pydantic import BaseModel, Field
+
  from google import genai
  from google.genai import types
+
+ # -----------------------------
+ # Initial Configuration
+ # -----------------------------
+
+ # Load environment variables from a .env file (expects GOOGLE_API_KEY=<your key>)
+ load_dotenv()
+
+ # Set up logging
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)
+
+ # Create an 'uploads' directory if it doesn't exist
+ UPLOADS_DIR = Path("uploads")
+ UPLOADS_DIR.mkdir(exist_ok=True)
+
+ # -----------------------------
+ # Initialize Gemini Client & FastAPI App
+ # -----------------------------
+
+ # Configure the Gemini client with the API key from environment variables
+ try:
+     api_key = os.getenv("GOOGLE_API_KEY")
+     if not api_key:
+         raise ValueError("GOOGLE_API_KEY not found in environment variables.")
+     client = genai.Client(api_key=api_key)
+     logger.info("Google GenAI client configured successfully.")
+ except Exception as e:
+     logger.error(f"Failed to configure Google GenAI client: {e}")
+     # We exit if the client can't be configured, as the app is useless without it.
+     sys.exit(1)
+
+ # Initialize FastAPI app
+ app = FastAPI(
+     title="Data Analysis and Visualization API",
+     description="An API to analyze Excel files and generate Python code for visualizations using Google's Gemini.",
+     version="1.0.0"
  )

+ # -----------------------------
+ # Pydantic Models for API I/O
+ # -----------------------------
+
+ class AnalysisResponse(BaseModel):
+     file_id: str = Field(..., description="Unique identifier for the uploaded file.")
+     summary: str = Field(..., description="AI-generated summary of the data.")
+     suggestions: list[str] = Field(..., description="List of AI-generated analysis/visualization suggestions.")
+
+ class VisualizationRequest(BaseModel):
+     file_id: str = Field(..., description="The unique identifier of the file to be visualized.")
+     command: str = Field(..., description="The selected suggestion/command from the analysis step.")
+
+ class VisualizationResponse(BaseModel):
+     type: str = Field(..., description="The type of visualization (e.g., 'bar', 'pie').")
+     explanation: str = Field(..., description="A one-sentence description of the visualization.")
+     data: dict | list = Field(..., description="The numeric JSON data produced by the executed code.")
+     generated_code: str = Field(..., description="The Python code that was generated and executed.")
+
+
+ # -----------------------------
+ # Helper Functions
+ # -----------------------------
+
+ def get_metadata(df: pd.DataFrame) -> dict:
+     """Extracts metadata from a pandas DataFrame."""
      return {
+         "columns": list(df.columns),
+         "dtypes": df.dtypes.apply(str).to_dict(),
+         "null_counts": df.isnull().sum().to_dict(),
+         "unique_counts": df.nunique().to_dict(),
+         "sample_rows": df.head(3).to_dict(orient="records")
      }

+ def generate_metadata_analysis(metadata: dict) -> dict:
+     """Generates a JSON summary and suggestions from metadata using Gemini."""
+     metadata_text = json.dumps(metadata, indent=2)
+     model = "gemini-pro"  # Chosen here for this kind of structured generation
+
+     system_instruction = """
+     You are a structured data analysis AI. Your output must be strict JSON.

+     1. Summary:
+     Provide a concise description of what kind of data this is, what it likely represents, and its domain or use-case. Indicate assumptions if needed.

+     2. Suggestions:
+     Provide exactly three actionable analyses and visualizations based on the metadata. For each, specify the columns to use and the type of insight to be gained.
+
+     Respond in this exact JSON format:
      {
+         "summary": "<short summary>",
+         "suggestions": ["<analysis #1>", "<analysis #2>", "<analysis #3>"]
      }
      """

+     try:
+         response = client.models.generate_content(
+             model=model,
+             contents=f"Analyze the following structured data metadata:\n{metadata_text}",
+             config=types.GenerateContentConfig(
+                 system_instruction=system_instruction,
+                 response_mime_type="application/json"
+             )
+         )
+         return json.loads(response.text)
+     except Exception as e:
+         logger.error(f"Error generating metadata analysis from Gemini: {e}")
+         raise HTTPException(status_code=500, detail="Failed to get analysis from AI model.")
+
+ def generate_visualization_code(file_path: str, command: str) -> dict:
+     """Generates Python code for visualization based on a user command."""
+     model = "gemini-pro"

+     system_instruction = f"""
+     You are a Python assistant that MUST return output strictly in JSON format and NOTHING else.
+     The top-level JSON MUST contain exactly three keys in this order: "type", "code", "explanation".
+
+     Requirements:
+     - "type": The suggested visualization type as a lowercase string (e.g., "bar", "pie", "line", "scatter").
+     - "code": A string of Python code. This code MUST print a JSON object to standard output. The JSON should contain the data needed for the plot. Use pandas to process the data.
+     - The code must access the data using this exact line: df = pd.read_excel(r"{file_path}")
+     - "explanation": A concise, one-sentence description of what the visualization shows.
+     """

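+     # For illustration only (not part of the prompt above): a "code" payload
+     # satisfying this contract could look like the following, where the
+     # "Region" column and the file name are hypothetical:
+     #
+     #     import json
+     #     import pandas as pd
+     #     df = pd.read_excel(r"uploads/<file_id>_example.xlsx")
+     #     counts = df["Region"].value_counts()
+     #     print(json.dumps({"labels": counts.index.tolist(),
+     #                       "values": counts.tolist()}))
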
      try:
+         response = client.models.generate_content(
+             model=model,
+             contents=f"Generate Python code to create a {command}",
+             config=types.GenerateContentConfig(
+                 system_instruction=system_instruction,
+                 response_mime_type="application/json"
+             )
+         )
+         return json.loads(response.text)
+     except Exception as e:
+         logger.error(f"Error generating visualization code from Gemini: {e}")
+         raise HTTPException(status_code=500, detail="Failed to generate visualization code from AI model.")
 
+ # -----------------------------
+ # API Endpoints
+ # -----------------------------

+ @app.post("/analyze", response_model=AnalysisResponse)
+ async def analyze_file(file: UploadFile = File(...)):
+     """
+     Upload an Excel file, get its metadata, and receive an AI-generated
+     summary and a list of visualization suggestions.
+     """
+     if not file.filename.endswith(('.xlsx', '.xls')):
+         raise HTTPException(status_code=400, detail="Invalid file type. Please upload an Excel file.")

+     file_id = str(uuid.uuid4())
+     file_path = UPLOADS_DIR / f"{file_id}_{file.filename}"

+     try:
+         # Save the uploaded file
+         with open(file_path, "wb") as buffer:
+             buffer.write(await file.read())
+         logger.info(f"File '{file.filename}' saved as '{file_path.name}'")
+
+         # Process the file
+         df = pd.read_excel(file_path)
+         metadata = get_metadata(df)
+         logger.info(f"Metadata extracted for file_id: {file_id}")
+
+         analysis = generate_metadata_analysis(metadata)
+         logger.info(f"Metadata analysis generated for file_id: {file_id}")
+
+         return AnalysisResponse(
+             file_id=file_id,
+             summary=analysis.get("summary", "No summary provided."),
+             suggestions=analysis.get("suggestions", [])
+         )
+
+     except Exception as e:
+         logger.error(f"An error occurred during file analysis: {e}")
+         # Clean up the saved file in case of an error
+         if file_path.exists():
+             os.remove(file_path)
+         raise HTTPException(status_code=500, detail=f"An internal error occurred: {e}")


+ @app.post("/visualize", response_model=VisualizationResponse)
+ async def visualize_data(request: VisualizationRequest):
+     """
+     Generate and execute Python code for a visualization based on a file_id
+     and a selected command from the analysis step.
+     """
+     # Find the file corresponding to the file_id
+     matching_files = list(UPLOADS_DIR.glob(f"{request.file_id}_*"))
+     if not matching_files:
+         logger.error(f"File with ID '{request.file_id}' not found.")
+         raise HTTPException(status_code=404, detail="File not found. Please re-upload the file.")
+
+     file_path = matching_files[0]
+     logger.info(f"Found file '{file_path}' for file_id '{request.file_id}'")
+
+     # Generate the visualization code from Gemini
+     agent_output = generate_visualization_code(str(file_path), request.command)
+     code_to_run = agent_output.get("code")
+
+     if not code_to_run:
+         raise HTTPException(status_code=500, detail="AI model failed to generate valid code.")
+
+     logger.info(f"Code generated for command: '{request.command}'")
+
+     # --- Execute generated code in a subprocess (time-limited, but not a true sandbox) ---
      try:
+         logger.info("Executing generated code in a subprocess...")
+         process = subprocess.run(
+             [sys.executable, "-c", code_to_run],
+             capture_output=True,
+             text=True,
+             check=True,  # Raises CalledProcessError for non-zero exit codes
+             timeout=15   # Add a timeout for safety
+         )
+
+         # The output from the script is expected to be a JSON string
+         stdout = process.stdout.strip()
+         logger.info(f"Code executed successfully. Stdout: {stdout[:200]}...")  # Log first 200 chars
+
+         # Parse the JSON output from the executed code
+         chart_data = json.loads(stdout)
+
+         return VisualizationResponse(
+             type=agent_output.get("type", "unknown"),
+             explanation=agent_output.get("explanation", "No explanation provided."),
+             data=chart_data,
+             generated_code=code_to_run
+         )
+
+     except subprocess.CalledProcessError as e:
+         logger.error(f"Error executing generated code. Stderr: {e.stderr}")
+         raise HTTPException(status_code=500, detail=f"Error during code execution: {e.stderr}")
+     except json.JSONDecodeError:
+         logger.error(f"Failed to decode JSON from executed code's stdout. Output was: {stdout}")
+         raise HTTPException(status_code=500, detail="Generated code did not produce valid JSON output.")
+     except subprocess.TimeoutExpired:
+         logger.error("Code execution timed out.")
+         raise HTTPException(status_code=408, detail="Code execution took too long and was terminated.")
      except Exception as e:
+         logger.error(f"An unexpected error occurred during visualization: {e}")
+         raise HTTPException(status_code=500, detail=f"An unexpected error occurred: {e}")
+

+ @app.get("/", include_in_schema=False)
+ def root():
+     return {"message": "Welcome to the Data Analysis and Visualization API. Visit /docs for more info."}
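
For reference, the two endpoints added in this commit compose into a simple two-step flow. A minimal client-side sketch, assuming the server runs locally on port 8000, the third-party `requests` package is installed, and `sales.xlsx` stands in for any Excel workbook:

    import requests

    BASE_URL = "http://localhost:8000"  # assumed local dev server

    # Step 1: upload the workbook and receive a summary plus suggestions.
    with open("sales.xlsx", "rb") as f:  # hypothetical file
        analysis = requests.post(f"{BASE_URL}/analyze", files={"file": f}).json()
    print(analysis["summary"])

    # Step 2: pick one suggestion and request the visualization data for it.
    viz = requests.post(
        f"{BASE_URL}/visualize",
        json={"file_id": analysis["file_id"], "command": analysis["suggestions"][0]},
    ).json()
    print(viz["type"], viz["explanation"])
    print(viz["data"])  # numeric JSON produced by the executed code

The same flow can also be exercised interactively through FastAPI's auto-generated docs at /docs.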