Update app.py
app.py
CHANGED
@@ -5,7 +5,7 @@ import logging
 import plotly.express as px
 import plotly.graph_objects as go
 from sklearn.ensemble import IsolationForest
-from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import ThreadPoolExecutor  # Added missing import
 import os
 import io
 import time
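The hunk only annotates the import; the diff does not show where `ThreadPoolExecutor` is actually consumed. For context, a minimal sketch of the usual reason an async handler such as `process_logs` wants a pool, assuming it is used to keep blocking pandas I/O off the event loop (the names `EXECUTOR` and `read_csv_blocking` are illustrative, not from app.py):

```python
import asyncio
from concurrent.futures import ThreadPoolExecutor

import pandas as pd

EXECUTOR = ThreadPoolExecutor(max_workers=2)  # illustrative shared pool

def read_csv_blocking(path: str) -> pd.DataFrame:
    # Blocking call; runs in a worker thread so the event loop stays free.
    return pd.read_csv(path)

async def load_df(path: str) -> pd.DataFrame:
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(EXECUTOR, read_csv_blocking, path)
```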
@@ -314,70 +314,22 @@ def generate_pdf_content(summary, preview_df, anomalies, amc_reminders, insights
         logging.error(f"Failed to generate PDF: {str(e)}")
         return None
 
-# Update filters
-def update_filters(current_file_path, cached_df_state):
-    try:
-        if not current_file_path or cached_df_state is None:
-            logging.warning("No file or cached DataFrame available for filter update")
-            return gr.update(choices=['All'], value='All'), gr.update(choices=['All'], value='All')
-
-        df = cached_df_state
-        if df.empty:
-            logging.warning("Cached DataFrame is empty")
-            return gr.update(choices=['All'], value='All'), gr.update(choices=['All'], value='All')
-
-        lab_site_options = ['All']
-        if 'lab_site' in df.columns:
-            sites = df['lab_site'].dropna().astype(str).unique().tolist()
-            lab_site_options.extend([site for site in sites if site.strip()])
-            logging.info(f"Lab site options populated: {lab_site_options}")
-
-        equipment_type_options = ['All']
-        if 'equipment_type' in df.columns:
-            types = df['equipment_type'].dropna().astype(str).unique().tolist()
-            equipment_type_options.extend([t for t in types if t.strip()])
-            logging.info(f"Equipment type options populated: {equipment_type_options}")
-
-        if len(lab_site_options) == 1:
-            logging.warning("No valid lab_site values found in DataFrame")
-        if len(equipment_type_options) == 1:
-            logging.warning("No valid equipment_type values found in DataFrame")
-
-        return gr.update(choices=lab_site_options, value='All'), gr.update(choices=equipment_type_options, value='All')
-    except Exception as e:
-        logging.error(f"Failed to update filters: {str(e)}")
-        return gr.update(choices=['All'], value='All'), gr.update(choices=['All'], value='All')
-
 # Main processing function
-async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_range, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path):
+async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_range, last_modified_state, cached_df_state, cached_filtered_df_state):
     start_time = time.time()
     try:
-        if not file_obj and not current_file_path:
-            return "No file uploaded.", pd.DataFrame(), None, '<p>No device cards available.</p>', None, None, None, None, "No anomalies detected.", "No AMC reminders.", "No insights generated.", None, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path
-
-        file_path = file_obj.name if file_obj else current_file_path
-        if not os.path.exists(file_path):
-            logging.error(f"File path does not exist: {file_path}")
-            return "File not found.", pd.DataFrame(), None, '<p>No device cards available.</p>', None, None, None, None, "No anomalies detected.", "No AMC reminders.", "No insights generated.", None, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path
+        if not file_obj:
+            return "No file uploaded.", pd.DataFrame(), None, '<p>No device cards available.</p>', None, None, None, None, "No anomalies detected.", "No AMC reminders.", "No insights generated.", None, last_modified_state, cached_df_state, cached_filtered_df_state
 
+        file_path = file_obj.name
         current_modified_time = os.path.getmtime(file_path)
-
-
-        # Check if we can use cached filtered data
-        if (last_modified_state == current_modified_time and
-            cached_filtered_df_state is not None and
-            file_path == current_file_path):
-            logging.info("Using cached filtered DataFrame")
+        if last_modified_state and current_modified_time == last_modified_state and cached_filtered_df_state is not None:
             filtered_df = cached_filtered_df_state
         else:
-
-            if (cached_df_state is None or
-                current_modified_time != last_modified_state or
-                file_path != current_file_path):
-                logging.info(f"Reading new CSV file: {file_path}")
+            if cached_df_state is None or current_modified_time != last_modified_state:
+                logging.info(f"Processing file: {file_path}")
                 if not file_path.endswith(".csv"):
                     return "Please upload a CSV file.", pd.DataFrame(), None, '<p>No device cards available.</p>', None, None, None, None, "", "", "", None, last_modified_state, cached_df_state, cached_filtered_df_state
 
                 required_columns = ["device_id", "log_type", "status", "timestamp", "usage_hours", "downtime", "amc_date"]
                 dtypes = {
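The rewritten cache check above keys invalidation off `os.path.getmtime`: reparse only when the file changed on disk, otherwise reuse the cached DataFrame. A minimal standalone sketch of the same pattern (the `_cache` and `load_logs` names are illustrative):

```python
import os

import pandas as pd

_cache = {"mtime": None, "df": None}  # illustrative module-level cache

def load_logs(path: str) -> pd.DataFrame:
    mtime = os.path.getmtime(path)
    if _cache["mtime"] == mtime and _cache["df"] is not None:
        return _cache["df"]  # file unchanged: skip the expensive read_csv
    df = pd.read_csv(path)
    _cache.update(mtime=mtime, df=df)
    return df
```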
@@ -391,16 +343,15 @@ async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_ra
                 df = pd.read_csv(file_path, dtype=dtypes)
                 missing_columns = [col for col in required_columns if col not in df.columns]
                 if missing_columns:
-                    return f"Missing columns: {missing_columns}", pd.DataFrame(), None, '<p>No device cards available.</p>', None, None, None, None, None, None, None, None, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path
+                    return f"Missing columns: {missing_columns}", pd.DataFrame(), None, '<p>No device cards available.</p>', None, None, None, None, None, None, None, None, last_modified_state, cached_df_state, cached_filtered_df_state
 
                 df["timestamp"] = pd.to_datetime(df["timestamp"], errors='coerce')
                 df["amc_date"] = pd.to_datetime(df["amc_date"], errors='coerce')
                 if df["timestamp"].dt.tz is None:
                     df["timestamp"] = df["timestamp"].dt.tz_localize('UTC').dt.tz_convert('Asia/Kolkata')
                 if df.empty:
                     return "No data available.", pd.DataFrame(), None, '<p>No device cards available.</p>', None, None, None, None, None, None, None, None, last_modified_state, df, cached_filtered_df_state
             else:
-                logging.info("Using cached raw DataFrame")
                 df = cached_df_state
 
         # Apply filters
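The unchanged context lines above treat naive timestamps as UTC and convert them to IST. A small sketch of that behavior on toy data:

```python
import pandas as pd

# Naive timestamps are localized to UTC, then converted to Asia/Kolkata (UTC+05:30).
ts = pd.Series(pd.to_datetime(["2024-01-01 00:00:00"], errors="coerce"))
if ts.dt.tz is None:
    ts = ts.dt.tz_localize("UTC").dt.tz_convert("Asia/Kolkata")
print(ts[0])  # 2024-01-01 05:30:00+05:30
```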
@@ -417,7 +368,7 @@ async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_ra
             filtered_df = filtered_df[(filtered_df['timestamp'] >= start_date) & (filtered_df['timestamp'] <= end_date)]
 
         if filtered_df.empty:
             return "No data after applying filters.", pd.DataFrame(), None, '<p>No device cards available.</p>', None, None, None, None, None, None, None, None, last_modified_state, df, filtered_df
 
         # Generate table for preview
         preview_df = filtered_df[['device_id', 'log_type', 'status', 'timestamp', 'usage_hours', 'downtime', 'amc_date']].head(5)
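The date-range filter in this hunk is a standard inclusive boolean-mask comparison; a toy example:

```python
import pandas as pd

df = pd.DataFrame({"timestamp": pd.date_range("2024-01-01", periods=5, freq="D", tz="Asia/Kolkata")})
start_date = pd.Timestamp("2024-01-02", tz="Asia/Kolkata")
end_date = pd.Timestamp("2024-01-04", tz="Asia/Kolkata")
# Both bounds are inclusive, matching the comparison in the diff.
mask = (df["timestamp"] >= start_date) & (df["timestamp"] <= end_date)
print(df[mask].shape[0])  # 3 rows fall inside the range
```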
@@ -456,10 +407,10 @@ async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_ra
         if elapsed_time > 3:
             logging.warning(f"Processing time exceeded 3 seconds: {elapsed_time:.2f} seconds")
 
-        return (summary, preview_html, usage_chart, device_cards, daily_log_chart, weekly_uptime_chart, anomaly_alerts_chart, downtime_chart, anomalies, amc_reminders, insights, None, current_modified_time, df, filtered_df, current_file_path)
+        return (summary, preview_html, usage_chart, device_cards, daily_log_chart, weekly_uptime_chart, anomaly_alerts_chart, downtime_chart, anomalies, amc_reminders, insights, None, current_modified_time, df, filtered_df)
     except Exception as e:
         logging.error(f"Failed to process file: {str(e)}")
         return f"Error: {str(e)}", pd.DataFrame(), None, '<p>Error processing data.</p>', None, None, None, None, None, None, None, None, last_modified_state, cached_df_state, cached_filtered_df_state
 
 # Generate PDF separately
 async def generate_pdf(summary, preview_html, usage_chart, device_cards, daily_log_chart, weekly_uptime_chart, anomaly_alerts_chart, downtime_chart, anomalies, amc_reminders, insights):
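The change here trims the extra state from the return tuple so it matches the 15-component `outputs=` list wired up later. In Gradio, a handler must return exactly one value per output component, in order; a minimal sketch of that contract (component names are illustrative):

```python
import gradio as gr

def handler(text):
    summary = f"Got: {text}"
    table_html = "<p>no rows</p>"
    # Two return values for the two components in outputs= below.
    return summary, table_html

with gr.Blocks() as demo:
    inp = gr.Textbox()
    summary_box = gr.Textbox()
    table_box = gr.HTML()
    inp.change(fn=handler, inputs=[inp], outputs=[summary_box, table_box])
```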
@@ -471,6 +422,24 @@ async def generate_pdf(summary, preview_html, usage_chart, device_cards, daily_l
         logging.error(f"Failed to generate PDF: {str(e)}")
         return None
 
+# Update filters
+def update_filters(file_obj, current_file_state):
+    if not file_obj or file_obj.name == current_file_state:
+        return gr.update(), gr.update(), current_file_state
+    try:
+        with open(file_obj.name, 'rb') as f:
+            csv_content = f.read().decode('utf-8')
+        df = pd.read_csv(io.StringIO(csv_content))
+        df['timestamp'] = pd.to_datetime(df['timestamp'], errors='coerce')
+
+        lab_site_options = ['All'] + [site for site in df['lab_site'].dropna().astype(str).unique().tolist() if site.strip()] if 'lab_site' in df.columns else ['All']
+        equipment_type_options = ['All'] + [equip for equip in df['equipment_type'].dropna().astype(str).unique().tolist() if equip.strip()] if 'equipment_type' in df.columns else ['All']
+
+        return gr.update(choices=lab_site_options, value='All'), gr.update(choices=equipment_type_options, value='All'), file_obj.name
+    except Exception as e:
+        logging.error(f"Failed to update filters: {str(e)}")
+        return gr.update(choices=['All'], value='All'), gr.update(choices=['All'], value='All'), current_file_state
+
 # Gradio Interface
 try:
     logging.info("Initializing Gradio interface...")
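The new `update_filters` returns `gr.update()` with no arguments to leave a dropdown untouched, and `gr.update(choices=..., value=...)` to repopulate it in place. A minimal sketch of that pattern (names illustrative):

```python
import gradio as gr

def refresh(mode_text):
    if mode_text == "keep":
        return gr.update()  # no-op: the dropdown stays as-is
    return gr.update(choices=["a", "b", "c"], value="a")  # repopulate

with gr.Blocks() as demo:
    mode = gr.Textbox()
    dd = gr.Dropdown(choices=["keep"], value="keep")
    mode.change(fn=refresh, inputs=[mode], outputs=[dd])
```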
@@ -487,12 +456,12 @@ try:
     .table tr:nth-child(even) {background-color: #f9f9f9;}
     """) as iface:
         gr.Markdown("<h1>LabOps Log Analyzer Dashboard</h1>")
         gr.Markdown("Upload a CSV file to analyze. Click 'Analyze' to refresh the dashboard. Use 'Export PDF' for report download.")
 
         last_modified_state = gr.State(value=None)
+        current_file_state = gr.State(value=None)
         cached_df_state = gr.State(value=None)
         cached_filtered_df_state = gr.State(value=None)
-        current_file_path = gr.State(value=None)
 
         with gr.Row():
             with gr.Column(scale=1):
|
|
542 |
gr.Markdown("### Export Report")
|
543 |
pdf_output = gr.File(label="Download Status Report as PDF")
|
544 |
|
545 |
-
# Update file path and filters when CSV is uploaded
|
546 |
file_input.change(
|
547 |
-
fn=lambda file_obj, cached_df, file_path: (file_obj.name if file_obj else file_path, cached_df, file_obj.name if file_obj else file_path),
|
548 |
-
inputs=[file_input, cached_df_state, current_file_path],
|
549 |
-
outputs=[current_file_path, cached_df_state, current_file_path],
|
550 |
-
queue=False
|
551 |
-
).then(
|
552 |
-
fn=process_logs,
|
553 |
-
inputs=[file_input, lab_site_filter, equipment_type_filter, date_range_filter, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path],
|
554 |
-
outputs=[summary_output, preview_output, usage_chart_output, device_cards_output, daily_log_trends_output, weekly_uptime_output, anomaly_alerts_output, downtime_chart_output, anomaly_output, amc_output, insights_output, pdf_output, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path],
|
555 |
-
queue=False
|
556 |
-
).then(
|
557 |
fn=update_filters,
|
558 |
-
inputs=[
|
559 |
-
outputs=[lab_site_filter, equipment_type_filter],
|
560 |
queue=False
|
561 |
)
|
562 |
|
563 |
-
# Process logs on submit or filter change
|
564 |
submit_button.click(
|
565 |
fn=process_logs,
|
566 |
-
inputs=[file_input, lab_site_filter, equipment_type_filter, date_range_filter, last_modified_state, cached_df_state, cached_filtered_df_state
|
567 |
-
outputs=[summary_output, preview_output, usage_chart_output, device_cards_output, daily_log_trends_output, weekly_uptime_output, anomaly_alerts_output, downtime_chart_output, anomaly_output, amc_output, insights_output, pdf_output, last_modified_state, cached_df_state, cached_filtered_df_state
|
568 |
-
)
|
569 |
-
|
570 |
-
# Update on filter change without requiring new file
|
571 |
-
lab_site_filter.change(
|
572 |
-
fn=process_logs,
|
573 |
-
inputs=[file_input, lab_site_filter, equipment_type_filter, date_range_filter, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path],
|
574 |
-
outputs=[summary_output, preview_output, usage_chart_output, device_cards_output, daily_log_trends_output, weekly_uptime_output, anomaly_alerts_output, downtime_chart_output, anomaly_output, amc_output, insights_output, pdf_output, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path]
|
575 |
-
)
|
576 |
-
|
577 |
-
equipment_type_filter.change(
|
578 |
-
fn=process_logs,
|
579 |
-
inputs=[file_input, lab_site_filter, equipment_type_filter, date_range_filter, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path],
|
580 |
-
outputs=[summary_output, preview_output, usage_chart_output, device_cards_output, daily_log_trends_output, weekly_uptime_output, anomaly_alerts_output, downtime_chart_output, anomaly_output, amc_output, insights_output, pdf_output, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path]
|
581 |
-
)
|
582 |
-
|
583 |
-
date_range_filter.change(
|
584 |
-
fn=process_logs,
|
585 |
-
inputs=[file_input, lab_site_filter, equipment_type_filter, date_range_filter, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path],
|
586 |
-
outputs=[summary_output, preview_output, usage_chart_output, device_cards_output, daily_log_trends_output, weekly_uptime_output, anomaly_alerts_output, downtime_chart_output, anomaly_output, amc_output, insights_output, pdf_output, last_modified_state, cached_df_state, cached_filtered_df_state, current_file_path]
|
587 |
)
|
588 |
|
589 |
pdf_button.click(
|
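The deleted wiring chained handlers on upload with `.then()`, which runs each step after the previous one finishes; the new wiring replaces the chain with a single `update_filters` call. For reference, a minimal `.then()` sketch (handler names illustrative):

```python
import gradio as gr

def step_one(x):
    return x.upper()

def step_two(x):
    return f"[{x}]"

with gr.Blocks() as demo:
    inp = gr.Textbox()
    out = gr.Textbox()
    # step_two only runs after step_one has returned.
    inp.change(fn=step_one, inputs=[inp], outputs=[out], queue=False).then(
        fn=step_two, inputs=[out], outputs=[out], queue=False
    )
```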