Update app.py
app.py CHANGED
@@ -14,6 +14,7 @@ from simple_salesforce import Salesforce
 import os
 import json
 import io
+import time
 
 # Configure logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -367,34 +368,82 @@ def generate_dashboard_insights(df):
         logging.error(f"Dashboard insights generation failed: {str(e)}")
         return f"Dashboard insights generation failed: {str(e)}"
 
-# Create usage chart
+# Create usage chart with visual indicators for spikes
 def create_usage_chart(df):
     try:
         usage_data = df.groupby("device_id")["usage_hours"].sum().reset_index()
         if len(usage_data) > 5:
             usage_data = usage_data.nlargest(5, "usage_hours")
-
+
+        # Define a threshold for usage spikes (e.g., 75th percentile + 1.5 * IQR)
+        q75, q25 = usage_data["usage_hours"].quantile([0.75, 0.25])
+        iqr = q75 - q25
+        spike_threshold = q75 + 1.5 * iqr
+        usage_data["color"] = usage_data["usage_hours"].apply(
+            lambda x: "red" if x > spike_threshold else "blue"
+        )
+
+        custom_colors = usage_data["color"].tolist()
         fig = px.bar(
             usage_data,
             x="device_id",
             y="usage_hours",
-            title="Usage Hours per Device",
+            title="Usage Hours per Device (Red = Usage Spike)",
             labels={"device_id": "Device ID", "usage_hours": "Usage Hours"},
-            color="
-
+            color="color",
+            color_discrete_map={"blue": "#4ECDC4", "red": "#FF0000"}
         )
         fig.update_layout(
             title_font_size=16,
             margin=dict(l=20, r=20, t=40, b=20),
             plot_bgcolor="white",
             paper_bgcolor="white",
-            font=dict(size=12)
+            font=dict(size=12),
+            showlegend=False
        )
         return fig
     except Exception as e:
         logging.error(f"Failed to create usage chart: {str(e)}")
         return None
 
+# Create downtime chart with visual indicators for spikes
+def create_downtime_chart(df):
+    try:
+        downtime_data = df.groupby("device_id")["downtime"].sum().reset_index()
+        if len(downtime_data) > 5:
+            downtime_data = downtime_data.nlargest(5, "downtime")
+
+        # Define a threshold for downtime spikes (e.g., 75th percentile + 1.5 * IQR)
+        q75, q25 = downtime_data["downtime"].quantile([0.75, 0.25])
+        iqr = q75 - q25
+        spike_threshold = q75 + 1.5 * iqr
+        downtime_data["color"] = downtime_data["downtime"].apply(
+            lambda x: "red" if x > spike_threshold else "purple"
+        )
+
+        custom_colors = downtime_data["color"].tolist()
+        fig = px.bar(
+            downtime_data,
+            x="device_id",
+            y="downtime",
+            title="Downtime per Device (Red = Downtime Spike)",
+            labels={"device_id": "Device ID", "downtime": "Downtime (Hours)"},
+            color="color",
+            color_discrete_map={"purple": "#800080", "red": "#FF0000"}
+        )
+        fig.update_layout(
+            title_font_size=16,
+            margin=dict(l=20, r=20, t=40, b=20),
+            plot_bgcolor="white",
+            paper_bgcolor="white",
+            font=dict(size=12),
+            showlegend=False
+        )
+        return fig
+    except Exception as e:
+        logging.error(f"Failed to create downtime chart: {str(e)}")
+        return None
+
 # Create daily log trends chart
 def create_daily_log_trends_chart(df):
     try:
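
The spike rule above is the usual Tukey upper fence, Q3 + 1.5 * IQR, applied to the per-device totals. A small, self-contained sketch of the same arithmetic on made-up numbers (device IDs and hours are purely illustrative):

```python
import pandas as pd

# Hypothetical per-device usage totals, as produced by the groupby/sum above
usage_data = pd.DataFrame({
    "device_id": ["D1", "D2", "D3", "D4", "D5"],
    "usage_hours": [40.0, 42.0, 38.0, 41.0, 95.0],
})

q75, q25 = usage_data["usage_hours"].quantile([0.75, 0.25])
iqr = q75 - q25                    # 42.0 - 40.0 = 2.0
spike_threshold = q75 + 1.5 * iqr  # 42.0 + 3.0 = 45.0

# Only D5 (95.0 hours) exceeds the fence and would be drawn in red
print(usage_data[usage_data["usage_hours"] > spike_threshold])
```

With at most five devices kept by the nlargest(5, ...) cut, one heavy user stands out clearly; when all totals are similar, the IQR is close to zero and no bar gets flagged.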
@@ -557,12 +606,34 @@ def generate_device_cards(df):
         logging.error(f"Failed to generate device cards: {str(e)}", exc_info=True)
         return f'<p>Error generating device cards: {str(e)}</p>'
 
-# Generate
-def
+# Generate monthly status summary for PDF
+def generate_monthly_status(df, selected_month):
+    try:
+        total_devices = df['device_id'].nunique()
+        total_usage_hours = df['usage_hours'].sum()
+        total_downtime = df['downtime'].sum()
+        avg_usage_per_device = total_usage_hours / total_devices if total_devices > 0 else 0
+        avg_downtime_per_device = total_downtime / total_devices if total_devices > 0 else 0
+
+        summary = f"""
+        Monthly Status for {selected_month}:
+        - Total Devices: {total_devices}
+        - Total Usage Hours: {total_usage_hours:.2f}
+        - Total Downtime Hours: {total_downtime:.2f}
+        - Average Usage per Device: {avg_usage_per_device:.2f} hours
+        - Average Downtime per Device: {avg_downtime_per_device:.2f} hours
+        """
+        return summary
+    except Exception as e:
+        logging.error(f"Failed to generate monthly status: {str(e)}")
+        return f"Failed to generate monthly status: {str(e)}"
+
+# Generate PDF content with monthly status
+def generate_pdf_content(summary, preview, anomalies, amc_reminders, insights, device_cards_html, daily_log_chart, weekly_uptime_chart, anomaly_alerts_chart, downtime_chart, df, selected_month):
     if not reportlab_available:
         return None
     try:
-        pdf_path = f"
+        pdf_path = f"monthly_status_report_{selected_month.replace(' ', '_')}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf"
         doc = SimpleDocTemplate(pdf_path, pagesize=letter)
         styles = getSampleStyleSheet()
         story = []
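
For reference, generate_monthly_status only does simple aggregation arithmetic: totals over the filtered frame, then per-device averages guarded against a zero device count. A quick sketch with invented values, where two devices log 30 and 50 usage hours and 2 and 6 downtime hours, giving averages of 40.00 and 4.00 hours per device:

```python
import pandas as pd

toy = pd.DataFrame({
    "device_id": ["D1", "D1", "D2"],
    "usage_hours": [10.0, 20.0, 50.0],
    "downtime": [1.0, 1.0, 6.0],
})

total_devices = toy["device_id"].nunique()    # 2
total_usage_hours = toy["usage_hours"].sum()  # 80.0
total_downtime = toy["downtime"].sum()        # 8.0

print(total_usage_hours / total_devices)      # 40.0 hours per device
print(total_downtime / total_devices)         # 4.0 hours per device
```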
@@ -570,10 +641,17 @@ def generate_pdf_content(summary, preview, anomalies, amc_reminders, insights, d
         def safe_paragraph(text, style):
             return Paragraph(str(text).replace('\n', '<br/>'), style) if text else Paragraph("", style)
 
-        story.append(Paragraph("LabOps
+        story.append(Paragraph("LabOps Monthly Status Report", styles['Title']))
         story.append(Paragraph(f"Generated on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", styles['Normal']))
         story.append(Spacer(1, 12))
 
+        # Add monthly status summary if a month is selected
+        if selected_month != "All":
+            monthly_status = generate_monthly_status(df, selected_month)
+            story.append(Paragraph("Monthly Status Summary", styles['Heading2']))
+            story.append(safe_paragraph(monthly_status, styles['Normal']))
+            story.append(Spacer(1, 12))
+
         story.append(Paragraph("Summary Report", styles['Heading2']))
         story.append(safe_paragraph(summary or "No summary available.", styles['Normal']))
         story.append(Spacer(1, 12))
@@ -611,6 +689,10 @@ def generate_pdf_content(summary, preview, anomalies, amc_reminders, insights, d
 
         story.append(Paragraph("Anomaly Alerts Chart", styles['Heading2']))
         story.append(Paragraph("[Chart placeholder - see dashboard for Anomaly Alerts]", styles['Normal']))
+        story.append(Spacer(1, 12))
+
+        story.append(Paragraph("Downtime Chart", styles['Heading2']))
+        story.append(Paragraph("[Chart placeholder - see dashboard for Downtime per Device]", styles['Normal']))
 
         doc.build(story)
         logging.info(f"PDF generated at {pdf_path}")
@@ -620,17 +702,25 @@ def generate_pdf_content(summary, preview, anomalies, amc_reminders, insights, d
         return None
 
 # Main Gradio function with optimized performance
-async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_range):
+async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_range, month_filter, last_modified_state):
     try:
         start_time = datetime.now()
+
+        # Check if file exists and get its last modified time
         if not file_obj:
-            return "No file uploaded.", "No data to preview.", None, '<p>No device cards available.</p>', None, None, None, "No anomalies detected.", "No AMC reminders.", "No insights generated.", None
+            return "No file uploaded.", "No data to preview.", None, '<p>No device cards available.</p>', None, None, None, None, "No anomalies detected.", "No AMC reminders.", "No insights generated.", None, last_modified_state
+
+        file_path = file_obj.name
+        current_modified_time = os.path.getmtime(file_path)
+
+        # Skip processing if the file hasn't changed
+        if last_modified_state and current_modified_time == last_modified_state:
+            return None, None, None, None, None, None, None, None, None, None, None, None, last_modified_state
 
-
-        logging.info(f"Processing file: {file_name}")
+        logging.info(f"Processing file: {file_path}, last modified: {current_modified_time}")
 
-        if not
-            return "Please upload a CSV file.", "", None, '<p>No device cards available.</p>', None, None, None, "", "", "", None
+        if not file_path.endswith(".csv"):
+            return "Please upload a CSV file.", "", None, '<p>No device cards available.</p>', None, None, None, None, "", "", "", None, last_modified_state
 
         required_columns = ["device_id", "log_type", "status", "timestamp", "usage_hours", "downtime", "amc_date"]
         dtypes = {
@@ -641,16 +731,16 @@ async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_ra
             "downtime": "float32",
             "amc_date": "string"
         }
-        df = pd.read_csv(
+        df = pd.read_csv(file_path, dtype=dtypes)
         missing_columns = [col for col in required_columns if col not in df.columns]
         if missing_columns:
-            return f"Missing columns: {missing_columns}", None, None, '<p>No device cards available.</p>', None, None, None, None, None, None, None
+            return f"Missing columns: {missing_columns}", None, None, '<p>No device cards available.</p>', None, None, None, None, None, None, None, None, last_modified_state
 
         # Convert timestamp and amc_date to datetime
         df["timestamp"] = pd.to_datetime(df["timestamp"], errors='coerce')
         df["amc_date"] = pd.to_datetime(df["amc_date"], errors='coerce')
         if df.empty:
-            return "No data available.", None, None, '<p>No device cards available.</p>', None, None, None, None, None, None, None
+            return "No data available.", None, None, '<p>No device cards available.</p>', None, None, None, None, None, None, None, None, last_modified_state
 
         # Log DataFrame before filtering
         logging.info(f"DataFrame before filtering:\n{df.head().to_string()}")
@@ -680,10 +770,19 @@ async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_ra
             filtered_df = filtered_df[(filtered_df['timestamp'] >= start_date) & (filtered_df['timestamp'] <= end_date)]
             logging.info(f"After date range filter: {filtered_df.shape[0]} rows")
 
+        # Month filter
+        if month_filter and month_filter != "All":
+            selected_date = pd.to_datetime(month_filter, format="%B %Y")
+            filtered_df = filtered_df[
+                (filtered_df['timestamp'].dt.year == selected_date.year) &
+                (filtered_df['timestamp'].dt.month == selected_date.month)
+            ]
+            logging.info(f"After month filter ({month_filter}): {filtered_df.shape[0]} rows")
+
         # Check if filtered_df is empty
         if filtered_df.empty:
             logging.warning("Filtered DataFrame is empty after applying filters.")
-            return "No data after applying filters.", None, None, '<p>No device cards available.</p>', None, None, None, None, None, None, None
+            return "No data after applying filters.", None, None, '<p>No device cards available.</p>', None, None, None, None, None, None, None, None, last_modified_state
 
         # Log the state of filtered_df
         logging.info(f"Filtered DataFrame:\n{filtered_df.head().to_string()}")
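
The month filter assumes the dropdown labels are month-name-plus-year strings such as "March 2025", which is exactly what update_filters builds further down with strftime('%B %Y'). A minimal sketch of that round trip (dates invented for illustration):

```python
import pandas as pd

ts = pd.Series(pd.to_datetime(["2025-03-02", "2025-03-28", "2025-04-05"]))

# Labels as built in update_filters
labels = ts.dt.strftime("%B %Y")  # "March 2025", "March 2025", "April 2025"

# Parsing a selected label back, as the month filter above does
selected = pd.to_datetime("March 2025", format="%B %Y")  # -> 2025-03-01
mask = (ts.dt.year == selected.year) & (ts.dt.month == selected.month)
print(mask.tolist())  # [True, True, False]
```

One side effect worth knowing: sorted() on these labels orders them alphabetically ("April 2025" before "March 2025"), not chronologically.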
@@ -695,6 +794,7 @@ async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_ra
             future_amc = executor.submit(check_amc_reminders, filtered_df, datetime.now())
             future_insights = executor.submit(generate_dashboard_insights, filtered_df)
             future_usage_chart = executor.submit(create_usage_chart, filtered_df)
+            future_downtime_chart = executor.submit(create_downtime_chart, filtered_df)
             future_daily_log_chart = executor.submit(create_daily_log_trends_chart, filtered_df)
             future_weekly_uptime_chart = executor.submit(create_weekly_uptime_chart, filtered_df)
             future_device_cards = executor.submit(generate_device_cards, filtered_df)
@@ -707,6 +807,7 @@ async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_ra
         amc_reminders = f"AMC Reminders\n{amc_reminders}"
         insights = f"Dashboard Insights (AI)\n{future_insights.result()}"
         usage_chart = future_usage_chart.result()
+        downtime_chart = future_downtime_chart.result()
         daily_log_chart = future_daily_log_chart.result()
         weekly_uptime_chart = future_weekly_uptime_chart.result()
         device_cards = future_device_cards.result()
@@ -728,20 +829,23 @@ async def process_logs(file_obj, lab_site_filter, equipment_type_filter, date_ra
         save_to_salesforce(filtered_df, reminders_df, summary, anomalies, amc_reminders, insights)
 
         # Generate PDF with updated content
-        pdf_file = generate_pdf_content(summary, preview, anomalies, amc_reminders, insights, device_cards, daily_log_chart, weekly_uptime_chart, anomaly_alerts_chart)
+        pdf_file = generate_pdf_content(summary, preview, anomalies, amc_reminders, insights, device_cards, daily_log_chart, weekly_uptime_chart, anomaly_alerts_chart, downtime_chart, filtered_df, month_filter)
 
         elapsed_time = (datetime.now() - start_time).total_seconds()
         logging.info(f"Processing completed in {elapsed_time:.2f} seconds")
-
+        if elapsed_time > 10:
+            logging.warning(f"Processing time exceeded 10 seconds: {elapsed_time:.2f} seconds")
+
+        return (summary, preview, usage_chart, device_cards, daily_log_chart, weekly_uptime_chart, anomaly_alerts_chart, downtime_chart, anomalies, amc_reminders, insights, pdf_file, current_modified_time)
     except Exception as e:
         logging.error(f"Failed to process file: {str(e)}")
-        return f"Error: {str(e)}", None, None, '<p>Error processing data.</p>', None, None, None, None, None, None, None
+        return f"Error: {str(e)}", None, None, '<p>Error processing data.</p>', None, None, None, None, None, None, None, None, last_modified_state
 
 # Dynamically update filter options (if columns exist in the uploaded CSV)
 def update_filters(file_obj):
     if not file_obj:
         logging.info("No file uploaded for filter update, returning default options.")
-        return gr.update(choices=['All'], value='All'), gr.update(choices=['All'], value='All')
+        return gr.update(choices=['All'], value='All'), gr.update(choices=['All'], value='All'), gr.update(choices=['All'], value='All')
 
     try:
         logging.info(f"Attempting to read CSV file: {file_obj.name}")
@@ -749,6 +853,7 @@ def update_filters(file_obj):
         with open(file_obj.name, 'rb') as f:
             csv_content = f.read().decode('utf-8')
         df = pd.read_csv(io.StringIO(csv_content))
+        df['timestamp'] = pd.to_datetime(df['timestamp'], errors='coerce')
         logging.info(f"CSV file read successfully. Columns found: {list(df.columns)}")
 
         # Lab site options
@@ -769,11 +874,21 @@
         else:
             logging.warning("Column 'equipment_type' not found in CSV.")
 
-
+        # Month options based on timestamp
+        month_options = ['All']
+        if 'timestamp' in df.columns:
+            df['month_year'] = df['timestamp'].dt.strftime('%B %Y')
+            unique_months = df['month_year'].dropna().unique().tolist()
+            month_options.extend(sorted(unique_months))
+            logging.info(f"Month options extracted: {month_options}")
+        else:
+            logging.warning("Column 'timestamp' not found in CSV.")
+
+        return gr.update(choices=lab_site_options, value='All'), gr.update(choices=equipment_type_options, value='All'), gr.update(choices=month_options, value='All')
     except Exception as e:
         logging.error(f"Failed to update filters: {str(e)}")
         # Fallback: return default options
-        return gr.update(choices=['All'], value='All'), gr.update(choices=['All'], value='All')
+        return gr.update(choices=['All'], value='All'), gr.update(choices=['All'], value='All'), gr.update(choices=['All'], value='All')
 
 # Gradio Interface
 try:
@@ -787,7 +902,9 @@ try:
         .dashboard-section ul {margin: 2px 0; padding-left: 20px;}
     """) as iface:
         gr.Markdown("<h1>LabOps Log Analyzer Dashboard (Hugging Face AI)</h1>")
-        gr.Markdown("Upload a CSV file to analyze.")
+        gr.Markdown("Upload a CSV file to analyze. Dashboard refreshes every 5 seconds if the file changes.")
+
+        last_modified_state = gr.State(value=None)
 
         with gr.Row():
             with gr.Column(scale=1):
@@ -816,6 +933,12 @@ try:
                     value=[-30, 0], # Default: last 30 days
                     info="Select the range of days relative to today (e.g., -30 to 0 for the last 30 days)."
                 )
+                month_filter = gr.Dropdown(
+                    label="Select Month for Report",
+                    choices=['All'],
+                    value='All',
+                    interactive=True
+                )
 
                 submit_button = gr.Button("Analyze", variant="primary")
 
@@ -839,6 +962,8 @@ try:
                 gr.Markdown("### Charts")
                 with gr.Tab("Usage Hours per Device"):
                     usage_chart_output = gr.Plot()
+                with gr.Tab("Downtime per Device"):
+                    downtime_chart_output = gr.Plot()
                 with gr.Tab("Daily Log Trends"):
                     daily_log_trends_output = gr.Plot()
                 with gr.Tab("Weekly Uptime Percentage"):
@@ -860,23 +985,52 @@ try:
 
         with gr.Group(elem_classes="dashboard-section"):
             gr.Markdown("### Export Report")
-            pdf_output = gr.File(label="Download
+            pdf_output = gr.File(label="Download Monthly Status Report as PDF")
 
         # Update filters when a new file is uploaded
         file_input.change(
             fn=update_filters,
             inputs=[file_input],
-            outputs=[lab_site_filter, equipment_type_filter],
+            outputs=[lab_site_filter, equipment_type_filter, month_filter],
             queue=False # Disable queue to ensure immediate update
         )
 
+        # Periodic update to check for file changes
+        def update_dashboard(file_obj, lab_site_filter, equipment_type_filter, date_range, month_filter, last_modified_state):
+            outputs = process_logs(file_obj, lab_site_filter, equipment_type_filter, date_range, month_filter, last_modified_state)
+            return outputs
+
+        gr.Timer(
+            fn=update_dashboard,
+            inputs=[file_input, lab_site_filter, equipment_type_filter, date_range_filter, month_filter, last_modified_state],
+            outputs=[
+                summary_output,
+                preview_output,
+                usage_chart_output,
+                device_cards_output,
+                daily_log_trends_output,
+                weekly_uptime_output,
+                anomaly_alerts_output,
+                downtime_chart_output,
+                anomaly_output,
+                amc_output,
+                insights_output,
+                pdf_output,
+                last_modified_state
+            ],
+            value=5, # Check every 5 seconds
+            _js="() => { return window.setInterval(() => { return true; }, 5000); }"
+        )
+
         submit_button.click(
             fn=process_logs,
             inputs=[
                 file_input,
                 lab_site_filter,
                 equipment_type_filter,
-                date_range_filter
+                date_range_filter,
+                month_filter,
+                last_modified_state
             ],
             outputs=[
                 summary_output,
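
A caution on the timer wiring above: in released Gradio 4.x, gr.Timer is a component whose first argument is the interval in seconds, the callback is attached through its tick event rather than constructor fn/inputs/outputs arguments, there is no _js parameter, and because process_logs is async the periodic callback has to await it. A hedged sketch of equivalent wiring under those assumptions (component names reused from this file):

```python
# Sketch only: assumes Gradio 4.x, where gr.Timer exposes a .tick() event.
timer = gr.Timer(5)  # fires roughly every 5 seconds while the page is open

async def update_dashboard(file_obj, lab_site, equipment_type, date_range, month, last_modified):
    # process_logs is an async function, so it must be awaited here
    return await process_logs(file_obj, lab_site, equipment_type, date_range, month, last_modified)

timer.tick(
    fn=update_dashboard,
    inputs=[file_input, lab_site_filter, equipment_type_filter,
            date_range_filter, month_filter, last_modified_state],
    outputs=[summary_output, preview_output, usage_chart_output, device_cards_output,
             daily_log_trends_output, weekly_uptime_output, anomaly_alerts_output,
             downtime_chart_output, anomaly_output, amc_output, insights_output,
             pdf_output, last_modified_state],
)
```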
@@ -886,10 +1040,12 @@
                 daily_log_trends_output,
                 weekly_uptime_output,
                 anomaly_alerts_output,
+                downtime_chart_output,
                 anomaly_output,
                 amc_output,
                 insights_output,
-                pdf_output
+                pdf_output,
+                last_modified_state
             ]
         )
 