VaneshDev committed
Commit 653a168 · verified · 1 Parent(s): 7b9c6d4

Update app.py

Files changed (1): app.py +87 -61
app.py CHANGED
@@ -1,79 +1,105 @@
 import gradio as gr
 import pandas as pd
 import numpy as np
-from sklearn.linear_model import LogisticRegression
-from sklearn.model_selection import train_test_split
-from statsmodels.tsa.arima.model import ARIMA
 import matplotlib.pyplot as plt
-
-# Sample data (this would be replaced with real equipment usage/maintenance data)
-data = {
-    'equipment_id': ['Excavator', 'Crane', 'Tractor'],
-    'usage_hours': [120, 140, 100],
-    'idle_hours': [30, 20, 50],
-    'movement_frequency': [5, 7, 3],
-    'cost_per_hour': [10, 15, 12],
-}
-
-# Convert data to DataFrame
-df = pd.DataFrame(data)
-
-# Function to perform Time Series Analysis (using ARIMA)
-def time_series_analysis(equipment_id, usage_hours):
-    # Simulate time series data (here using random data for demonstration)
-    dates = pd.date_range('2025-01-01', periods=10, freq='D')
-    usage_data = np.random.randint(100, 200, size=10)  # Dummy usage data
-
-    # Create a DataFrame for time series data
-    ts_df = pd.DataFrame({'ds': dates, 'usage': usage_data})
-
-    # Fit ARIMA model (for simplicity, we use ARIMA(1,1,1) here)
-    model = ARIMA(ts_df['usage'], order=(1, 1, 1))
-    model_fit = model.fit()
-
-    # Forecast the next value (predict the future usage)
-    forecast = model_fit.forecast(steps=1)
-
-    # Return forecasted value
-    return f"Forecasted usage for the next day: {forecast[0]:.2f}"
-
-# Logistic Regression model (using usage and idle hours to predict equipment status)
-def model_prediction(equipment_id, usage_hours, idle_hours, movement_frequency, cost_per_hour):
-    # Feature extraction (from usage data)
-    features = np.array([usage_hours, idle_hours, movement_frequency, cost_per_hour]).reshape(1, -1)
-
-    # Logistic Regression model (this is an example, replace with your trained model)
-    model = LogisticRegression()
-
-    # Train on dummy data (replace this with historical usage data for actual training)
-    X = df[['usage_hours', 'idle_hours', 'movement_frequency', 'cost_per_hour']]
-    y = [0, 1, 0]  # 0 = Repair, 1 = Move (example)
-    model.fit(X, y)
-
-    # Predict suggestion (0 = Repair, 1 = Move)
-    prediction = model.predict(features)[0]
-
-    # Define suggestions
-    suggestions = {0: 'Repair', 1: 'Move'}
-
-    # Confidence score (example)
-    confidence = model.predict_proba(features)[0][prediction] * 100
-
-    # Return the suggestion and confidence
-    return f"Suggestion: {suggestions[prediction]}\nConfidence: {confidence:.2f}%"
-
-# Gradio Interface
-interface = gr.Interface(
-    fn=model_prediction,
-    inputs=[
-        gr.Dropdown(choices=df['equipment_id'].tolist(), label="Select Equipment"),
-        gr.Number(label="Usage Hours"),
-        gr.Number(label="Idle Hours"),
-        gr.Number(label="Movement Frequency"),
-        gr.Number(label="Cost per Hour")
-    ],
-    outputs="text"
-)
-
-# Launch Gradio interface
-interface.launch()
 
+import seaborn as sns
+from sklearn.linear_model import LogisticRegression
+from sklearn.preprocessing import StandardScaler
+import io
+import base64
+
+# Load equipment data
+def load_data():
+    df = pd.read_csv("equipment_data.csv")
+    return df
+
+# Mock AI model for suggestions
+def predict_suggestions(df):
+    X = df[["Usage_Hours__c", "Idle_Hours__c", "Utilization_Score__c"]].fillna(0)
+    scaler = StandardScaler()
+    X_scaled = scaler.fit_transform(X)
+
+    suggestions = ["Move", "Pause Rent", "Repair", "Replace"]
+    y = np.random.choice(suggestions, size=len(df))  # Mock model
+
+    model = LogisticRegression()
+    model.fit(X_scaled, y)
+
+    probs = model.predict_proba(X_scaled)
+    df["AI_Suggestion__c"] = model.predict(X_scaled)
+    df["Suggestion_Confidence__c"] = probs.max(axis=1) * 100
+    return df
+
+# Generate equipment cards
+def generate_cards(df, project_filter, suggestion_filter):
+    filtered_df = df
+    if project_filter and project_filter != "All":
+        filtered_df = filtered_df[filtered_df["Project_ID__c"] == project_filter]
+    if suggestion_filter and suggestion_filter != "All":
+        filtered_df = filtered_df[filtered_df["AI_Suggestion__c"] == suggestion_filter]
+
+    cards = ""
+    for _, row in filtered_df.iterrows():
+        card = f"""
+        **Equipment ID**: {row['Equipment_ID__c']}
+        **Project**: {row['Project_ID__c']}
+        **Usage Hours**: {row['Usage_Hours__c']}
+        **Idle Hours**: {row['Idle_Hours__c']}
+        **Utilization Score**: {row['Utilization_Score__c']}%
+        **AI Suggestion**: {row['AI_Suggestion__c']} (Confidence: {row['Suggestion_Confidence__c']:.2f}%)
+        **Last Maintenance**: {row['Last_Maintenance__c']}
+        """
+        cards += card + "\n---\n"
+    return cards if cards else "No equipment matches the filters."
+
+# Generate heatmap
+def generate_heatmap(df):
+    dates = pd.date_range(start="2025-05-01", end="2025-05-07", freq="D")
+    equipment = df["Equipment_ID__c"].unique()
+    heatmap_data = np.random.rand(len(equipment), len(dates)) * 100  # Mock data
+    plt.figure(figsize=(10, 6))
+    sns.heatmap(heatmap_data, xticklabels=dates.strftime("%Y-%m-%d"), yticklabels=equipment, cmap="YlGnBu")
+    plt.title("Weekly Utilization Heatmap")
+
+    buffer = io.BytesIO()
+    plt.savefig(buffer, format="png")
+    buffer.seek(0)
+    img_str = base64.b64encode(buffer.getvalue()).decode()
+    plt.close()
+    return f"![Heatmap](data:image/png;base64,{img_str})"
+
+# Export data as CSV
+def export_csv(df):
+    return df.to_csv(index=False)
+
+# Main Gradio interface
+def main(project_filter="All", suggestion_filter="All"):
+    df = load_data()
+    df = predict_suggestions(df)
+    cards = generate_cards(df, project_filter, suggestion_filter)
+    heatmap = generate_heatmap(df)
+    csv_data = export_csv(df)
+    return cards, heatmap, csv_data
+
+# Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# Equipment Utilization Dashboard")
+
+    with gr.Row():
+        project_filter = gr.Dropdown(choices=["All", "PRJ001", "PRJ002"], label="Filter by Project")
+        suggestion_filter = gr.Dropdown(choices=["All", "Move", "Pause Rent", "Repair", "Replace"], label="Filter by Suggestion")
+
+    with gr.Row():
+        cards_output = gr.Markdown(label="Equipment Cards")
+
+    with gr.Row():
+        heatmap_output = gr.Markdown(label="Utilization Heatmap")
+
+    with gr.Row():
+        export_button = gr.File(label="Download CSV", value=lambda: export_csv(load_data()))
+
+    project_filter.change(main, inputs=[project_filter, suggestion_filter], outputs=[cards_output, heatmap_output, export_button])
+    suggestion_filter.change(main, inputs=[project_filter, suggestion_filter], outputs=[cards_output, heatmap_output, export_button])
+
+if __name__ == "__main__":
+    demo.launch()
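
The new `load_data()` reads `equipment_data.csv`, which is not part of this commit. A minimal sketch of a compatible file, assuming only the columns the updated code actually references (the row values below are placeholders, not real data):

```python
# Hypothetical helper to write a placeholder equipment_data.csv.
# Column names mirror the fields used in load_data(), predict_suggestions(),
# and generate_cards(); the values are illustrative only.
import pandas as pd

sample = pd.DataFrame({
    "Equipment_ID__c": ["EQ-001", "EQ-002", "EQ-003"],
    "Project_ID__c": ["PRJ001", "PRJ002", "PRJ001"],
    "Usage_Hours__c": [120, 140, 100],
    "Idle_Hours__c": [30, 20, 50],
    "Utilization_Score__c": [75.0, 85.0, 60.0],
    "Last_Maintenance__c": ["2025-04-15", "2025-04-20", "2025-04-10"],
})
sample.to_csv("equipment_data.csv", index=False)
```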