import gradio as gr
import pandas as pd
import matplotlib.pyplot as plt

# Uploaded spreadsheet; set by upload_file() and None until a file has been loaded.
initial_df = None

def search_data(sku, group_name, date):
    """Filter the uploaded data by any combination of SKU, Group Name and Date,
    then report record count, compliance and Units Per Hour statistics."""
    if initial_df is None:
        # Nothing has been uploaded yet, so return empty outputs instead of raising.
        return 'No data loaded - upload a sheet first.', 0, 0, 0, 0, plt.figure()

    # A blank field means "no filter"; each non-blank field narrows the result.
    filtered_df = initial_df
    if sku != '':
        filtered_df = filtered_df[filtered_df['Current Job'] == sku]
    if group_name != '':
        filtered_df = filtered_df[filtered_df['Group Name'] == group_name]
    if date != '':
        # The Date column is matched against the text exactly as entered (yyyy-mm-dd).
        filtered_df = filtered_df[filtered_df['Date'] == date]

    records = len(filtered_df)

    compliant_records = len(filtered_df[(filtered_df['Units Per Hour'] >= filtered_df['Lower Target']) &
                                        (filtered_df['Units Per Hour'] <= filtered_df['Upper Target'])])

    # Convert to a percentage before rounding so two decimal places are kept.
    compliance_percentage = round(100 * compliant_records / records, 2) if records else 0

    avg_units_per_hour = round(filtered_df['Units Per Hour'].mean(), 2) if records else 0
    stdev_units_per_hour = round(filtered_df['Units Per Hour'].std(), 2) if records else 0

    # Most frequent Description among the matching rows (blank if nothing matched).
    most_common_value = filtered_df['Description'].value_counts().idxmax() if records else ''

    fig1 = plt.figure()
    plt.hist(filtered_df['Units Per Hour'], bins=20, color='skyblue', edgecolor='black')
    plt.xlabel('Units Per Hour')
    plt.ylabel('Frequency')
    plt.grid(True)

    return most_common_value, records, compliance_percentage, avg_units_per_hour, stdev_units_per_hour, fig1




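# Upload tab: read the Excel template, derive Units Per Hour and report overall compliance.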
def upload_file(filepath):
    """Load the uploaded Excel template and return the initial compliance summary."""
    global initial_df
    initial_df = prepare_df(filepath)
    return initial_analysis(initial_df)

def initial_analysis(df):
    """Summarise how many observations fall inside the Lower/Upper Target band."""
    compliant_df = df[(df['Units Per Hour'] >= df['Lower Target']) & (df['Units Per Hour'] <= df['Upper Target'])]
    non_compliant_df = df[(df['Units Per Hour'] < df['Lower Target']) | (df['Units Per Hour'] > df['Upper Target'])]

    total_observations = len(df)
    # Convert to percentages before rounding so two decimal places are kept.
    pct_compliant = round(100 * len(compliant_df) / total_observations, 2) if total_observations else 0
    pct_non_compliant = round(100 * len(non_compliant_df) / total_observations, 2) if total_observations else 0

    return total_observations, pct_compliant, pct_non_compliant

def prepare_df(file_path):
    """Read the Excel template and derive the columns used by the app.

    Expects at least: Current Job, Group Name, Description, Batch Count,
    Lower Target, Upper Target, Optimal Cases Per Hour and the
    Start/End of Batch Date/Time columns.
    """
    df = pd.read_excel(file_path)

    # SKUs are compared as strings in the search tab.
    df['Current Job'] = df['Current Job'].astype(str)

    df['Start of Batch Date/Time'] = pd.to_datetime(df['Start of Batch Date/Time'])
    df['End of Batch Date/Time'] = pd.to_datetime(df['End of Batch Date/Time'])

    # Batch length in hours, then throughput for each batch.
    df['Batch Length (Hours)'] = (df['End of Batch Date/Time'] - df['Start of Batch Date/Time']).dt.total_seconds() / 3600
    df['Units Per Hour'] = df['Batch Count'] / df['Batch Length (Hours)']

    # Drop rows that have no Optimal Cases Per Hour value.
    df.dropna(subset=['Optimal Cases Per Hour'], inplace=True)

    return df

        
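# Search tab: filter the uploaded data by SKU, Group Name and/or Date and show summary statistics.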
with gr.Blocks() as tab_search_by_sku:
    with gr.Row():
        sku = gr.Textbox(label="SKU")
        group_name = gr.Textbox(label="Group Name")
        date = gr.Textbox(label="Date (yyyy-mm-dd)")
        
    with gr.Row():
        description = gr.Textbox(label="Description")
        
    with gr.Row():
        records = gr.Textbox(label="Records")
        compliance = gr.Textbox(label="Compliance %")
        avgunits = gr.Textbox(label="Avg Units/Hr")
        stddev = gr.Textbox(label="Std Dev Units/Hr")
        
    with gr.Row():
        graph = gr.Plot(label="Units Per Hour Distribution")
        
    with gr.Row():
        submit_btn = gr.Button('Submit')
        submit_btn.click(
            fn=search_data, 
            inputs=[sku, group_name, date], 
            outputs=[description, records, compliance, avgunits, stddev, graph]
        )

#         clear_btn = gr.Button('Clear')        
#         clear_btn.click(
#             fn=lambda: ("", "", "", "", "", plt.figure()),
#             inputs=[],
#             outputs=[description, records, compliance, avgunits, stddev, graph]
#         )
            
    


tab_files = gr.Interface(
    fn=upload_file,
    inputs=[
        gr.File(label="Upload Template File", file_types=[".xlsx"], file_count="single")
    ],
    outputs=[
        gr.Textbox(label="Total Observations", interactive=False),
        gr.Textbox(label="Compliant Observations %", interactive=False),
        gr.Textbox(label="Non-Compliant Observations %", interactive=False)
    ],
    allow_flagging="never"
)
    
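# Combine the upload and search tabs into a single tabbed app.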
demo = gr.TabbedInterface([tab_files, tab_search_by_sku], ["Upload Sheet", "Search"])

demo.launch(auth=("BrianA", "Brian@Muffintown1"))