# +++
import os
import uuid
import joblib
import json
# IMPORTANT: the "gradio" package is already installed in my current Virtual Environment (VEnvDSDIL_gpu_Py3.12), via: pip install -q gradio_client
# Do NOT install the "gradio_client" package again through Anaconda/conda, otherwise it will break the existing installation.
import gradio as gr
import pandas as pd
# The "huggingface_hub" package must be installed first in the current Python Virtual Environment, with pip (not conda):
#   pip install huggingface_hub
# i.e., from the command-line interface within the activated Virtual Environment:
# (VEnvDSDIL_gpu_Py3.12) epalvarez@DSDILmStation01:~ $ pip install huggingface_hub
from huggingface_hub import CommitScheduler
from pathlib import Path
log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent
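# Make sure the local logs/ folder exists. This is only strictly needed if the
# commented-out logging block further below is re-enabled, but it is harmless otherwise.
log_folder.mkdir(parents=True, exist_ok=True)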
# Scheduler that commits the contents of the log folder to a Hub dataset repo every 2 minutes
# (the `every` parameter of CommitScheduler is expressed in minutes):
# scheduler = CommitScheduler(
#     repo_id="machine-failure-logs",
#     repo_type="dataset",
#     folder_path=log_folder,
#     path_in_repo="data",
#     every=2
# )
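# To enable logging, uncomment the CommitScheduler above and the `with scheduler.lock:` block
# inside predict_machine_failure() below. CommitScheduler needs a Hugging Face token with
# write access available in the environment; a repo_id without a namespace (as above) is
# created under the authenticated user's account.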
machine_failure_predictor = joblib.load('model_mf.joblib')
air_temperature_input = gr.Number(label='Air temperature [K]')
process_temperature_input = gr.Number(label='Process temperature [K]')
rotational_speed_input = gr.Number(label='Rotational speed [rpm]')
torque_input = gr.Number(label='Torque [Nm]')
tool_wear_input = gr.Number(label='Tool wear [min]')
type_input = gr.Dropdown(
    ['L', 'M', 'H'],
    label='Type'
)
model_output = gr.Label(label="Machine failure")
def predict_machine_failure(air_temperature, process_temperature, rotational_speed, torque, tool_wear, type):
    sample = {
        'Air temperature [K]': air_temperature,
        'Process temperature [K]': process_temperature,
        'Rotational speed [rpm]': rotational_speed,
        'Torque [Nm]': torque,
        'Tool wear [min]': tool_wear,
        'Type': type
    }
    data_point = pd.DataFrame([sample])
    prediction = machine_failure_predictor.predict(data_point).tolist()
    # Each prediction would be appended to the local log file; the CommitScheduler defined
    # above (currently commented out) pushes the log folder to the Hugging Face dataset on
    # its own schedule, outside this function:
    # with scheduler.lock:
    #     with log_file.open("a") as f:
    #         f.write(json.dumps(
    #             {
    #                 'Air temperature [K]': air_temperature,
    #                 'Process temperature [K]': process_temperature,
    #                 'Rotational speed [rpm]': rotational_speed,
    #                 'Torque [Nm]': torque,
    #                 'Tool wear [min]': tool_wear,
    #                 'Type': type,
    #                 'prediction': prediction[0]
    #             }
    #         ))
    #         f.write("\n")
    return prediction[0]
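# Quick local sanity check with hypothetical input values (uncomment to try):
# print(predict_machine_failure(300.0, 310.0, 1500, 40.0, 100, 'L'))
# The returned value is the single predicted machine-failure label for this data point.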
demo = gr.Interface(
    fn=predict_machine_failure,
    inputs=[air_temperature_input, process_temperature_input, rotational_speed_input,
            torque_input, tool_wear_input, type_input],
    outputs=model_output,
    title="Machine Failure Predictor",
    description="This API allows you to predict the machine failure status of a piece of equipment",
    allow_flagging="auto",
    concurrency_limit=8
)
demo.queue()
demo.launch(share=False)
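# Once the app is running, it can also be queried programmatically with the "gradio_client"
# package mentioned at the top of this file. A minimal sketch (the URL is a placeholder for
# wherever this app is actually served, e.g. a Hugging Face Space):
#
# from gradio_client import Client
# client = Client("http://127.0.0.1:7860")
# result = client.predict(300.0, 310.0, 1500, 40.0, 100, 'L', api_name="/predict")
# print(result)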