ikoghoemmanuell committed on
Commit
689a7a4
1 Parent(s): 146214e

Upload 4 files

Browse files
Files changed (4) hide show
  1. app/main.py +88 -0
  2. app/model.pkl +3 -0
  3. app/requirements.txt +5 -0
  4. app/scaler.pkl +3 -0
app/main.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI
2
+ from pydantic import BaseModel
3
+ import pickle
4
+ import pandas as pd
5
+ import numpy as np
6
+ import uvicorn
7
+ import os
8
+
9
# Instantiate the FastAPI application.
app = FastAPI(title="API")

# Resolve artifact paths relative to this file so the app loads its
# pickles regardless of the working directory it is launched from
# (the original used bare "model.pkl"/"scaler.pkl", which only worked
# when the process was started inside app/).
_BASE_DIR = os.path.dirname(os.path.abspath(__file__))


def load_model_and_scaler():
    """Deserialize the trained classifier and feature scaler from disk.

    Returns:
        tuple: (model, scaler) as loaded by pickle.

    NOTE(review): pickle.load assumes these are trusted artifacts shipped
    with the app — never point these paths at untrusted files.
    """
    model_path = os.path.join(_BASE_DIR, "model.pkl")
    scaler_path = os.path.join(_BASE_DIR, "scaler.pkl")
    with open(model_path, "rb") as f1, open(scaler_path, "rb") as f2:
        return pickle.load(f1), pickle.load(f2)


# Load once at import time so all requests share the same objects.
model, scaler = load_model_and_scaler()
18
+
19
def predict(df, endpoint="simple"):
    """Scale *df*, run the classifier, and build a JSON-serializable response.

    Args:
        df: pandas DataFrame of raw patient features, one row per patient.
        endpoint: tag for the calling endpoint ("simple" or "multi");
            currently informational only — it does not alter behavior.

    Returns:
        list[dict]: one entry per row with the predicted label and the
        model's confidence formatted as a percentage string.
    """
    # Scale the input data using the scaler fitted at training time.
    scaled_df = scaler.transform(df)

    # Class probabilities, shape (n_samples, n_classes).
    prediction = model.predict_proba(scaled_df)

    # Confidence of the winning class for each row.
    highest_proba = prediction.max(axis=1)

    # BUG FIX: the label must come from the predicted *class index*
    # (argmax), not from comparing the probability itself to 0 — the max
    # probability of a 2-class model is >= 0.5 and is never 0, so the old
    # `i == 0` test labeled every patient as having sepsis.
    predicted_classes = prediction.argmax(axis=1)
    predicted_labels = [
        "Patient does not have sepsis" if c == 0 else "Patient has sepsis"
        for c in predicted_classes
    ]
    print(f"Predicted labels: {predicted_labels}")  # Log labels to the terminal
    print(highest_proba)  # Log confidences to the terminal

    # Pair each label with its confidence for the API response.
    response = []
    for label, proba in zip(predicted_labels, highest_proba):
        output = {
            "prediction": label,
            "probability of prediction": str(round(proba * 100)) + '%'  # As a percentage
        }
        response.append(output)

    return response
43
+
44
+
45
class Patient(BaseModel):
    """Request schema for a single patient's vitals and lab results.

    Field names and types mirror the features the model was trained on,
    so they must not be renamed without retraining/re-exporting the model.
    """

    Blood_Work_R1: int
    Blood_Pressure: int
    Blood_Work_R3: int
    BMI: float
    Blood_Work_R4: float
    Patient_age: int
52
+
53
class Patients(BaseModel):
    """Request schema wrapping a batch of Patient records."""

    all_patients: list[Patient]

    @classmethod
    def return_list_of_dict(cls, patients: "Patients"):
        """Flatten a Patients payload into a plain list of feature dicts."""
        # One dict per patient, in request order.
        return [patient.dict() for patient in patients.all_patients]
63
+
64
+ # Endpoints
65
+ # Root Endpoint
66
@app.get("/")
def root():
    """Landing endpoint — a one-line description of the service."""
    message = {"API": "This is an API for sepsis prediction."}
    return message
69
+
70
+ # Prediction endpoint
71
@app.post("/predict")
def predict_sepsis(patient: Patient):
    """Predict sepsis for a single patient payload."""
    # Build a one-row frame from the request body, then delegate.
    frame = pd.DataFrame(patient.dict(), index=[0])
    parsed = predict(df=frame)
    return {"output": parsed}
77
+
78
+ # Multiple Prediction Endpoint
79
@app.post("/predict_multiple")
def predict_sepsis_for_multiple_patients(patients: Patients):
    """Make prediction with the passed data"""
    # Flatten the batch payload, then run the shared prediction path.
    records = Patients.return_list_of_dict(patients)
    parsed = predict(df=pd.DataFrame(records), endpoint="multi")
    return {"output": parsed}
85
+
86
if __name__ == "__main__":
    # Development entry point: serve this module's app with auto-reload
    # when the file is executed directly (not when imported by a server).
    uvicorn.run("main:app", reload=True)
88
+
app/model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e03b9e36a896ad1fdce4008c8f0bbaf734571a4a757197fadcb36821bc00f215
3
+ size 937816
app/requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ fastapi[all]
2
+ scikit-learn==1.2.1
3
+ numpy
4
+ pandas==1.3.1
5
+ pydantic
app/scaler.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb4a535138b890f96a1f33737c29b408c1f673a6ed4500882a3ed382f65126b
3
+ size 814