Added all my project files and deployed my app
- Dockerfile.txt +19 -0
- main.py +86 -0
- model.pkl +3 -0
- requirements.txt +5 -0
- scaler.pkl +3 -0
Dockerfile.txt
ADDED
@@ -0,0 +1,19 @@
+# Use the official Python image as the base image
+FROM python:3.9
+
+# Set the working directory in the container
+WORKDIR /code
+
+# Copy the requirements.txt file
+COPY requirements.txt .
+
+# Install the Python dependencies
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+# Copy all the files in the app folder
+COPY app/ .
+
+EXPOSE 7860
+
+# Start the FastAPI app with Uvicorn when the container starts
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
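A couple of notes on this Dockerfile. The pip install path /code/requirements.txt resolves correctly because WORKDIR /code makes the earlier COPY requirements.txt . land at that path. However, COPY app/ . assumes main.py and the pickle files live in an app/ folder, while this commit adds them at the repository root; if they stay at the root, COPY . . would be the matching instruction. Also, Docker looks for a file literally named Dockerfile by default, so the .txt extension would need to be dropped (or the file passed explicitly with -f) for builds to pick it up. Locally, the image can be built and run with, for example, docker build -t sepsis-api . followed by docker run -p 7860:7860 sepsis-api, where sepsis-api is just a placeholder tag.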
main.py
ADDED
@@ -0,0 +1,86 @@
+from fastapi import FastAPI
+from pydantic import BaseModel
+import pickle
+import pandas as pd
+import numpy as np
+import uvicorn
+
+# Create the app
+app = FastAPI(title="API")
+
+# Load the model and scaler
+def load_model_and_scaler():
+    with open("model.pkl", "rb") as f1, open("scaler.pkl", "rb") as f2:
+        return pickle.load(f1), pickle.load(f2)
+
+model, scaler = load_model_and_scaler()
+
+def predict(df, endpoint="simple"):
+    # Scaling
+    scaled_df = scaler.transform(df)  # Scale the input data using the pre-fitted scaler
+
+    # Prediction
+    prediction = model.predict_proba(scaled_df)  # Class probabilities from the pre-trained model
+
+    highest_proba = prediction.max(axis=1)  # Highest probability for each prediction
+
+    # Assign labels from the predicted class (argmax), not the probability value
+    predicted_labels = ["Patient does not have sepsis" if i == 0 else "Patient has sepsis" for i in prediction.argmax(axis=1)]
+    print(f"Predicted labels: {predicted_labels}")  # Log the predicted labels
+    print(highest_proba)  # Log the highest probabilities
+
+    response = []
+    for label, proba in zip(predicted_labels, highest_proba):
+        # Create a response for each prediction with the predicted label and probability
+        output = {
+            "prediction": label,
+            "probability of prediction": str(round(proba * 100)) + '%'  # Probability as a percentage
+        }
+        response.append(output)  # Add the response to the list of responses
+
+    return response  # Return the list of responses
+
+
+class Patient(BaseModel):
+    Blood_Work_R1: int
+    Blood_Pressure: int
+    Blood_Work_R3: int
+    BMI: float
+    Blood_Work_R4: float
+    Patient_age: int
+
+class Patients(BaseModel):
+    all_patients: list[Patient]
+
+    @classmethod
+    def return_list_of_dict(cls, patients: "Patients"):
+        patient_list = []
+        for patient in patients.all_patients:  # for each item in all_patients,
+            patient_dict = patient.dict()  # convert it to a dictionary
+            patient_list.append(patient_dict)  # and add it to patient_list
+        return patient_list
+
+# Endpoints
+# Root endpoint
+@app.get("/")
+def root():
+    return {"message": "Welcome to the Sepsis Prediction API! This API provides endpoints for predicting sepsis based on patient data."}
+
+# Prediction endpoint
+@app.post("/predict")
+def predict_sepsis(patient: Patient):
+    # Make a prediction for a single patient
+    data = pd.DataFrame(patient.dict(), index=[0])
+    parsed = predict(df=data)
+    return {"output": parsed}
+
+# Multiple-prediction endpoint
+@app.post("/predict_multiple")
+def predict_sepsis_for_multiple_patients(patients: Patients):
+    """Make predictions with the passed data."""
+    data = pd.DataFrame(Patients.return_list_of_dict(patients))
+    parsed = predict(df=data, endpoint="multi")
+    return {"output": parsed}
+
+if __name__ == "__main__":
+    uvicorn.run("main:app", reload=True)
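Once the container is up, the two endpoints can be smoke-tested with a small client script. This is a minimal sketch, assuming the server is reachable at localhost:7860 and the requests package is installed; the field values are illustrative placeholders, not real clinical data:

import requests

BASE_URL = "http://localhost:7860"  # assumption: container started with -p 7860:7860

# Placeholder values matching the Patient schema above
patient = {
    "Blood_Work_R1": 120,
    "Blood_Pressure": 80,
    "Blood_Work_R3": 30,
    "BMI": 25.5,
    "Blood_Work_R4": 1.2,
    "Patient_age": 45,
}

# Single prediction
print(requests.post(f"{BASE_URL}/predict", json=patient).json())

# Batch prediction: /predict_multiple expects {"all_patients": [...]}
print(requests.post(f"{BASE_URL}/predict_multiple", json={"all_patients": [patient, patient]}).json())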
model.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e03b9e36a896ad1fdce4008c8f0bbaf734571a4a757197fadcb36821bc00f215
+size 937816
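model.pkl is tracked with Git LFS, so the commit stores only this pointer (spec version, object hash, and size); the roughly 0.9 MB binary itself lives in LFS storage and is fetched on checkout.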
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+fastapi[all]
+scikit-learn==1.2.1
+numpy==1.21.5
+pandas==1.3.1
+pydantic
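The scikit-learn pin matters here: model.pkl and scaler.pkl must be loaded with a version compatible with the one that produced them, or unpickling can fail or warn about a version mismatch. Note also that fastapi[all] already pulls in uvicorn and pydantic, so the explicit pydantic line is redundant but harmless.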
scaler.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aeb4a535138b890f96a1f33737c29b408c1f673a6ed4500882a3ed382f65126b
+size 814