# test_churn/main.py
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import pandas as pd
import pickle  # used to load the serialized churn model

app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # you can replace "*" with your frontend's URL
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
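
# first_telc.csv holds the reference customer records; each incoming request is
# appended to this frame before one-hot encoding so that pd.get_dummies produces
# the full set of columns the trained model expects.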
df_1 = pd.read_csv("first_telc.csv")
# Define the input data model
class InputData(BaseModel):
    SeniorCitizen: int
    MonthlyCharges: float
    TotalCharges: float
    gender: str
    Partner: str
    Dependents: str
    PhoneService: str
    MultipleLines: str
    InternetService: str
    OnlineSecurity: str
    OnlineBackup: str
    DeviceProtection: str
    TechSupport: str
    StreamingTV: str
    StreamingMovies: str
    Contract: str
    PaperlessBilling: str
    PaymentMethod: str
    tenure: int
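
# Example request body for /predict (illustrative values; the categorical levels
# are assumed to match those present in first_telc.csv and seen by the model):
# {
#   "SeniorCitizen": 0, "MonthlyCharges": 29.85, "TotalCharges": 29.85,
#   "gender": "Female", "Partner": "Yes", "Dependents": "No",
#   "PhoneService": "No", "MultipleLines": "No phone service",
#   "InternetService": "DSL", "OnlineSecurity": "No", "OnlineBackup": "Yes",
#   "DeviceProtection": "No", "TechSupport": "No", "StreamingTV": "No",
#   "StreamingMovies": "No", "Contract": "Month-to-month",
#   "PaperlessBilling": "Yes", "PaymentMethod": "Electronic check",
#   "tenure": 1
# }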
with open("model.sav", "rb") as f:
    model = pickle.load(f)
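# model.sav is expected to be a scikit-learn style classifier exposing
# predict() and predict_proba(), both of which are used in /predict below.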
# Endpoint to make predictions
@app.post("/predict")
async def predict_churn(data: InputData):
    # Transform the input data into a DataFrame
    data_list = [[data.SeniorCitizen, data.MonthlyCharges, data.TotalCharges, data.gender, data.Partner,
                  data.Dependents, data.PhoneService, data.MultipleLines, data.InternetService,
                  data.OnlineSecurity, data.OnlineBackup, data.DeviceProtection, data.TechSupport,
                  data.StreamingTV, data.StreamingMovies, data.Contract, data.PaperlessBilling,
                  data.PaymentMethod, data.tenure]]
    new_df = pd.DataFrame(data_list, columns=['SeniorCitizen', 'MonthlyCharges', 'TotalCharges', 'gender',
                                              'Partner', 'Dependents', 'PhoneService', 'MultipleLines',
                                              'InternetService', 'OnlineSecurity', 'OnlineBackup',
                                              'DeviceProtection', 'TechSupport', 'StreamingTV',
                                              'StreamingMovies', 'Contract', 'PaperlessBilling',
                                              'PaymentMethod', 'tenure'])
    df_2 = pd.concat([df_1, new_df], ignore_index=True)

    # Group the tenure in bins of 12 months
    labels = ["{0} - {1}".format(i, i + 11) for i in range(1, 72, 12)]
    df_2['tenure_group'] = pd.cut(df_2.tenure.astype(int), range(1, 80, 12), right=False, labels=labels)
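    # The resulting labels are '1 - 12', '13 - 24', '25 - 36', '37 - 48',
    # '49 - 60' and '61 - 72', covering tenures of 1 to 72 months.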
    # Drop column 'tenure'
    df_2.drop(columns=['tenure'], inplace=True)
    new_df_dummies = pd.get_dummies(df_2[['gender', 'SeniorCitizen', 'Partner', 'Dependents', 'PhoneService',
                                          'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup',
                                          'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies',
                                          'Contract', 'PaperlessBilling', 'PaymentMethod', 'tenure_group']])
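    # Only the last row of the encoded frame corresponds to the incoming request,
    # so only that row (tail(1)) is scored below.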
    # Make predictions
    prediction = model.predict(new_df_dummies.tail(1))[0]
    probability = model.predict_proba(new_df_dummies.tail(1))[0, 1]

    if prediction == 1:
        result = {"output_1": "This customer is likely to churn!",
                  "output_2": f"Confidence: {probability * 100:.2f}%"}
    else:
        result = {"output_1": "This customer is likely to continue!",
                  "output_2": f"Confidence: {probability * 100:.2f}%"}
    return result
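

# Usage sketch (a minimal entry point, assuming uvicorn is installed; the original
# file relies on an external server command instead):
#   uvicorn main:app --host 0.0.0.0 --port 8000
# then POST a JSON body matching InputData (see the example payload above the
# model-loading code) to http://localhost:8000/predict.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)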