from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import pandas as pd
import pickle

app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # You can replace "*" with your frontend's URL
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Reference dataset: the new record is one-hot encoded together with this
# data so every category column the model saw during training is present
df_1 = pd.read_csv("first_telc.csv")


# Define the input data model
class InputData(BaseModel):
    SeniorCitizen: int
    MonthlyCharges: float
    TotalCharges: float
    gender: str
    Partner: str
    Dependents: str
    PhoneService: str
    MultipleLines: str
    InternetService: str
    OnlineSecurity: str
    OnlineBackup: str
    DeviceProtection: str
    TechSupport: str
    StreamingTV: str
    StreamingMovies: str
    Contract: str
    PaperlessBilling: str
    PaymentMethod: str
    tenure: int


# Load the pickled churn model
with open("model.sav", "rb") as f:
    model = pickle.load(f)


# Endpoint to make predictions
@app.post("/predict")
async def predict_churn(data: InputData):
    # Transform the input data into a DataFrame
    data_list = [[data.SeniorCitizen, data.MonthlyCharges, data.TotalCharges, data.gender, data.Partner,
                  data.Dependents, data.PhoneService, data.MultipleLines, data.InternetService,
                  data.OnlineSecurity, data.OnlineBackup, data.DeviceProtection, data.TechSupport,
                  data.StreamingTV, data.StreamingMovies, data.Contract, data.PaperlessBilling,
                  data.PaymentMethod, data.tenure]]

    new_df = pd.DataFrame(data_list, columns=['SeniorCitizen', 'MonthlyCharges', 'TotalCharges', 'gender',
                                              'Partner', 'Dependents', 'PhoneService', 'MultipleLines',
                                              'InternetService',
                                              'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
                                              'StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling',
                                              'PaymentMethod', 'tenure'])

    # Append the new record to the reference data so get_dummies below
    # produces the same columns as during training
    df_2 = pd.concat([df_1, new_df], ignore_index=True)

    # Group the tenure in bins of 12 months
    labels = ["{0} - {1}".format(i, i + 11) for i in range(1, 72, 12)]
    df_2['tenure_group'] = pd.cut(df_2.tenure.astype(int), range(1, 80, 12), right=False, labels=labels)
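    # Worked example of the binning above: with right=False the bins are
    # left-closed ([1, 13), [13, 25), ...), so tenure=5 maps to "1 - 12"
    # and tenure=24 maps to "13 - 24"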

    # Drop column 'tenure'
    df_2.drop(columns=['tenure'], inplace=True)

    # One-hot encode the categorical features, including the new tenure_group
    new_df_dummies = pd.get_dummies(df_2[['gender', 'SeniorCitizen', 'Partner', 'Dependents', 'PhoneService',
                                          'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup',
                                          'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies',
                                          'Contract', 'PaperlessBilling', 'PaymentMethod', 'tenure_group']])

    # Predict on the last row, i.e. the newly submitted customer
    last_row = new_df_dummies.tail(1)
    try:
        prediction = model.predict(last_row)
        churn_probability = model.predict_proba(last_row)[0, 1]
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"Prediction failed: {exc}") from exc

    if prediction[0] == 1:
        result = {"output_1": "This customer is likely to churn.",
                  "output_2": f"Confidence: {churn_probability * 100:.2f}%"}
    else:
        # Confidence of staying is the complement of the churn probability
        result = {"output_1": "This customer is likely to continue.",
                  "output_2": f"Confidence: {(1 - churn_probability) * 100:.2f}%"}

    return result
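
# --- Usage sketch ---
# A minimal example call, assuming this module is saved as main.py and the
# server is started with `uvicorn main:app` on localhost:8000. The field
# values below are illustrative, not taken from first_telc.csv.
#
# import requests
# payload = {
#     "SeniorCitizen": 0, "MonthlyCharges": 29.85, "TotalCharges": 358.2,
#     "gender": "Female", "Partner": "Yes", "Dependents": "No",
#     "PhoneService": "Yes", "MultipleLines": "No", "InternetService": "DSL",
#     "OnlineSecurity": "Yes", "OnlineBackup": "No", "DeviceProtection": "No",
#     "TechSupport": "No", "StreamingTV": "No", "StreamingMovies": "No",
#     "Contract": "Month-to-month", "PaperlessBilling": "Yes",
#     "PaymentMethod": "Electronic check", "tenure": 12,
# }
# print(requests.post("http://127.0.0.1:8000/predict", json=payload).json())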