NewtonKimathi committed on
Commit
59e08fd
•
1 Parent(s): 3065522

Customer Churn App

Files changed (4)
  1. app.py +94 -0
  2. export/App_toolkit.pkl +3 -0
  3. export/requirements.txt +7 -0
  4. xgb_model.json +0 -0
app.py ADDED
@@ -0,0 +1,94 @@
+ # IMPORT LIBRARIES
+ import gradio as gr
+ import numpy as np
+ import pandas as pd
+ import pickle
+ import xgboost as xgb
+ from xgboost import XGBClassifier
+
+
+
+ # Function to load the ML toolkit (a pickled dict holding the fitted encoder and scaler)
+ def load_ml_toolkit(file_path):
+     with open(file_path, "rb") as file:
+         loaded_toolkit = pickle.load(file)
+     return loaded_toolkit
+
+
+ # Import the toolkit
+ loaded_toolkit = load_ml_toolkit(r"export/App_toolkit.pkl")
+
+ encoder = loaded_toolkit["encoder"]
+ scaler = loaded_toolkit["scaler"]
+
+ # Import the model
+ model = XGBClassifier()
+ model.load_model(r"xgb_model.json")
+
+
+ # Columns to work with
+ input_cols = ["tenure", "montant", "frequence_rech", "arpu_segment", "frequence", "data_volume", "regularity", "freq_top_pack"]
+ columns_to_scale = ["montant", "frequence_rech", "arpu_segment", "frequence", "data_volume", "regularity", "freq_top_pack"]
+ categoricals = ["tenure"]
+
+
+
+ # Function to process the inputs and return the prediction
+ def process_and_predict(*args, encoder=encoder, scaler=scaler, model=model):
+
+     # Convert inputs into a DataFrame
+     input_data = pd.DataFrame([args], columns=input_cols)
+
+     # Encode the categorical column
+     input_data["tenure"] = encoder.transform(input_data["tenure"])
+
+     # Scale the numeric columns
+     input_data[columns_to_scale] = scaler.transform(input_data[columns_to_scale])
+
+     # Make the prediction, returning the churn probability so the Label shows confidences
+     churn_probability = float(model.predict_proba(input_data)[0][1])
+     return {"Prediction: CHURN": churn_probability, "Prediction: STAY": 1 - churn_probability}
+
+
+ # App Interface
+ with gr.Blocks() as turn_on_the_gradio:
+     gr.Markdown("# 📞 EXPRESSO TELECOM CUSTOMER CHURN ☎️")
+     gr.Markdown('''
+
+ ## WELCOME CHERISHED USER 👋
+
+ ### PLEASE GO AHEAD AND MAKE A PREDICTION 🙂''')
+
+     # Receiving Inputs
+
+     gr.Markdown("**SECTION ONE**")
+     gr.Markdown("**CUSTOMER NETWORK ACTIVITIES**")
+     with gr.Row():
+         montant = gr.Slider(label="Top-up amount", minimum=20, step=1, interactive=True, value=1, maximum=500000)
+         data_volume = gr.Slider(label="Number of connections", minimum=0, step=1, interactive=True, value=1, maximum=2000000)
+
+
+     with gr.Row():
+         frequence_rech = gr.Slider(label="Recharge Frequency", minimum=1, step=1, interactive=True, value=1, maximum=220)
+         freq_top_pack = gr.Slider(label="Top Package Activation Frequency", minimum=1, step=1, interactive=True, value=1, maximum=1050)
+         regularity = gr.Slider(label="Regularity (out of 90 days)", minimum=1, step=1, interactive=True, value=1, maximum=90)
+         tenure = gr.Dropdown(label="Tenure (time on the network)", choices=["D 3-6 month", "E 6-9 month", "F 9-12 month", "G 12-15 month", "H 15-18 month", "I 18-21 month", "J 21-24 month", "K > 24 month"], value="K > 24 month")
+
+
+     gr.Markdown("**SECTION TWO**")
+     gr.Markdown("**CUSTOMER INCOME DETAILS**")
+     with gr.Row():
+         arpu_segment = gr.Slider(label="Income over the last 90 days", step=1, maximum=287000, interactive=True)
+         frequence = gr.Slider(label="Number of times the customer has made an income", step=1, minimum=1, maximum=91, interactive=True)
+
+     # Output Prediction
+     output = gr.Label("...")
+     submit_button = gr.Button("Submit")
+
+
+
+     submit_button.click(fn=process_and_predict,
+                         outputs=output,
+                         inputs=[tenure, montant, frequence_rech, arpu_segment, frequence, data_volume, regularity, freq_top_pack])
+
+ turn_on_the_gradio.launch(inbrowser=True)
export/App_toolkit.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:501d2fdba2a9b4e73eb95f028a2bef643a6ca8c4c1b8bd190fc22378632a0a81
+ size 362950
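
app.py expects this pickled toolkit to be a dict holding fitted "encoder" and "scaler" objects for the "tenure" column and the numeric columns. A minimal sketch of how such a file could be produced is shown below; the specific preprocessors (LabelEncoder, StandardScaler) and the training-data path are assumptions for illustration, not taken from this commit.

```python
# Hypothetical sketch: build export/App_toolkit.pkl with the structure app.py reads.
# LabelEncoder/StandardScaler and "Train.csv" are assumptions, not part of this commit.
import pickle
import pandas as pd
from sklearn.preprocessing import LabelEncoder, StandardScaler

numeric_cols = ["montant", "frequence_rech", "arpu_segment", "frequence",
                "data_volume", "regularity", "freq_top_pack"]

train_df = pd.read_csv("Train.csv")  # hypothetical training data

toolkit = {
    "encoder": LabelEncoder().fit(train_df["tenure"]),
    "scaler": StandardScaler().fit(train_df[numeric_cols]),
}

with open("export/App_toolkit.pkl", "wb") as f:
    pickle.dump(toolkit, f)
```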
export/requirements.txt ADDED
@@ -0,0 +1,7 @@
+ requests==2.30.0
+ numpy==1.23.5
+ pandas==1.5.0
+ gradio
+ seaborn==0.12.2
+ lightgbm==3.3.5
+ xgboost==1.7.5
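
With these dependencies installed (e.g. `pip install -r export/requirements.txt`), the app should launch locally via `python app.py`. Unpickling the encoder and scaler in App_toolkit.pkl presumably also requires scikit-learn, which is not pinned here.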
xgb_model.json ADDED
The diff for this file is too large to render.