ellyothim commited on
Commit
c84fa5d
1 Parent(s): 8349224

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +202 -0
app.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Capstone Gradio App Embedding.ipynb
3
+
4
+ Automatically generated by Colaboratory.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1zsT_lHGVHzG29XSb4tMF3UdA6glyWnRx
8
+ """
9
+
10
+ # from google.colab import drive
11
+ # drive.mount('/content/drive')
12
+
13
+ #!pip install gradio
14
+ #!pip install category_encoders
15
+
16
+ """### **DATA PREP**"""
17
+
18
+ import pandas as pd
19
+ import numpy as np
20
+ import gradio as gr
21
+ from sklearn.model_selection import train_test_split
22
+ from sklearn.model_selection import train_test_split, cross_val_score
23
+ from sklearn.metrics import accuracy_score, confusion_matrix, recall_score, precision_recall_curve, f1_score
24
+ from sklearn.preprocessing import StandardScaler
25
+ from sklearn.ensemble import ExtraTreesRegressor
26
+ from sklearn.preprocessing import LabelEncoder
27
+
28
+
29
# ---- Load and clean the training data ----
# A raw string (r'...') is required here: in a plain literal the '\U' in
# 'C:\Users' is parsed as a unicode-escape and raises a SyntaxError on
# Python 3.
train = pd.read_csv(r'C:\Users\user\Desktop\ChurnAnalysis_CapstonePred\Train.csv')

# Strip stray tab characters from every string cell to make the model more robust.
train = train.applymap(lambda x: x.replace("\t", '') if isinstance(x, str) else x)
# NOTE(review): this replaces a single space with a single space (a no-op as
# written); presumably a double space was intended -- confirm against the raw
# CSV before changing the behavior.
train = train.applymap(lambda x: x.replace(" ", ' ') if isinstance(x, str) else x)

# Drop identifier / constant / sparse columns that we don't need for modeling.
train.drop(columns=['MRG', 'user_id', 'ZONE1', 'ZONE2', 'TOP_PACK'], inplace=True)
38
+
39
+
40
# ---- Impute missing values, grouped by imputation strategy ----
# Categorical columns: carry the previous row's value forward.
for cat_col in ("REGION", "TENURE"):
    train[cat_col].fillna(method='ffill', inplace=True)

# Skewed monetary columns: fill with the column median.
for median_col in ("MONTANT", "REVENUE"):
    train[median_col].fillna(train[median_col].median(), inplace=True)

# Usage counters: a missing value is treated as "no activity", i.e. zero.
for zero_col in ("FREQUENCE_RECH", "ARPU_SEGMENT", "FREQUENCE",
                 "DATA_VOLUME", "ON_NET", "ORANGE", "TIGO"):
    train[zero_col].fillna(0, inplace=True)

# Activity-level columns: fill with the column mean.
for mean_col in ("REGULARITY", "FREQ_TOP_PACK"):
    train[mean_col].fillna(train[mean_col].mean(), inplace=True)
53
+
54
# ---- Encode the TENURE buckets as string codes ----
# NOTE(review): the codes are not monotonic in tenure length (e.g.
# 'J 21-24 month' -> '4' but 'G 12-15 month' -> '5'); kept exactly as the
# original mapping so the trained model's inputs are unchanged -- confirm
# whether an ordered encoding was intended.
_tenure_codes = {
    'D 3-6 month': '1',
    'E 6-9 month': '2',
    'F 9-12 month': '3',
    'J 21-24 month': '4',
    'G 12-15 month': '5',
    'H 15-18 month': '6',
    'I 18-21 month': '7',
    'K > 24 month': '8',
}
for _bucket, _code in _tenure_codes.items():
    train['TENURE'] = train['TENURE'].str.replace(_bucket, _code, regex=True)
62
+
63
+ # train['TENURE'].value_counts()
64
+
65
# ---- Encode REGION names as string codes '1'..'14' ----
# The order below fixes each region's code (DAKAR -> '1', ..., MATAM -> '14').
_regions = ['DAKAR', 'THIES', 'SAINT-LOUIS', 'LOUGA', 'KAOLACK', 'DIOURBEL',
            'TAMBACOUNDA', 'KAFFRINE', 'KOLDA', 'FATICK', 'ZIGUINCHOR',
            'SEDHIOU', 'KEDOUGOU', 'MATAM']
region_mapping = {name: str(i) for i, name in enumerate(_regions, start=1)}

# Map the region names to their codes.
train['REGION'] = train['REGION'].replace(region_mapping)

# Inspect the encoded distribution if needed:
# print(train['REGION'].value_counts())
88
+
89
"""## **FITTING AND TRAINING**"""
"""Select target and features"""

y = train['CHURN']
x = train.drop(columns='CHURN', axis=1)
X_train, X_test, y_train, y_test = train_test_split(
    x, y, test_size=0.5, random_state=45)  # , stratify=y)

# Further split X_train and y_train into train and validation sets
X_train, X_val, y_train, y_val = train_test_split(
    X_train, y_train, test_size=0.3, random_state=1)  # , stratify=y)

"""### SCALE NUMERICAL COLUMNS"""
num_cols = ['MONTANT', 'FREQUENCE_RECH', 'REVENUE', 'ARPU_SEGMENT', 'FREQUENCE',
            'DATA_VOLUME', 'ON_NET', 'ORANGE', 'TIGO',
            'REGULARITY', 'FREQ_TOP_PACK']

scaler = StandardScaler()

# Fit the scaler on the training split only, then apply the SAME fitted
# statistics to the validation split. (The original called fit_transform on
# X_val, which re-fits on validation data -- a data-leakage bug.)
X_train[num_cols] = scaler.fit_transform(X_train[num_cols])
X_val[num_cols] = scaler.transform(X_val[num_cols])

# NOTE(review): CHURN is thresholded at 0.9 downstream, which suggests a
# binary target; an ExtraTreesClassifier with predict_proba would be the
# conventional choice. Kept as a regressor so existing behavior is unchanged.
model = ExtraTreesRegressor(
    n_estimators=100,  # Number of trees in the forest
    max_depth=10,      # Maximum depth of the tree
    random_state=42,   # Random seed for reproducibility
)
# Train the model
MODEL = model.fit(X_train, y_train)

"""## **Check if our model is working**"""
# NOTE(review): X_test is never passed through `scaler`, so these predictions
# run on unscaled features -- confirm whether X_test should be transformed.
y_pred = MODEL.predict(X_test)
121
+
122
+
123
def classifier_1(result):
    """Map a raw model score to a human-readable churn verdict.

    A score strictly above 0.9 is reported as churn; anything else
    (including exactly 0.9) is reported as no churn.
    """
    return "Customer will churn" if result > 0.9 else "Customer will not churn"
128
+
129
def predict(REGION, TENURE, MONTANT, FREQUENCE_RECH, REVENUE, ARPU_SEGMENT,
            FREQUENCE, DATA_VOLUME, ON_NET, ORANGE, TIGO, REGULARITY,
            FREQ_TOP_PACK):
    """Score one customer and return a HighlightedText-style result.

    Builds a single-row feature array in the order the model was trained
    on, scores it with the module-level MODEL, and returns a one-element
    list of (flag, verdict): flag 0 for churn, 1 for no churn, exactly as
    the original implementation did.
    """
    features = np.array([[REGION, TENURE, MONTANT, FREQUENCE_RECH, REVENUE,
                          ARPU_SEGMENT, FREQUENCE, DATA_VOLUME, ON_NET,
                          ORANGE, TIGO, REGULARITY, FREQ_TOP_PACK]])
    verdict = classifier_1(MODEL.predict(features)[0])
    flag = 0 if verdict == "Customer will churn" else 1
    return [(flag, verdict)]
140
+
141
# tenure = tenure_dropdown
# ---- Input sliders, one per model feature ----
# Each label now names the feature its slider actually sets. (In the
# original, labels were shifted by one slider -- e.g. REVENUE was labeled
# "ARPU_SEGMENT" and TIGO was labeled "ZONE1", a column that was dropped.)
REGION = gr.inputs.Slider(minimum=1, maximum=13, label='REGION (location of each client)')
TENURE = gr.inputs.Slider(minimum=1, maximum=8, label="TENURE (duration in network)")
MONTANT = gr.inputs.Slider(minimum=22, maximum=470000, label="MONTANT (top-up amount)")
FREQUENCE_RECH = gr.inputs.Slider(minimum=1, maximum=131, label="FREQUENCE_RECH")
REVENUE = gr.inputs.Slider(minimum=1, maximum=532177, label="REVENUE")
ARPU_SEGMENT = gr.inputs.Slider(minimum=1, maximum=177392, label="ARPU_SEGMENT")
FREQUENCE = gr.inputs.Slider(minimum=1, maximum=91, label="FREQUENCE")
DATA_VOLUME = gr.inputs.Slider(minimum=0, maximum=1702309, label="DATA_VOLUME")
ON_NET = gr.inputs.Slider(minimum=0, maximum=36687, label="ON_NET")
ORANGE = gr.inputs.Slider(minimum=0, maximum=6721, label="ORANGE")
TIGO = gr.inputs.Slider(minimum=0, maximum=4174, label="TIGO")
REGULARITY = gr.inputs.Slider(minimum=1, maximum=62, label="REGULARITY")
FREQ_TOP_PACK = gr.inputs.Slider(minimum=1, maximum=592, label="FREQ_TOP_PACK")

# Output: the verdict text, colored per label via the color map.
op = gr.outputs.HighlightedText(color_map={"Customer will churn": "pink",
                                           "Customer will not churn": "yellow"})

# Build and launch the app. (gr.inputs / gr.outputs is the legacy pre-3.x
# Gradio API -- matches the version this script was written against.)
gr.Interface(predict,
             inputs=[REGION, TENURE, MONTANT, FREQUENCE_RECH, REVENUE,
                     ARPU_SEGMENT, FREQUENCE, DATA_VOLUME, ON_NET, ORANGE,
                     TIGO, REGULARITY, FREQ_TOP_PACK],
             outputs=op,
             live=True).launch(debug=True)
161
+
162
+ # Input sliders
163
+ # REGION = gr.inputs.Slider(minimum=1, maximum=13, label='Location of each client')
164
+ # TENURE = gr.inputs.Slider(minimum=1, maximum=8, label="Duration in network")
165
+ # MONTANT = gr.inputs.Slider(minimum=22, maximum=470000, label="Top-up amount")
166
+ # FREQUENCE_RECH = gr.inputs.Slider(minimum=1, maximum=131, label="Income frequency")
167
+ # REVENUE = gr.inputs.Slider(minimum=1, maximum=532177, label="ARPU_SEGMENT")
168
+ # ARPU_SEGMENT = gr.inputs.Slider(minimum=1, maximum=177392, label="FREQUENCE")
169
+ # FREQUENCE = gr.inputs.Slider(minimum=1, maximum=91, label="DATA_VOLUME")
170
+ # DATA_VOLUME = gr.inputs.Slider(minimum=0, maximum=1702309, label="ON_NET")
171
+ # ON_NET = gr.inputs.Slider(minimum=0, maximum=36687, label="ORANGE")
172
+ # ORANGE = gr.inputs.Slider(minimum=0, maximum=6721, label="TIGO")
173
+ # TIGO = gr.inputs.Slider(minimum=0, maximum=4174, label="ZONE1")
174
+ # REGULARITY = gr.inputs.Slider(minimum=1, maximum=62, label="ZONE2")
175
+ # FREQ_TOP_PACK = gr.inputs.Slider(minimum=1, maximum=592, label="REGULARITY")
176
+
177
+ # # Output configuration
178
+ # op = gr.outputs.HighlightedText(color_map={"Customer will churn": "pink", "Customer will not churn": "yellow"})
179
+
180
+ # # Create and launch the interface
181
+ # gr.Interface(predict, inputs=[REGION, TENURE, MONTANT, FREQUENCE_RECH, REVENUE, ARPU_SEGMENT, FREQUENCE,
182
+ # DATA_VOLUME, ON_NET, ORANGE, TIGO, REGULARITY, FREQ_TOP_PACK], outputs=op,
183
+ # live=False).launch(debug=False)
184
+
185
+ # # Map numerical values to labels
186
+ # tenure_labels = {
187
+ # 0: "3-6 months",
188
+ # 1: "6-9 months",
189
+ # 2: "9-12 months",
190
+ # 3: "12-15 months",
191
+ # 4: "15-18 months",
192
+ # 5: "18-21 months",
193
+ # 6: "21-24 months",
194
+ # 7: "> 24 months"
195
+ # }
196
+
197
+ # # Reverse the mapping for predictions
198
+ # tenure_values = {v: k for k, v in tenure_labels.items()}
199
+
200
+ # # Create a dropdown menu with labels
201
+ # tenure_dropdown = gr.inputs.Dropdown(list(tenure_labels.values()), label="TENURE")
202
+