# Requirements: pip install gradio pandas scikit-learn
# (scikit-learn must be installed so the pickled model can be deserialized)
import gradio as gr
import pandas as pd
import pickle

# Training data used to refit the loaded classifier
xtrain = pd.read_csv('Xtrains.csv')
ytrain = pd.read_csv('Ytrains.csv')

# Load the serialized model and refit it on the training data
with open("model.pkl", "rb") as f:
    clf = pickle.load(f)

clf.fit(xtrain, ytrain.values.ravel())

tenure_labels = {
    0: "3-6 months",
    1: "6-9 months",
    2: "9-12 months",
    3: "12-15 months",
    4: "15-18 months",
    5: "18-21 months",
    6: "21-24 months",
    7: "> 24 months"
}

# Reverse mapping: dropdown label -> encoded value, e.g. "3-6 months" -> 0
tenure_values = {v: k for k, v in tenure_labels.items()}

def predict(tenure, montant, freq_rech, revenue, arpu, freq, data_vol, on_net,
            orange, tigo, freq_top_pack, regularity):

    # Map the human-readable dropdown choice back to its encoded TENURE value
    tenure_value = tenure_values[tenure]

    # Column names must match the features the model was trained on
    input_df = pd.DataFrame({
        'TENURE': [tenure_value],
        'MONTANT': [montant],
        'FREQUENCE_RECH': [freq_rech],
        'REVENUE': [revenue],
        'ARPU_SEGMENT': [arpu],
        'FREQUENCE': [freq],
        'DATA_VOLUME': [data_vol],
        'ON_NET': [on_net],
        'ORANGE': [orange],
        'TIGO': [tigo],
        'REGULARITY': [regularity],
        'FREQ_TOP_PACK': [freq_top_pack]
    })

    prediction = clf.predict(input_df)

    churn_label = "Customer will churn" if prediction[0] == 1 else "Customer will not churn"

    # HighlightedText-style payload: the label as plain text, no highlighted entities
    result = {
        'text': churn_label,
        'entities': []
    }
    return result


# Dropdown with human-readable tenure labels
tenure_dropdown = gr.Dropdown(list(tenure_labels.values()), label="TENURE")
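
# Output component (assumed): a HighlightedText display matches the
# {'text': ..., 'entities': [...]} dict returned by predict()
output = gr.HighlightedText(label="Prediction")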

iface = gr.Interface(
    fn=predict,
    inputs=[
        tenure_dropdown,  # dropdown instead of a numeric slider
        gr.Slider(minimum=20, maximum=470000, label="MONTANT"),
        gr.Slider(minimum=1, maximum=131, label="FREQUENCE_RECH"),
        gr.Slider(minimum=1, maximum=530000, label="REVENUE"),
        gr.Slider(minimum=0, maximum=2453, label="ARPU_SEGMENT"),
        gr.Slider(minimum=1, maximum=91, label="FREQUENCE"),
        gr.Slider(minimum=1, maximum=1702309, label="DATA_VOLUME"),
        gr.Slider(minimum=0, maximum=51000, label="ON_NET"),
        gr.Slider(minimum=0, maximum=12040, label="ORANGE"),
        gr.Slider(minimum=0, maximum=4174, label="TIGO"),
        gr.Slider(minimum=0, maximum=624, label="FREQ_TOP_PACK"),
        gr.Slider(minimum=0, maximum=62, label="REGULARITY")
    ],
    outputs=output,
    title="Team Paris Customer Churn Prediction App",
    description="Let's Get Started With Some Predictions!"
)

iface.launch()
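
# To expose the app beyond the local machine, launch() accepts extra options,
# e.g. iface.launch(share=True) for a temporary public link, or
# iface.launch(server_name="0.0.0.0", server_port=7860) on a dedicated host.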