Zakia committed on
Commit
712671d
1 Parent(s): 50e0361

add a bunch of files and move .pkl file to models directory

app.py CHANGED
@@ -8,7 +8,7 @@ from pycaret.classification import load_model, predict_model


 # load the trained model for predictions
- model = load_model('tuned_blend_specific_model_19112021')
+ model = load_model('models/tuned_blend_specific_model_19112021')


 # define the function to call
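The hunk's context line shows that app.py imports both load_model and predict_model, and the truncated "# define the function to call" comment precedes the app's prediction wrapper. A minimal sketch of how the relocated model might be called further down in app.py, with a hypothetical function name and illustrative feature columns (the real column names are not visible in this diff):

import pandas as pd
from pycaret.classification import load_model, predict_model

# load the trained model from its new models/ location (as in the hunk above)
model = load_model('models/tuned_blend_specific_model_19112021')

# hypothetical wrapper; column names and order are illustrative only
def predict_amputation(age, gender, race, diabetes_class):
    input_df = pd.DataFrame(
        [[age, gender, race, diabetes_class]],
        columns=['AGE', 'GENDER', 'RACE', 'DIABETES_CLASS'],
    )
    # PyCaret 2.x returns the input with 'Label' and 'Score' columns appended
    predictions = predict_model(model, data=input_df)
    return predictions['Label'].iloc[0]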
input/amputation_dataset.xlsx ADDED
Binary file (237 kB).
 
models/tuned_blend_specific_model_19112021.pkl ADDED
Binary file (4.88 MB).
 
notebooks/diarc.ipynb ADDED
The diff for this file is too large to render.
 
src/diarc.py ADDED
@@ -0,0 +1,136 @@
+ # -*- coding: utf-8 -*-
+ """diarc.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+ https://colab.research.google.com/drive/1Jyccp5Aeml-7oZABbACY2VTE9iQJg9Pe
+
+ # Bismillahir Rahmaanir Raheem
+ # Almadadh Ya Gause RadiAllahu Ta'alah Anh - Ameen
+
+ # <font color=grey>DIabetes-related Amputation Risk Calculator (DIARC)</font>
+ <b>_by Zakia Salod_</b>
+ """
+
+ !pip install pycaret
+
+ from pycaret.utils import version
+ version()
+
+ from pycaret.utils import enable_colab
+ enable_colab()
+
+ import numpy as np # Linear algebra
+ import pandas as pd # Data processing, CSV file I/O (e.g. pd.read_csv)
+ import matplotlib.pyplot as plt # For graphical representations of the data
+ import seaborn as sns
+
+ # Just to make sure the results are reproducible
+ np.random.seed(1234)
+
+ dataset = pd.read_excel('amputation_dataset.xlsx')
+
+ print(dataset['AMPUTATION'].value_counts())
+
+ ax = sns.countplot(x="AMPUTATION", data=dataset)
+
+ # show the number of duplicate rows in this dataset
+ dataset.duplicated(keep='first').sum()
+
+ # remove the duplicate rows in this dataset
+ # only keep the first instance of the row
+ dataset = dataset.drop_duplicates(keep='first')
+
+ print(dataset['AMPUTATION'].value_counts())
+
+ ax = sns.countplot(x="AMPUTATION", data=dataset)
+
+ dataset.head()
+
+ # Under sample the dataset to handle the imbalance
+ # Shuffle the Dataset.
+ shuffled_dataset = dataset.sample(frac=1, random_state=4)
+
+ # Put all the amputation class in a separate dataset.
+ amputation_dataset = shuffled_dataset.loc[shuffled_dataset['AMPUTATION'] == 1]
+
+
+ #Randomly select 105 observations from the non-amputation (majority class)
+ non_amputation_dataset = shuffled_dataset.loc[shuffled_dataset['AMPUTATION'] == 0].sample(n=105,random_state=42)
+
+ # Concatenate both dataframes again
+ dataset = pd.concat([amputation_dataset, non_amputation_dataset])
+
+ print(dataset['AMPUTATION'].value_counts())
+
+ ax = sns.countplot(x="AMPUTATION", data=dataset)
+
+ dataset.to_excel('amputation_removed_duplicates_and_balanced.xlsx')
+
+ from pycaret.classification import *
+
+ clf = setup(data = dataset, target = 'AMPUTATION', session_id = 42)
+
+ # display the dataset (X_train)
+ get_config('X_train')
+ # converts age from numeric to float
+ # converts gender and diabetes_class (the two binary category variables) into label encoder conversion
+ # so, gender_f ---> with value 1 indicating FEMALE is TRUE and value 0 indicating FEMALE is FALSE (and instead, MALE)
+ # diabetes_class type 1 diabetes ---> value 1 indicates diabetes type 1 and value 0 means diabetes type 2
+ # then, one hot encoding is applied to the race column (each race is split into separate columns, with value 1 denoting TRUE for that race)
+
+ # display the dataset (y_train)
+ get_config('y_train')
+
+ best_model = compare_models(sort = 'AUC')
+
+ # BLEND MODELS, ALHUM
+ # create models for blending
+ nb = create_model('nb')
+ bagged_nb = ensemble_model(nb, method='Bagging')
+ lr = create_model('lr')
+ bagged_lr = ensemble_model(lr, method='Bagging')
+ lda = create_model('lda')
+ bagged_lda = ensemble_model(lda, method='Bagging')
+
+ rf = create_model('rf')
+ bagged_rf = ensemble_model(rf, method='Bagging')
+ ada = create_model('ada')
+ bagged_ada = ensemble_model(ada, method='Bagging')
+
+
+ blend_specific = blend_models(estimator_list = [bagged_nb, bagged_lr, bagged_lda, bagged_rf, bagged_ada])
+
+ # plot model
+ plot_model(blend_specific)
+
+ # tuning
+ tuned_blend_specific = tune_model(blend_specific)
+
+ evaluate_model(tuned_blend_specific)
+
+ tuned_blend_specific_predictions = predict_model(tuned_blend_specific)
+
+ # finalize model for deployment
+ final_tuned_blend_specific = finalize_model(tuned_blend_specific)
+
+ # save the model
+ # creates a .pkl file
+ save_model(tuned_blend_specific, "tuned_blend_specific_model_19112021", verbose=True)
+
+ # display the dataset (X_test)
+ get_config('X_test')
+
+ # display the dataset (y_test)
+ get_config('y_test')
+
+ dataset2 = pd.read_excel('amputation_removed_duplicates_and_balanced.xlsx')
+
+ !pip install pandas-profiling
+
+ from pandas_profiling import ProfileReport
+
+ profile = ProfileReport(dataset2, title="Pandas Profiling Report")
+
+ profile.to_file("amputation_removed_duplicates_and_balanced_report.html")
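
Two details worth noting from the tail of the script: save_model is called on tuned_blend_specific rather than the finalized final_tuned_blend_specific, and PyCaret appends the .pkl extension itself, producing the tuned_blend_specific_model_19112021.pkl that this commit moves into models/. A minimal sketch of the matching load-and-score round trip, assuming it runs from the repository root and that the balanced spreadsheet exported by the script is in the working directory:

import pandas as pd
from pycaret.classification import load_model, predict_model

# load_model takes the saved path without the .pkl extension,
# matching the file's new location in this commit
model = load_model('models/tuned_blend_specific_model_19112021')

# score the deduplicated, balanced dataset the script exported
data = pd.read_excel('amputation_removed_duplicates_and_balanced.xlsx')
scored = predict_model(model, data=data)

# in PyCaret 2.x, predict_model appends 'Label' (predicted class)
# and 'Score' columns to the input frame
print(scored[['AMPUTATION', 'Label', 'Score']].head())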