EstebanDC committed on
Commit 58c4b7a
1 Parent(s): 32ff1d6

Upload app.py

Files changed (1)
app.py +87 -0
app.py ADDED
@@ -0,0 +1,87 @@
+ import pickle
+ import numpy as np
+ import gradio as gr
+ import sklearn
+ import pandas as pd
+ from sklearn.model_selection import train_test_split
+ from sklearn.experimental import enable_iterative_imputer
+ from sklearn.impute import IterativeImputer
+ from sklearn.model_selection import KFold
+ from sklearn.ensemble import ExtraTreesRegressor
+ from sklearn.ensemble import GradientBoostingRegressor
+ from sklearn.ensemble import StackingRegressor
+ from sklearn.ensemble import RandomForestRegressor
+
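+ # Load the database: liquid limit (LL), plasticity index (IP), initial void ratio (e0),
+ # natural water content (w) and the target, compression index (cc).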
+ filename = 'DatabaseFinal0.csv'
+ names0 = ['LL', 'IP', 'e0', 'w', 'cc']
+ dataset = pd.read_csv(filename, names=names0)
+
+ y = dataset['cc']
+ X0 = dataset.drop('cc', axis=1)
+
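+ # Fill missing predictor values with multivariate (iterative) imputation.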
+ impute_it = IterativeImputer()
+ X2 = impute_it.fit_transform(X0)
+ X = pd.DataFrame(X2, columns=['LL', 'IP', 'e0', 'w'])
+
+
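+ # Hold out 20% of the data for validation.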
+ validation_size = 0.2
+ seed = 10
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=validation_size, random_state=seed)
+
+
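+ # Base learners for the stacking ensemble, with fixed hyperparameters.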
+ model1 = ExtraTreesRegressor(max_depth=15, max_features=None, n_estimators=500, random_state=100, min_samples_split=12)
+ model1 = model1.fit(X_train, y_train)
+
+ model2 = GradientBoostingRegressor(learning_rate=0.007, max_depth=2, n_estimators=1650, random_state=100, min_samples_split=9, max_features='log2')
+ model2 = model2.fit(X_train, y_train)
+
+ model3 = RandomForestRegressor(n_estimators=1000, min_samples_split=11, min_samples_leaf=1,
+                                max_features=1.0,  # formerly "auto"; for regressors this means all features
+                                max_depth=6, bootstrap=True, random_state=100)
+ model3 = model3.fit(X_train, y_train)
+
+
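+ # Stack the extra-trees and gradient-boosting models, with the random forest as the final (meta) estimator.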
+ level1 = list()
+ level1.append(('ET', model1))
+ level1.append(('GBR', model2))
+ level2 = model3
+ cv = KFold(n_splits=10, random_state=100, shuffle=True)
+ modelodef = StackingRegressor(estimators=level1, final_estimator=level2, cv=cv, passthrough=True)
+ modelodef.fit(X_train, y_train)
+
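+ # Persist the fitted super-learner so the prediction function can reload it.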
+ pickle.dump(modelodef, open("modelodef.pkl", "wb"))
+
+
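+ # Prediction function called by the Gradio interface.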
+ def cc(LL, IP, e0, w):
+     modelodef = pickle.load(open("modelodef.pkl", "rb"))
+     # Wrap the inputs in a one-row DataFrame with the training column names.
+     X_new = pd.DataFrame([[LL, IP, e0, w]], columns=['LL', 'IP', 'e0', 'w'])
+     prediction0 = modelodef.predict(X_new)
+     prediction = np.round(prediction0[0], 3)
+     return prediction
+
+ title = "A SUPER-LEARNER MACHINE LEARNING MODEL FOR A GLOBAL PREDICTION OF COMPRESSION INDEX IN CLAYS"
+ description = "This app accompanies the research paper: A super-learner machine learning model for a global prediction of compression index in clays."
+ article = """
+ Notes:
+ - Click the Submit button to obtain the compression index prediction.
+ - Click the Clear button to reset the inputs.
+ - Please note the application ranges of the variables in the above-referenced paper (in the publication process); outside these ranges, the predictions may not be reliable.
+ - Either a point or a comma can be used as the decimal separator.
+ """
+
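+ # Gradio interface: four numeric soil parameters in, predicted compression index out.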
+ app = gr.Interface(
+     cc,
+     inputs=[
+         gr.Number(value=1, label="Liquid limit (%)"),
+         gr.Number(value=1, label="Plasticity index (%)"),
+         gr.Number(value=1, label="Initial void ratio"),
+         gr.Number(value=1, label="Natural water content (%)"),
+     ],
+     outputs=[gr.Text(label="Compression index")],
+     title=title,
+     description=description,
+     article=article,
+     theme="dark-seafoam",
+ )
+
+
+ app.launch()