Upload 4 files
- GP.pickle +3 -0
- app.py +104 -0
- params.npz +3 -0
- requirements.txt +5 -0
GP.pickle
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7cc76670c04424d803b2a60ab5f250ae247aa0ac1da406d7ea5cf182e5bc87e
+size 21561756
app.py
ADDED
@@ -0,0 +1,104 @@
+# %%
+import gradio as gr
+import numpy as np
+import pickle
+import scipy.io as scio
+from sklearn import preprocessing
+
+# %%
+
+
+def tansig(x):
+    # Hyperbolic tangent sigmoid activation used by the enhancement layer.
+    return (2 / (1 + np.exp(-2 * x))) - 1
+
+
+def to_proba(output):
+    # Min-max scale raw network outputs into [0, 1] (currently unused).
+    return (output - np.min(output)) / (np.max(output) - np.min(output))
+
+
+def softmax(x):
+    # Shift-invariant softmax over raw scores (currently unused).
+    x = x - np.min(x)
+    ex = np.exp(x)
+    return ex / ex.sum()
+
+
+def load_data(path):
+    # Alternative loader for .mat test sets; not used by the Gradio app.
+    data = scio.loadmat(path)
+    traindata = np.double(data['trainx'])
+    return traindata[0]
+
+
+# %%
+labels = ['No MetS', 'MetS']
+
+
+def predict(NCRF):
+    # Load the trained broad-learning-system parameters.
+    params = np.load('params.npz')
+    N1 = params['N1']
+    N2 = params['N2']
+    Beta1OfEachWindow = params['Beta1OfEachWindow']
+    ymax = params['ymax']
+    ymin = params['ymin']
+    minOfEachWindow = params['minOfEachWindow']
+    distOfMaxAndMin = params['distOfMaxAndMin']
+    weightOfEnhanceLayer = params['weightOfEnhanceLayer']
+    parameterOfShrink = params['parameterOfShrink']
+    OutputWeight = params['OutputWeight']
+
+    test_x = NCRF.values
+
+    # Append genetic-programming features from the fitted transformer.
+    with open('GP.pickle', 'rb') as f:
+        gp = pickle.load(f)
+    gp_features = gp.transform(test_x)
+    test_x = np.hstack((test_x, gp_features))
+    test_x = preprocessing.scale(test_x, axis=1)
+
+    # Feature-mapping layer: project the biased input through each window.
+    FeatureOfInputDataWithBiasTest = np.hstack(
+        [test_x, 0.1 * np.ones((test_x.shape[0], 1))])
+    OutputOfFeatureMappingLayerTest = np.zeros([test_x.shape[0], N2 * N1])
+    for i in range(N2):
+        outputOfEachWindowTest = np.dot(
+            FeatureOfInputDataWithBiasTest, Beta1OfEachWindow[i])
+        OutputOfFeatureMappingLayerTest[:, N1 * i: N1 * (i + 1)] = (ymax - ymin) * (
+            outputOfEachWindowTest - minOfEachWindow[i]) / distOfMaxAndMin[i] - ymin
+
+    # Enhancement layer: nonlinear expansion of the mapped features.
+    InputOfEnhanceLayerWithBiasTest = np.hstack(
+        [OutputOfFeatureMappingLayerTest,
+         0.1 * np.ones((OutputOfFeatureMappingLayerTest.shape[0], 1))])
+    tempOfOutputOfEnhanceLayerTest = np.dot(
+        InputOfEnhanceLayerWithBiasTest, weightOfEnhanceLayer)
+    OutputOfEnhanceLayerTest = tansig(
+        tempOfOutputOfEnhanceLayerTest * parameterOfShrink)
+
+    # Output layer: concatenate both layers and apply the learned weights.
+    InputOfOutputLayerTest = np.hstack(
+        [OutputOfFeatureMappingLayerTest, OutputOfEnhanceLayerTest])
+    OutputOfTest = np.squeeze(np.dot(InputOfOutputLayerTest, OutputWeight))
+
+    # Raw scores are reported as-is; to_proba() or softmax() could map them to [0, 1].
+    confidences = {labels[i]: float(OutputOfTest[i]) for i in range(len(labels))}
+    return confidences
+
+
+# %%
+# Input feature headers (English translations of the original Chinese labels).
+headers = ["Gender", "Age", "Waist-Hip Ratio", "Body fat rate",
+           "Nocturia frequency", "Nature of urine", "Tongue color", "Fur color",
+           "Eye anomaly", "Congestion", "Thirsty",
+           "Daily air conditioner usage time", "Smoking", "Fatty diet", "BMI"]
+iface = gr.Interface(fn=predict,
+                     inputs=gr.DataFrame(headers=headers,
+                                         row_count=(1, "fixed"),
+                                         col_count=(15, "fixed"), wrap=True),
+                     outputs=gr.Label())
+iface.launch()
+
+# %%
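For a quick local sanity check, predict() can be exercised directly with a one-row pandas DataFrame shaped like the Gradio input. A minimal sketch, run in the same session as app.py with GP.pickle and params.npz in the working directory; the feature values below are illustrative placeholders, not real patient data:

    # Hypothetical smoke test; headers and predict come from app.py above.
    import pandas as pd
    sample = pd.DataFrame([[1, 45, 0.92, 28.5, 2, 1, 2, 1, 0, 0, 1, 5.0, 0, 1, 26.3]],
                          columns=headers)
    print(predict(sample))  # prints a {'No MetS': ..., 'MetS': ...} score dict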
params.npz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77d84dab7669e2060f5b3466cf0833ae7e037ee7ae45d6a025e10ca0fb137a7e
+size 1920294
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+gradio==3.21.0
+numpy==1.21.6
+pandas==1.3.5
+scikit_learn==1.2.2
+scipy==1.7.3
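The pinned environment can be reproduced locally before launching the app:

    pip install -r requirements.txt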