|
|
|
import gradio as gr |
|
from sklearn import preprocessing |
|
import os |
|
import numpy as np |
|
import pickle |
|
import pandas as pd |
|
import scipy.io as scio |
|
|
|
|
|
|
|
def tansig(x):
    """Hyperbolic-tangent sigmoid activation mapping inputs into (-1, 1).

    Mathematically identical to the original ``(2/(1+exp(-2x))) - 1``
    form, but ``np.tanh`` avoids the overflow RuntimeWarning that
    ``np.exp(-2*x)`` triggers for large negative ``x``.

    Parameters
    ----------
    x : array_like
        Pre-activation values (scalar or ndarray).

    Returns
    -------
    ndarray or scalar
        Element-wise tanh of ``x``.
    """
    return np.tanh(x)
|
|
|
|
|
def to_proba(output):
    """Min-max normalize ``output`` into the [0, 1] range.

    The original implementation shifted by the minimum twice: after the
    first ``output - np.min(output)`` the minimum is 0, so the second
    subtraction was a no-op.  A constant input also divided by zero and
    produced NaNs; that case now returns all zeros instead.

    Parameters
    ----------
    output : array_like
        Raw model outputs.

    Returns
    -------
    ndarray
        Same shape as ``output``, scaled so min -> 0 and max -> 1
        (all zeros when every element is equal).
    """
    output = np.asarray(output, dtype=float)
    lo = np.min(output)
    span = np.max(output) - lo
    if span == 0:
        # Degenerate (constant) input: avoid 0/0 -> NaN.
        return np.zeros_like(output)
    return (output - lo) / span
|
|
|
|
|
def softmax(x):
    """Numerically stable softmax.

    Bug fix: the original subtracted ``np.min(x)``.  Softmax is
    shift-invariant, so the result is mathematically unchanged, but
    subtracting the minimum leaves large positive entries large and
    ``np.exp`` overflows to inf (yielding NaNs).  Subtracting the
    *maximum* guarantees every exponent argument is <= 0, which is the
    standard stable formulation.

    Parameters
    ----------
    x : array_like
        Raw scores (logits).

    Returns
    -------
    ndarray
        Non-negative values of the same shape summing to 1.
    """
    shifted = x - np.max(x)
    ex = np.exp(shifted)
    return ex / ex.sum()
|
|
|
|
|
def load_data(path):
    """Load the training matrix from a MATLAB ``.mat`` file.

    Parameters
    ----------
    path : str
        Path to a ``.mat`` file containing a ``'trainx'`` variable.

    Returns
    -------
    ndarray
        The first row of ``'trainx'``, converted to float64.
    """
    mat_contents = scio.loadmat(path)
    features = np.double(mat_contents['trainx'])
    first_row = features[0]
    return first_row
|
|
|
|
|
|
|
labels = ['No MetS', 'MetS'] |
|
|
|
|
|
def predict(NCRF):
    """Score one patient record with a pre-trained Broad Learning System (BLS).

    Parameters
    ----------
    NCRF : pandas.DataFrame
        Input table from the Gradio DataFrame widget; only ``.values`` is
        used.  Presumably one row of the 15 features listed in ``headers``
        below — confirm against the UI definition.

    Returns
    -------
    dict
        Maps each entry of ``labels`` to a float taken directly from the
        network's output layer.  NOTE(review): the raw outputs are NOT
        passed through ``softmax``/``to_proba`` (both defined above but
        unused here), so these are scores, not calibrated probabilities —
        confirm this is intended before presenting them as confidences.

    Side effects
    ------------
    Reads ``params.npz`` (trained BLS weights/statistics) and
    ``GP.pickle`` (a fitted feature transformer) from the current
    working directory.
    """

    # Trained BLS parameters saved at training time.
    params = np.load('params.npz')

    N1 = params['N1']  # nodes per feature-mapping window

    N2 = params['N2']  # number of feature-mapping windows

    Beta1OfEachWindow = params['Beta1OfEachWindow']  # per-window projection weights

    ymax = params['ymax']  # target scaling range recorded at training time

    ymin = params['ymin']

    minOfEachWindow = params['minOfEachWindow']  # per-window training minima

    distOfMaxAndMin = params['distOfMaxAndMin']  # per-window training ranges

    weightOfEnhanceLayer = params['weightOfEnhanceLayer']

    parameterOfShrink = params['parameterOfShrink']

    OutputWeight = params['OutputWeight']

    test_x = NCRF.values

    # Fitted feature transformer; the 'GP' name suggests genetic-programming
    # features (e.g. gplearn) — confirm.  NOTE(review): pickle.load is only
    # safe because this is a local, trusted artifact.
    with open('GP.pickle', 'rb') as f:

        gp = pickle.load(f)

    # Append the engineered features to the raw inputs.
    gp_features = gp.transform(test_x)

    test_x = np.hstack((test_x, gp_features))

    # Standardize each sample row-wise (axis=1) to zero mean / unit variance.
    test_x = preprocessing.scale(test_x, axis=1)

    # --- Feature-mapping layer: append a 0.1 bias column, then project
    # through each of the N2 windows.
    FeatureOfInputDataWithBiasTest = np.hstack(

        [test_x, 0.1 * np.ones((test_x.shape[0], 1))])

    OutputOfFeatureMappingLayerTest = np.zeros(

        [test_x.shape[0], N2 * N1])

    for i in range(N2):

        outputOfEachWindowTest = np.dot(

            FeatureOfInputDataWithBiasTest, Beta1OfEachWindow[i])

        # Rescale with the per-window min/range recorded at training time.
        # The trailing "- ymin" matches the reference BLS implementation
        # (and the saved parameters), so it must stay as-is.
        OutputOfFeatureMappingLayerTest[:, N1*i: N1*(i+1)] = (ymax - ymin)*(

            outputOfEachWindowTest - minOfEachWindow[i]) / distOfMaxAndMin[i] - ymin

    # --- Enhancement layer: bias column, fixed random projection, then
    # tansig squashing scaled by the shrink parameter.
    InputOfEnhanceLayerWithBiasTest = np.hstack(

        [OutputOfFeatureMappingLayerTest, 0.1 * np.ones((OutputOfFeatureMappingLayerTest.shape[0], 1))])

    tempOfOutputOfEnhanceLayerTest = np.dot(

        InputOfEnhanceLayerWithBiasTest, weightOfEnhanceLayer)

    OutputOfEnhanceLayerTest = tansig(

        tempOfOutputOfEnhanceLayerTest * parameterOfShrink)

    # --- Output layer: concatenate mapped + enhancement features and
    # apply the learned ridge-solution weights.
    InputOfOutputLayerTest = np.hstack(

        [OutputOfFeatureMappingLayerTest, OutputOfEnhanceLayerTest])

    OutputOfTest = np.dot(InputOfOutputLayerTest, OutputWeight)

    # Collapse the single-sample batch dimension so proba[i] indexes classes.
    OutputOfTest = np.squeeze(OutputOfTest)

    proba = OutputOfTest

    confidences = {labels[i]: float(proba[i]) for i in range(len(labels))}

    return confidences
|
|
|
|
|
|
|
|
|
|
|
# Column names for the Gradio input table.  Their order must match the
# feature order the model was trained on; the count (15) must agree with
# col_count below.
headers = ["Gender", "Age", "Waist-Hip Ratio", "Body fat rate", "Nocturia frequency", "Nature of pee",

           "Tongue color", "Fur color", "Eye anomaly", "Congestion", "Thirsty", "Daily air conditioner usage time", "Smoking", "Fatty diet", "BMI"]

# Web UI: a single fixed-size row of inputs feeding predict(); the output
# Label widget renders the {class name: score} dict predict() returns.
iface = gr.Interface(fn=predict,

                     inputs=gr.DataFrame(headers=headers,

                                         row_count=(1, "fixed"),

                                         col_count=(15, "fixed"), wrap=True),

                     outputs=gr.Label()

                     )

# Blocks and serves the app (default: local server).
iface.launch()
|
|
|
|
|
|