MetS / app.py
Avan's picture
Upload 4 files
47f59d4
raw
history blame contribute delete
No virus
3.71 kB
# %%
import gradio as gr
from sklearn import preprocessing
import os
import numpy as np
import pickle
import pandas as pd
import scipy.io as scio
# %%
def tansig(x):
    """Tansig (hyperbolic-tangent sigmoid) activation used by the BLS net.

    The original form ``(2 / (1 + exp(-2*x))) - 1`` is mathematically
    identical to ``tanh(x)`` but raises an overflow RuntimeWarning for
    large-magnitude negative ``x``; ``np.tanh`` is stable everywhere.
    """
    return np.tanh(x)
def to_proba(output):
    """Min-max normalize ``output`` into the [0, 1] range.

    The original code subtracted the minimum twice; after the first shift
    the minimum is exactly 0, so the second subtraction was a no-op.
    One subtraction gives byte-identical results.

    Note: yields NaN/inf when all elements are equal (max == min); the
    only call site in this file currently has this function commented out.
    """
    lo = np.min(output)
    hi = np.max(output)
    return (output - lo) / (hi - lo)
def softmax(x):
    """Numerically stable softmax over all elements of ``x``.

    The original shifted by ``np.min(x)``, which does not bound the
    exponent and can still overflow ``np.exp``. Shifting by the max caps
    the exponent at 0, so overflow is impossible; since the shift cancels
    in the ratio, results are unchanged for inputs the old code handled.
    """
    shifted = x - np.max(x)
    ex = np.exp(shifted)
    return ex / ex.sum()
def load_data(path):
    """Load the first training sample from a MATLAB ``.mat`` file.

    Parameters
    ----------
    path : str
        Path to a .mat file containing a ``'trainx'`` matrix
        (samples x features).

    Returns
    -------
    numpy.ndarray
        The first row of ``trainx`` as a 1-D float64 array.
    """
    # Commented-out CSV loading and trainy/testx/testy extraction removed;
    # only 'trainx' row 0 was ever used.
    data = scio.loadmat(path)
    traindata = np.double(data['trainx'])
    return traindata[0]
# %%
labels = ['No MetS', 'MetS']
def predict(NCRF):
    """Classify one sample with the pre-trained Broad Learning System (BLS).

    Parameters
    ----------
    NCRF : pandas.DataFrame
        One row holding the 15 input features (see ``headers`` in this
        file), supplied by the gradio DataFrame component.

    Returns
    -------
    dict
        ``{label: raw network output}`` for each entry in ``labels`` — the
        format ``gr.Label`` accepts. The values are the raw linear readout
        (``to_proba``/``softmax`` are intentionally not applied here).
    """
    # np.load on an .npz returns an NpzFile that keeps the file handle
    # open; using it as a context manager closes the handle (fixes a
    # resource leak). Arrays are materialized inside the block.
    with np.load('params.npz') as params:
        N1 = params['N1']                                # nodes per mapping window
        N2 = params['N2']                                # number of mapping windows
        Beta1OfEachWindow = params['Beta1OfEachWindow']  # per-window projection weights
        ymax = params['ymax']                            # normalization upper bound
        ymin = params['ymin']                            # normalization lower bound
        minOfEachWindow = params['minOfEachWindow']      # per-window training minima
        distOfMaxAndMin = params['distOfMaxAndMin']      # per-window training ranges
        weightOfEnhanceLayer = params['weightOfEnhanceLayer']
        parameterOfShrink = params['parameterOfShrink']  # enhancement-layer scale
        OutputWeight = params['OutputWeight']            # final linear readout

    test_x = NCRF.values

    # SECURITY NOTE: pickle.load executes arbitrary code from the file;
    # GP.pickle must be a trusted artifact shipped with this app.
    with open('GP.pickle', 'rb') as f:
        gp = pickle.load(f)

    # Append genetic-programming-derived features, then z-score each row.
    gp_features = gp.transform(test_x)
    test_x = np.hstack((test_x, gp_features))
    test_x = preprocessing.scale(test_x, axis=1)

    # Feature-mapping layer: bias column of 0.1, per-window linear
    # projection, then rescale with the min/range recorded at training.
    FeatureOfInputDataWithBiasTest = np.hstack(
        [test_x, 0.1 * np.ones((test_x.shape[0], 1))])
    OutputOfFeatureMappingLayerTest = np.zeros(
        [test_x.shape[0], N2 * N1])
    for i in range(N2):
        outputOfEachWindowTest = np.dot(
            FeatureOfInputDataWithBiasTest, Beta1OfEachWindow[i])
        # NOTE(review): the trailing "- ymin" mirrors the training-time
        # code of the reference BLS implementation; do not "fix" it to
        # "+ ymin" or the trained OutputWeight would no longer match.
        OutputOfFeatureMappingLayerTest[:, N1*i: N1*(i+1)] = (ymax - ymin)*(
            outputOfEachWindowTest - minOfEachWindow[i]) / distOfMaxAndMin[i] - ymin

    # Enhancement layer: bias column, fixed random projection, shrunken tansig.
    InputOfEnhanceLayerWithBiasTest = np.hstack(
        [OutputOfFeatureMappingLayerTest,
         0.1 * np.ones((OutputOfFeatureMappingLayerTest.shape[0], 1))])
    tempOfOutputOfEnhanceLayerTest = np.dot(
        InputOfEnhanceLayerWithBiasTest, weightOfEnhanceLayer)
    OutputOfEnhanceLayerTest = tansig(
        tempOfOutputOfEnhanceLayerTest * parameterOfShrink)

    # Output layer: concatenate mapped + enhanced features, apply readout.
    InputOfOutputLayerTest = np.hstack(
        [OutputOfFeatureMappingLayerTest, OutputOfEnhanceLayerTest])
    OutputOfTest = np.squeeze(np.dot(InputOfOutputLayerTest, OutputWeight))

    confidences = {labels[i]: float(OutputOfTest[i]) for i in range(len(labels))}
    return confidences
# %%
# Column names shown in the input grid — English translations of the
# original Chinese questionnaire fields (gender, age, waist-hip ratio,
# body fat, nocturia, urine quality, tongue/fur color, eye, congestion,
# thirst, AC usage, smoking, fatty diet, BMI).
headers = ["Gender", "Age", "Waist-Hip Ratio", "Body fat rate", "Nocturia frequency", "Nature of pee",
           "Tongue color", "Fur color", "Eye anomaly", "Congestion", "Thirsty", "Daily air conditioner usage time", "Smoking", "Fatty diet", "BMI"]

# One fixed 1x15 spreadsheet row in, a label widget with class scores out.
demo = gr.Interface(
    fn=predict,
    inputs=gr.DataFrame(
        headers=headers,
        row_count=(1, "fixed"),
        col_count=(15, "fixed"),
        wrap=True,
    ),
    outputs=gr.Label(),
)
demo.launch()
# %%