# ann

#imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import torch
import torch.nn as nn
from torch.autograd import Variable

# Training hyperparameters.
batch_size = 128  # mini-batch size for the training DataLoader
learningRate = 0.1  # initial SGD learning rate

def rmse(actual, predict):
	"""Return the root-mean-square error between actual and predicted values.

	Both arguments may be torch tensors (they are detached and converted
	to numpy) or plain array-likes.  Returns a numpy float.

	Bug fix: the original called ``np.detach()`` -- a nonexistent numpy
	function -- instead of detaching ``actual`` itself, so every call
	raised AttributeError.
	"""
	if torch.is_tensor(predict):
		predict = predict.detach().numpy()
	if torch.is_tensor(actual):
		actual = actual.detach().numpy()
	distance = np.asarray(predict, dtype=float) - np.asarray(actual, dtype=float)
	mean_square_distance = (distance ** 2).mean()
	return np.sqrt(mean_square_distance)

# --- data loading & preprocessing ---
dataset = pd.read_csv('d_train_20180102.csv', encoding='gb2312').fillna(-999)

# Features are every column except the row id, the exam date ('体检日期')
# and the regression target, blood glucose ('血糖').
X = dataset.drop(['id', '体检日期', '血糖'], axis=1).values
y = dataset['血糖'].values

# Integer-encode the first (categorical) column.
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
X[:, 0] = LabelEncoder().fit_transform(X[:, 0])
print('编码完成···')

# NOTE(review): standardizes with the GLOBAL mean/std of the whole matrix,
# not per feature column — confirm this is intentional.
X = (X - X.mean()) / (np.std(X))

# Hold out 10% of the rows for evaluation.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
	X, y, test_size=0.1, random_state=0)


def _as_float_tensor(arr):
	# (possibly object-dtype) numpy array -> float32 torch tensor
	return torch.tensor(arr.astype(float)).float()

X_train, X_test, y_train, y_test = (
	_as_float_tensor(split) for split in (X_train, X_test, y_train, y_test))

train = torch.utils.data.TensorDataset(X_train, y_train)
test = torch.utils.data.TensorDataset(X_test, y_test)

train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test, batch_size=1, shuffle=True)

print('数据集划分完成···')

# --- model ---
# Fully connected regression net: 39 input features funneled down through
# progressively narrower ReLU layers to a single output value.
_layer_dims = [39, 100, 50, 25, 12, 6, 1]
_layers = []
for _n_in, _n_out in zip(_layer_dims[:-1], _layer_dims[1:]):
	_layers.append(nn.Linear(_n_in, _n_out))
	_layers.append(nn.ReLU())
_layers.pop()  # no activation after the final output layer
model = nn.Sequential(*_layers)

optimzer = torch.optim.SGD(model.parameters(), lr=learningRate)
loss_func = nn.MSELoss()

# --- training ---
for epoc in range(500):
	# Learning-rate decay: halve every 100 epochs and write the new value
	# into the optimizer's param groups.  (The original halved the local
	# variable once PER BATCH during those epochs and never updated the
	# optimizer, so the decay had no effect on training at all.)
	if epoc > 0 and epoc % 100 == 0:
		learningRate /= 2
		for param_group in optimzer.param_groups:
			param_group['lr'] = learningRate
	for i, (xx, yy) in enumerate(train_loader):
		# Deprecated Variable wrapper removed: plain tensors autograd fine.
		xxx = xx.view(-1, 39)  # assumes 39 feature columns — matches the model input
		yyy = yy
		optimzer.zero_grad()
		out = model(xxx).squeeze(-1)  # (batch, 1) -> (batch,) to match targets
		loss = loss_func(out, yyy)
		loss.backward()
		optimzer.step()
		if i % 100 == 0:
			# .item() extracts the Python float for %-formatting
			print('epoc: %d  loss: %.4f' % (epoc, loss.item()))

# --- evaluation on the held-out split ---
loss_list = []
with torch.no_grad():  # inference only: no gradient tracking needed
	for i, (xx, yy) in enumerate(test_loader):
		xxx = xx.view(-1, 39)
		yyy = yy
		out = model(xxx)
		# Record per-sample MSE (the original declared loss_list but
		# never filled it).
		loss_list.append(loss_func(out.squeeze(-1), yyy).item())
		# test_loader has batch_size=1, so .item() is safe here.
		print('real:%.4f pred:%.4f' % (yyy.item(), out[0].item()))


