# xgboost

# imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Base regressors; each fitted model appended here feeds the stacking meta-model.
models = []


def rmse(actual, predict):
    """Return the root-mean-square error between *actual* and *predict*.

    Both arguments may be any equal-length sequences convertible to
    numpy arrays; the result is symmetric in its arguments.
    """
    diff = np.asarray(predict) - np.asarray(actual)
    return np.sqrt((diff ** 2).mean())


# data: tab-separated training file with a 'target' column.
dataset = pd.read_table('./zhengqi_train.txt')
X = dataset.drop(['target'], axis=1).values
y = dataset['target'].values

# data encoding: standardize each feature column (zero mean, unit variance).
# FIX: the original used the scalar mean/std of the whole matrix; per-column
# statistics (axis=0) are the standard normalization for tabular features.
X = (X - X.mean(axis=0)) / np.std(X, axis=0)

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
print('数据集划分完成···')

# Fitting XGBoost to the Training set
import xgboost

# Gradient-boosted-trees hyper-parameters.
learning_rate = 0.1
n_estimators = 700
max_depth = 3
min_child_weight = 0
gamma = 0
subsample = 0.9
colsample_bytree = 0.9

# FIX: 'eta' is XGBoost's alias for learning_rate; the original passed both
# (learning_rate=0.1 AND eta=0.01), which is contradictory — only
# learning_rate is kept so the configured rate is unambiguous.
xgbst = xgboost.XGBRegressor(objective='reg:squarederror',
                             learning_rate=learning_rate,
                             n_estimators=n_estimators,
                             max_depth=max_depth,
                             min_child_weight=min_child_weight,
                             gamma=gamma,
                             subsample=subsample,
                             colsample_bytree=colsample_bytree)
xgbst.fit(X_train, y_train, verbose=True)
models.append(xgbst)

# FIX: resolved an unresolved git merge conflict — kept the stashed-side
# evaluation of the fitted XGBoost model (the upstream side only had the
# section comment).
y_pred_xgb = xgbst.predict(X_test)
final_loss = rmse(y_test, y_pred_xgb)
print('real:%.4f pred:%.4f' % (y_test[0], y_pred_xgb[0]))
print('final loss: ', final_loss)

# random forest
from sklearn.ensemble import RandomForestRegressor

# Random-forest hyper-parameters.
n_estimators = 50
max_depth = 5
min_samples_split = 4
min_samples_leaf = 2
rf = RandomForestRegressor(n_estimators=n_estimators,
                           max_depth=max_depth,
                           min_samples_split=min_samples_split,
                           min_samples_leaf=min_samples_leaf,
                           )
rf.fit(X_train, y_train)
# NOTE(review): fitted but excluded from the stacking ensemble (append
# commented out) — presumably an experiment; confirm before re-enabling.
#models.append(rf)

# svm: three SVR kernels, all fitted but currently excluded from the ensemble.
from sklearn.svm import SVR

l_svr = SVR(kernel='linear')
l_svr.fit(X_train, y_train)
#models.append(l_svr)

n_svr = SVR(kernel='poly')
n_svr.fit(X_train, y_train)
#models.append(n_svr)

r_svr = SVR(kernel='rbf')
r_svr.fit(X_train, y_train)
#models.append(r_svr)

# knn (uniform weighting), fitted but excluded from the ensemble.
from sklearn.neighbors import KNeighborsRegressor

knn = KNeighborsRegressor(weights="uniform")
knn.fit(X_train, y_train)
#models.append(knn)

# etr: extremely-randomized trees, fitted but excluded from the ensemble.
from sklearn.ensemble import ExtraTreesRegressor

etr = ExtraTreesRegressor()
etr.fit(X_train, y_train)
#models.append(etr)

# stacking: build a meta-dataset whose features are the base models'
# training-set predictions (one column per model).
import torch
import torch.nn as nn
from torch.autograd import Variable

batch_size = 128
learningRate = 0.1
# FIX: these two were left blank in the original ('decaysteps = '), which is
# a SyntaxError. Concrete defaults for the StepLR schedule are supplied.
decaysteps = 100
decayratio = 0.5
lamb = 0.1          # L1 regularization strength for the meta-model
length = len(models)

features = []
labels = y_train
for mds in models:
    y_pred = mds.predict(X_train)
    features.append(y_pred)
features = np.array(features).T  # shape: (n_samples, n_models)
# to tensor
features = torch.tensor(features.astype(float)).float()
labels = torch.tensor(labels.astype(float)).float()

train = torch.utils.data.TensorDataset(features, labels)
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)

# FIX: resolved an unresolved git merge conflict. The upstream side imported
# a project-local 'model' module (weights_net) that is not part of this file;
# the self-contained nn.Sequential meta-model from the stashed side is kept.
model = nn.Sequential(
    nn.Linear(length, 15),
    nn.ReLU(),
    nn.Linear(15, 1),
)

optimzer = torch.optim.SGD(model.parameters(), lr=learningRate)
# FIX: the stashed side referenced bare 'optim' (NameError) — qualified as
# torch.optim here.
scheduler = torch.optim.lr_scheduler.StepLR(optimzer, step_size=decaysteps, gamma=decayratio)

loss_func = nn.MSELoss()

# Train the stacking meta-model with SGD + L1 regularization.
# FIX: resolved an unresolved git merge conflict. Both sides halved the
# Python variable 'learningRate' mid-training, which never reaches the
# already-constructed optimizer; the StepLR scheduler (stepped once per
# epoch, the conventional placement) replaces that dead code.
for epoc in range(500):
    for i, (xx, yy) in enumerate(train_loader):
        xxx = Variable(xx.view(-1, length))
        yyy = Variable(yy)
        optimzer.zero_grad()
        out = model(xxx).squeeze(-1)
        # L1 penalty over all meta-model weights.
        l1_loss = 0
        for param in model.parameters():
            l1_loss += torch.sum(torch.abs(param))
        loss = loss_func(out, yyy) + lamb * l1_loss
        loss.backward()
        optimzer.step()

        if i % 100 == 0:
            print('epoc: %d  loss: %.4f' % (epoc, loss))
    scheduler.step()

# test
# Evaluate the stacked ensemble: feed each base model's test-set predictions
# through the trained meta-model and report RMSE against the true targets.
# FIX: resolved an unresolved git merge conflict — kept the stashed side
# (shuffle=False preserves sample order; scalar values appended to the
# lists), printing only a single sanity-check sample instead of every row.
results = []
for mds in models:
    y_test_pred = mds.predict(X_test)
    results.append(y_test_pred)
results = np.array(results).T  # shape: (n_test_samples, n_models)

results = torch.tensor(results.astype(float)).float()
real = torch.tensor(y_test.astype(float)).float()

test = torch.utils.data.TensorDataset(results, real)
test_loader = torch.utils.data.DataLoader(test, batch_size=1, shuffle=False)

pred_list = []
real_list = []
for i, (xx, yy) in enumerate(test_loader):
    xxx = Variable(xx.view(-1, length))
    yyy = Variable(yy)
    out = model(xxx)
    pred_list.append(out.data.numpy()[0])
    real_list.append(yyy.data.numpy()[0])
    if i == 0:
        print('real:%.4f pred:%.4f' % (yyy, out[0]))

print('final loss: ', rmse(real_list, pred_list))
