# coding: utf-8

import numpy as np
import pandas as pd
import xgboost as xgb
import gc
import matplotlib.pyplot as plt
from xgboost.sklearn import XGBRegressor
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split

print("Loading data...")

# Raw Zillow Prize inputs: transaction targets, the wide property-feature
# table, and the submission template.
train = pd.read_csv("F:/MyDownloads/ml_data/ZillowPrize/train_2016_v2.csv")
prop = pd.read_csv("F:/MyDownloads/ml_data/ZillowPrize/properties_2016.csv", low_memory=False)
sample = pd.read_csv("F:/MyDownloads/ml_data/ZillowPrize/sample_submission.csv")

# Halve the memory footprint of the properties table by downcasting every
# float64 column to float32 (the table is ~3M rows x ~58 columns).
for col in prop.columns[prop.dtypes == np.float64]:
    prop[col] = prop[col].astype(np.float32)

print("Creating training set...")

# Join the transaction records with the property features on parcel id.
df_train = train.merge(prop, how="left", on="parcelid")
# print(df_train.shape)

# Work on a 10k-row slice to keep the experiment fast.
df_train = df_train[0:10000]

drop_cols = ['parcelid', 'logerror', 'transactiondate', 'propertyzoningdesc', 'propertycountylandusecode']
x_train = df_train.drop(drop_cols, axis=1)
y_train = df_train['logerror']
print(x_train.shape, y_train.shape)

# Remember the feature columns so the test set can be built identically later.
train_columns = x_train.columns
# Crude categorical handling: collapse every remaining object column to a
# boolean "is the value literally True" flag.
object_cols = x_train.dtypes[x_train.dtypes == object].index.values
for col in object_cols:
    x_train[col] = (x_train[col] == True)

del df_train
gc.collect()

# Previous manual 8000/2000 positional split, kept for reference:
# split = 8000
# x_train, y_train, x_valid, y_valid = x_train[:split], y_train[:split], x_train[split:], y_train[split:]
# Random 67/33 train/validation split; fixed seed for reproducibility.
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.33, random_state=50)

print(x_train.shape, x_valid.shape, y_train.shape, y_valid.shape)


print("Building DMatrix")
# Native XGBoost data structures for the low-level xgb.cv / xgb.train APIs.
d_train = xgb.DMatrix(x_train, label=y_train)
d_valid = xgb.DMatrix(x_valid, label=y_valid)

print('Start Training Process')
# Core booster parameters, shared by xgb.cv here and the sklearn wrapper below.
params = dict()
params['eta'] = 0.1  # learning rate
params['objective'] = 'reg:linear'  # NOTE(review): renamed 'reg:squarederror' in XGBoost >= 0.83; deprecated there — confirm installed version
params['eval_metric'] = 'mae'  # competition metric is mean absolute error
params['max_depth'] = 4
params['silent'] = 1  # NOTE(review): 'silent' is deprecated in newer XGBoost in favour of 'verbosity'

print("Get n_estimators through xgb.cv func")
# 5-fold CV with early stopping; the number of rows in the result is the
# cross-validated estimate of the optimal boosting-round count.
cvresult = xgb.cv(params, d_train, num_boost_round=1000, nfold=5,
                          metrics='mae', early_stopping_rounds=100)
n_estimators = cvresult.shape[0]
print("n_estimators: %d" % n_estimators)

# Sklearn-style regressor mirroring the settings CV'd above; n_estimators
# comes from the xgb.cv result.
xgb1 = XGBRegressor(
    learning_rate=params['eta'],
    n_estimators=n_estimators,
    max_depth=params['max_depth'],
    min_child_weight=1,
    gamma=0,
    subsample=0.8,
    colsample_bytree=0.8,
    objective='reg:linear',
    nthread=20,
    scale_pos_weight=1,
    seed=27,
)
# Track MAE on both the training and the held-out split while fitting.
watchlist = [(x_train, y_train), (x_valid, y_valid)]
xgb1.fit(x_train, y_train, eval_metric=params['eval_metric'], eval_set=watchlist, verbose=True)

# In-sample fit quality (MSE, even though training optimises MAE).
xtrain_predictions = xgb1.predict(x_train)
print("\nModel Report")
print("mean squared error: %.4g" % metrics.mean_squared_error(y_train, xtrain_predictions))


# plot feature importance
feat_imp = pd.Series(xgb1.feature_importances_).sort_values(ascending=False)
ax = feat_imp.plot(kind='bar', title='Feature Importances')
xticklabel = [train_columns[i] for i in feat_imp.index]
print(feat_imp.index)
print(train_columns)
ax.xaxis.set_ticklabels(xticklabel, rotation=90)
plt.ylabel('Feature Importance Score')
plt.show()
exit(0)
del x_train, y_train, x_valid, y_valid; gc.collect()

print('Building test set ...')
# NOTE(review): unreachable while the exit(0) above remains in place.

# The submission template's ParcelId must match the properties' parcelid key.
sample['parcelid'] = sample['ParcelId']
df_test = sample.merge(prop, on='parcelid', how='left')
del prop; gc.collect()

# .copy() so the object-column conversion below writes into an owned frame
# rather than a view of df_test (avoids SettingWithCopyWarning and the risk
# of silently lost writes under chained assignment).
x_test = df_test[train_columns].copy()
for c in x_test.dtypes[x_test.dtypes == object].index.values:
    # Same crude object -> bool handling as on the training side.
    x_test[c] = (x_test[c] == True)
del df_test; gc.collect()

# One prediction vector reused for every date column of the template.
xtest_predictions = xgb1.predict(x_test)
for c in sample.columns[sample.columns != 'ParcelId']:
    sample[c] = xtest_predictions
sample.drop("parcelid", axis=1, inplace=True)

print('Writing csv ...')
sample.to_csv('xgb_starter.csv', index=False, float_format='%.4f')


# NOTE(review): redundant — the figure was already shown above, and this line
# is unreachable anyway because of the earlier exit(0).
plt.show()




# Terminates here; the alternative xgb.train() pipeline below never runs.
exit(0)

print('Training ...')
# Low-level training API: same params dict, with validation-based early
# stopping on the held-out DMatrix.
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
clf = xgb.train(params, d_train, num_boost_round=n_estimators, evals=watchlist, early_stopping_rounds=100,
                verbose_eval=10)

# Importance bar chart plus a rendering of the last boosted tree.
fig, ax = plt.subplots(figsize=(50, 50))
xgb.plot_importance(clf, ax=ax)
fig2, ax2 = plt.subplots(figsize=(50, 50))
xgb.plot_tree(clf, ax=ax2, num_trees=n_estimators - 1)
plt.show()

del d_train, d_valid


print('Building test set ...')
# NOTE(review): dead code — execution stops at the exit(0) above, and `prop`
# was already deleted in the first test-set block, so reaching this merge
# would raise NameError. Kept in place but flagged for removal/cleanup.

sample['parcelid'] = sample['ParcelId']
df_test = sample.merge(prop, on='parcelid', how='left')

del prop; gc.collect()

# .copy() so the conversion below writes into an owned frame, not a view of
# df_test (avoids SettingWithCopyWarning / silently lost writes).
x_test = df_test[train_columns].copy()
for c in x_test.dtypes[x_test.dtypes == object].index.values:
    # Same crude object -> bool handling as on the training side.
    x_test[c] = (x_test[c] == True)

del df_test; gc.collect()
# Native matrix for Booster.predict.
d_test = xgb.DMatrix(x_test)

del x_test; gc.collect()

print('Predicting on test ...')

# Score the full properties table with the early-stopped booster.
p_test = clf.predict(d_test)

del d_test
gc.collect()

# Fill every prediction column (one per submission date) with the same vector;
# only the ParcelId key column is left untouched.
pred_cols = [c for c in sample.columns if c != 'ParcelId']
for col in pred_cols:
    sample[col] = p_test

print('Writing csv ...')
sample.to_csv('xgb_starter.csv', index=False, float_format='%.4f')