import numpy as np
import pandas as pd
import warnings
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.special import jn
from IPython.display import display, clear_output
import time

warnings.filterwarnings('ignore')
%matplotlib inline

from sklearn import linear_model
from sklearn import preprocessing
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor

from sklearn.decomposition import PCA, FastICA, FactorAnalysis, SparsePCA

import lightgbm as lgb
import xgboost as xgb

from sklearn.model_selection import GridSearchCV, KFold, StratifiedKFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error

 
# Load the used-car train/test sets; the CSVs are space-separated.
# NOTE(review): file names suggest the Tianchi used-car price competition
# data; the files must be in the current working directory.
train_dataset = pd.read_csv('used_car_train_20200313.csv', sep=' ')
testA_dataset = pd.read_csv('used_car_testA_20200313.csv', sep=' ')

print('Train data shape:', train_dataset.shape)
print('TestA data shape:', testA_dataset.shape)

# The bare expressions below only display output in a notebook cell
# (this file contains `%matplotlib inline`, so it was exported from one).
train_dataset.head()

train_dataset.info()

train_dataset.columns

testA_dataset.info()

train_dataset.describe()

testA_dataset.describe()

 
# Split columns by dtype: numeric columns are feature candidates,
# object (string) columns are listed but not used below.
numerical_features = train_dataset.select_dtypes(exclude='object').columns
print(numerical_features)

categorical_features = train_dataset.select_dtypes(include='object').columns
print(categorical_features)

# Drop identifiers, dates, the target, and high-cardinality code columns.
# 'creatDate' (sic) matches the dataset's actual misspelled column name.
feature_cols = [col for col in numerical_features if col not in ['SaleID', 'name', 'regDate', 'creatDate', 'price', 'model', 'brand', 'regionCode', 'seller']]
# Also drop every column whose name contains 'Type' (categorical codes).
feature_cols = [col for col in feature_cols if 'Type' not in col]

X_train_features = train_dataset[feature_cols]
Y_train_target = train_dataset['price']

# Test set must use exactly the same feature columns as the train set.
X_test_features = testA_dataset[feature_cols]

print('X train shape:', X_train_features.shape)
print('X test shape:', X_test_features.shape)

def statistical_info(data):
    """Print a quick numeric summary of *data*: min, max, mean,
    peak-to-peak range, standard deviation (population), and variance.

    Parameters
    ----------
    data : array-like
        Numeric sequence (list, ndarray, or pandas Series).
    """
    # Table-driven dispatch; labels are kept exactly as in the original
    # output (including the uneven use of ':').
    summary_fns = (
        ('_min', np.min),
        ('_max:', np.max),
        ('_mean', np.mean),
        ('_ptp', np.ptp),
        ('_std', np.std),
        ('_var', np.var),
    )
    for label, fn in summary_fns:
        print(label, fn(data))

# Summary statistics of the regression target (price).
print('Sta of label:')
statistical_info(Y_train_target)

# Histogram of the raw target distribution.
plt.hist(Y_train_target)
plt.show()
plt.close()

# Sentinel imputation: fill missing values with -1 on both splits
# (tree-based models below can split on the sentinel value).
X_train_features = X_train_features.fillna(-1)
X_test_features = X_test_features.fillna(-1)

 
# Baseline XGBoost regressor for a quick cross-validated MAE estimate.
xgr = xgb.XGBRegressor(n_estimators=120, learning_rate=0.1, gamma=0, subsample=0.8, colsample_bytree=0.9, max_depth=7)

scores_train = []  # per-fold MAE on the training folds
scores = []        # per-fold MAE on the held-out folds

# Fixed: StratifiedKFold requires a classification target and raises
# ValueError on the continuous 'price' target; plain KFold is correct
# for regression.
sk = KFold(n_splits=5, shuffle=True, random_state=0)
for train_ind, val_ind in sk.split(X_train_features, Y_train_target):
    train_x = X_train_features.iloc[train_ind].values
    train_y = Y_train_target.iloc[train_ind]
    val_x = X_train_features.iloc[val_ind].values
    val_y = Y_train_target.iloc[val_ind]

    # Refit from scratch on each fold, then score both splits.
    xgr.fit(train_x, train_y)
    pred_train_xgb = xgr.predict(train_x)
    pred_xgb = xgr.predict(val_x)

    scores_train.append(mean_absolute_error(train_y, pred_train_xgb))
    scores.append(mean_absolute_error(val_y, pred_xgb))

# Fixed: previously printed np.mean(score_train) — the LAST fold's scalar —
# instead of the mean over the accumulated per-fold list.
print('Train mae:', np.mean(scores_train))
print('Val mae', np.mean(scores))

def build_xgb_model(x_train, y_train):
    """Fit and return an XGBoost regressor with fixed hyper-parameters.

    Parameters
    ----------
    x_train, y_train : array-like
        Training features and target.
    """
    hyper_params = dict(
        n_estimators=150,
        learning_rate=0.1,
        gamma=0,
        subsample=0.8,
        colsample_bytree=0.9,
        max_depth=7,
    )
    regressor = xgb.XGBRegressor(**hyper_params)
    regressor.fit(x_train, y_train)
    return regressor

def build_lgb_model(x_train, y_train):
    """Grid-search the learning rate for a LightGBM regressor and return
    the fitted GridSearchCV object (which supports .predict).

    NOTE(review): GridSearchCV is left at its default scoring (the
    regressor's default scorer, R^2), not the MAE used for evaluation
    elsewhere in this script — confirm this is intended.
    """
    base_estimator = lgb.LGBMRegressor(num_leaves=127, n_estimators=150)
    search_space = {'learning_rate': [0.01, 0.05, 0.1, 0.2]}
    searcher = GridSearchCV(base_estimator, search_space)
    searcher.fit(x_train, y_train)
    return searcher

# Hold out 30% of the training data to estimate each model's validation MAE
# (used later as ensemble weights).
# NOTE(review): no random_state is set, so this split — and the final
# ensemble weights — are not reproducible across runs; confirm intended.
x_train, x_val, y_train, y_val = train_test_split(X_train_features, Y_train_target, test_size=0.3)

print('Train lgb...')
model_lgb = build_lgb_model(x_train, y_train)
val_lgb = model_lgb.predict(x_val)
MAE_lgb = mean_absolute_error(y_val, val_lgb)
print('MAE of val with lgb:', MAE_lgb)

print('Predict lgb...')
# Refit on the FULL training set before predicting the test set.
model_lgb_pred = build_lgb_model(X_train_features, Y_train_target)
subA_lgb = model_lgb_pred.predict(X_test_features)
print('Sta of Predict lgb:')
statistical_info(subA_lgb)

# Same protocol as the LightGBM branch: score on the hold-out split,
# then refit on the full training set for the test-set predictions.
print('Train xgb...')
model_xgb = build_xgb_model(x_train, y_train)
val_xgb = model_xgb.predict(x_val)
MAE_xgb = mean_absolute_error(y_val, val_xgb)
print('MAE of val with xgb:', MAE_xgb)

print('Predict xgb...')
model_xgb_pred = build_xgb_model(X_train_features, Y_train_target)
subA_xgb = model_xgb_pred.predict(X_test_features)
print('Sta of Predict xgb:')
statistical_info(subA_xgb)

# Inverse-error weighted blend: each model's weight is proportional to the
# OTHER model's MAE (1 - MAE_lgb/sum == MAE_xgb/sum), so the more accurate
# model gets the larger weight; the two weights sum to 1.
val_Weighted = (1 - MAE_lgb / (MAE_xgb + MAE_lgb)) * val_lgb + (1 - MAE_xgb / (MAE_xgb + MAE_lgb)) * val_xgb
# Prices cannot be negative; clamp stray negative blends to a small positive value.
val_Weighted[val_Weighted < 0] = 10
print('MAE of val with Weighted ensemble:', mean_absolute_error(y_val, val_Weighted))

sub_Weighted = (1 - MAE_lgb / (MAE_xgb + MAE_lgb)) * subA_lgb + (1 - MAE_xgb / (MAE_xgb + MAE_lgb)) * subA_xgb
# Fixed: apply the same negative-price clamp to the submission predictions —
# previously only the validation blend was clamped, so negative prices could
# be written to the submission file.
sub_Weighted[sub_Weighted < 0] = 10

# Fixed: plot the blended predictions' distribution; the original line was an
# exact duplicate of the earlier training-label histogram (copy-paste slip).
plt.hist(sub_Weighted)
plt.show()
plt.close()

# Build and write the submission file (SaleID, predicted price).
sub = pd.DataFrame()
sub['SaleID'] = testA_dataset.SaleID
sub['price'] = sub_Weighted
sub.to_csv('./sub_Weighted.csv', index=False)

sub.head()