# Regression algorithms module

# Importing libraries

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

#models
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
# from xgboost import XGBRegressor

from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import mean_squared_error

import warnings
# Silence all library warnings (e.g. sklearn deprecations, pandas
# SettingWithCopyWarning) to keep the console output readable.
warnings.filterwarnings('ignore')
sns.set()  # apply seaborn's default plot styling globally

# ----------------------- Helper functions -------------------

def rmse(y_true, y_pred):
    """Return the root-mean-squared error between two equal-length array-likes.

    Computed directly with numpy instead of sklearn's
    ``np.sqrt(mean_squared_error(...))`` two-step: the equivalent
    ``squared=False`` shortcut was deprecated in scikit-learn 1.4, and the
    direct form has no version dependence.

    Parameters
    ----------
    y_true, y_pred : array-like of numbers (lists, Series, ndarrays).

    Returns
    -------
    float
        The RMSE as a plain Python float.
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))

def eval_model(model, X_train, y_train, X_test, y_test):
    """Fit ``model`` on the training split and report train/test RMSE.

    Prints both scores (preserving the original console output) and also
    returns them so callers can use the numbers programmatically instead
    of scraping stdout.

    Parameters
    ----------
    model : estimator with ``fit``/``predict`` (sklearn-style API).
    X_train, y_train : training features and target.
    X_test, y_test : held-out features and target.

    Returns
    -------
    tuple of (train_rmse, test_rmse) as floats.
    """
    model.fit(X_train, y_train)
    train_rmse = rmse(y_train, model.predict(X_train))
    test_rmse = rmse(y_test, model.predict(X_test))
    print("Train rmse : ", train_rmse)
    print("Test rmse : ", test_rmse)
    return train_rmse, test_rmse


# Load the abalone dataset; expects abalone.csv in the working directory.
data = pd.read_csv('abalone.csv')

# Use the ring count as the regression target under the clearer name `age`.
# NOTE(review): the usual abalone convention is age ≈ rings + 1.5 years;
# here the raw ring count itself is used — confirm that is intentional.
data['age'] = data.Rings

# Remove the now-duplicated Rings column.
data.drop('Rings', axis=1, inplace=True)

print("Data loaded Successfully!")
print("No. of rows : ", data.shape[0])
print("No. of columns : ", data.shape[1])


# train test split

# Hold out 25% of the rows for evaluation; random_state pins the shuffle
# so the split is reproducible across runs.
train, test = train_test_split(data, test_size=0.25, random_state=1)
print('Train data points :', len(train))
print('Test data points :', len(test))

# Continuous measurement columns of the abalone dataset.
numerical_features = ["Length", 'Diameter', 'Height','Whole weight',
                      'Shucked weight', 'Viscera weight', 'Shell weight']

# The only categorical column; integer-encoded further below.
categorical_feature = "Sex"

# Full model input: all numeric measurements plus the encoded Sex column.
features = numerical_features + [categorical_feature]

# Regression target (ring count, renamed to `age` during loading).
target = 'age'

# Encode the categorical Sex column as integers.  Work on explicit copies:
# train_test_split returns slices of `data`, and assigning into those views
# (as the original `train.Sex = ...` chained assignment did) raises
# SettingWithCopyWarning and silently fails under pandas copy-on-write.
train = train.copy()
test = test.copy()

sex_codes = {"M": 1, "I": 0, "F": -1}
train["Sex"] = train["Sex"].replace(sex_codes)
test["Sex"] = test["Sex"].replace(sex_codes)

# Drop extreme outliers from the TRAINING set only, so evaluation on the
# untouched test set still reflects real-world data.  The three per-row
# conditions are independent, so one combined mask removes exactly the
# same rows as the original three sequential drops.
outlier_mask = (
    (train["Height"] > 0.4)
    | (train["Viscera weight"] > 0.6)
    | (train[target] > 25)
)
train = train.loc[~outlier_mask]

X_train = train[features]
y_train = train[target]

X_test = test[features]
y_test = test[target]

# NOTE(review): the original `X_train.head` / `y_train.head` lines were
# no-ops (attribute access without a call, result discarded) — removed.

# Baseline regressors to compare, keyed by a short display name.
# Insertion order matters: the hyperparameter grids in `params` further
# below are paired with these entries positionally.
models = {
    'linear_regression': LinearRegression(),
    'lasso': Lasso(random_state=1),
    'decision_tree': DecisionTreeRegressor(random_state=1),
    'random_forest': RandomForestRegressor(random_state=1),
}

# Baseline comparison: fit each untuned model and report train/test RMSE.
for model_name in models:
    print(model_name)
    eval_model(models[model_name], X_train, y_train, X_test, y_test)
    print("\n------------------------------------------")

# Hyperparameter search spaces, one per model (consumed positionally,
# in `models` insertion order, by the search loop below).

# Linear regression
lr_params = {'fit_intercept': [True, False]}

# Lasso
lasso_params = {'alpha': [1e-4, 1e-3, 1e-2, 1, 10, 100]}

# Decision tree.  max_features entries are fractions of the feature count.
# The original list ended in the int 1, which sklearn reads as "use exactly
# ONE feature" — almost certainly meant to be 1.0 ("use all features").
dt_params = {'max_depth': [4, 6, 8, 10, 12, 14, 16, 20],
             'min_samples_split': [5, 10, 20, 30, 40, 50],
             'max_features': [0.2, 0.4, 0.6, 0.8, 1.0],
             'max_leaf_nodes': [8, 16, 32, 64, 128, 256]}

# Random forest.  'auto' was removed from max_features in scikit-learn 1.3
# (it would now raise during the search); for regressors it meant "all
# features", i.e. the fraction 1.0.
rf_params = {'bootstrap': [True, False],
             'max_depth': [2, 5, 10, 20, None],
             'max_features': [1.0, 'sqrt'],
             'min_samples_leaf': [1, 2, 4],
             'min_samples_split': [2, 5, 10],
             'n_estimators': [100, 150, 200, 250]}

# XGBoost grid — currently unused: the XGBRegressor import is commented
# out above, so the search loop never reaches this last entry.
xgb_params = {'n_estimators': [100, 200, 300],
              'max_depth': list(range(1, 10)),
              'learning_rate': [0.006, 0.007, 0.008, 0.05, 0.09],
              'min_child_weight': list(range(1, 10))}

params = [lr_params, lasso_params, dt_params, rf_params, xgb_params]

# searching Hyperparameters
# zip() pairs each model directly with its grid (the old manual counter
# `i` indexing into `params` was fragile — reordering either collection
# would silently mis-pair them).  random_state makes the sampled candidate
# sets reproducible, matching the random_state=1 convention used for the
# split and the estimators elsewhere in this script.
for (name, model), param_grid in zip(models.items(), params):
    print(name)
    regressor = RandomizedSearchCV(estimator=model,
                                   n_iter=10,
                                   param_distributions=param_grid,
                                   cv=3,
                                   scoring='neg_root_mean_squared_error',
                                   random_state=1)

    search = regressor.fit(X_train, y_train)

    # best_score_ is a negated RMSE (scoring is "neg_..."), so flip the sign.
    print('Best params :', search.best_params_)
    print("RMSE :", -search.best_score_)
    print()

# Refit a random forest on the full training data using the best
# hyperparameters found by the randomized search above.
rf_params = dict(n_estimators=200,
                 min_samples_split=2,
                 min_samples_leaf=4,
                 max_features='sqrt',
                 max_depth=None,
                 bootstrap=True)

model = RandomForestRegressor(random_state=1, **rf_params)
model.fit(X_train, y_train)

import pickle

# Persist the trained model, then immediately reload it to confirm the
# artifact on disk round-trips cleanly.
model_file = "model.pkl"
with open(model_file, "wb") as fh:
    pickle.dump(model, fh)

with open(model_file, "rb") as fh:
    model = pickle.load(fh)



