import pandas as pd
import numpy as np
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import seaborn as sns

# Pandas display settings: show all columns and rows when printing DataFrames.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Max rendered column width 100 characters (default 50).
# The bare 'max_colwidth' alias was deprecated and then removed in modern
# pandas; the fully-qualified 'display.max_colwidth' key works everywhere.
pd.set_option('display.max_colwidth', 100)

# --- Load the raw data files ---
# Paths are Windows-style and relative; "数据集" is the dataset directory.
train = pd.read_csv(r'数据集\train\train.csv')
stores = pd.read_csv(r"数据集\stores.csv")
oil = pd.read_csv(r"数据集\oil.csv")
transactions = pd.read_csv(r"数据集\transactions\transactions.csv")
holidays_events = pd.read_csv(r"数据集\holidays_events.csv")
test = pd.read_csv(r"数据集\test.csv")
# Left-join auxiliary tables onto train so each sales row carries its store
# metadata, daily oil price, transaction count, and holiday information.
train = train.merge(stores, how="left", on='store_nbr')
train = train.merge(oil, how="left", on='date')
train = train.merge(transactions, how="left", on=['date','store_nbr'])
train = train.merge(holidays_events,on='date',how='left')
# Both stores and holidays_events have a 'type' column; the merge suffixes
# them _x/_y, so rename to meaningful names.
train = train.rename(columns={'type_x' : 'store_type','type_y':'holiday_type'})

# Same joins for test (no transactions table for the test period).
test = test.merge(stores, how="left", on='store_nbr')
test = test.merge(oil, how="left", on='date')
test = test.merge(holidays_events,on='date',how='left')
test = test.rename(columns={'type_x' : 'store_type','type_y':'holiday_type'})
# Check for fully duplicated records after the joins.
# NOTE(review): holidays_events may contain several rows for one date, which
# would fan out the left joins above — verify row counts after merging.
dup = train[train.duplicated(subset=train.columns.tolist(), keep=False)]
print(f"Duplicate records in train: {dup.shape[0]}")

dup = test[test.duplicated(subset=test.columns.tolist(), keep=False)]
print(f"Duplicate records in test: {dup.shape[0]}")

# --- Missing-value handling ---
train['date'] = pd.to_datetime(train['date'])
# Resample the oil price to daily frequency to visualise the gaps before filling.
oil_mis = train.set_index("date").dcoilwtico.resample("D").mean().reset_index()
plt.rcParams['figure.figsize'] = (20, 5)
plt.title('Oil Price')
plt.ylabel('dcoilwtico')
plt.plot(oil_mis['date'],oil_mis['dcoilwtico'])
# Back-fill missing oil prices from the next observed day.
# fillna(method='bfill') is deprecated (removed in pandas 3.0) — use .bfill().
# NOTE(review): a trailing run of NaNs (after the last observed price) would
# remain unfilled; confirm the series ends with an observed value.
train['dcoilwtico'] = train['dcoilwtico'].bfill()
test['dcoilwtico'] = test['dcoilwtico'].bfill()

# Rows with no transactions record get 0 (no recorded store activity).
train.transactions = train.transactions.replace(np.nan,0)

# Rows that matched no holiday record: blank locale columns, holiday_type
# 'Work Day', and transferred=False (an ordinary working day).
train[['locale','locale_name', 'description']] = train[['locale','locale_name', 'description']].replace(np.nan,'')
train['holiday_type'] = train['holiday_type'].replace(np.nan,'Work Day')
train['transferred'] = train['transferred'].replace(np.nan,False)

test[['locale','locale_name', 'description']] = test[['locale','locale_name', 'description']].replace(np.nan,'')
test['holiday_type'] = test['holiday_type'].replace(np.nan,'Work Day')
test['transferred'] = test['transferred'].replace(np.nan,False)

# --- Feature engineering: calendar features on train ---
# Use vectorised .dt accessors instead of per-row apply(pd.to_datetime):
# 'date' is already datetime64 after the conversion below, and the original
# row-wise apply re-parsed every single value.
train['date'] = pd.to_datetime(train['date'])
train['Quarter'] = train['date'].dt.quarter
# Series.dt.week was removed in pandas 2.0; isocalendar().week replaces it
# (returns UInt32, cast back to int to match the old dtype).
train['Week'] = train['date'].dt.isocalendar().week.astype(int)
train['DayofWeek'] = train['date'].dt.dayofweek
train['isWeekend'] = np.where(train['DayofWeek'].isin([5,6]),1,0)
train['Month'] = train['date'].dt.month
train['Year'] = train['date'].dt.year
train['Day'] = train['date'].dt.day
# Season buckets: 0 = Feb-Mar, 1 = Apr-Jun, 2 = Jul-Aug, 3 = Sep-Nov, 4 = Dec-Jan.
train['season'] = train['Month'].apply(lambda x: 0 if x in [2,3] else 1 if x in [4,5,6] else 2 if x in [7,8] else 3 if x in [9,10,11] else 4)

# --- Calendar features on test ---
# BUG FIX: the original assigned pd.to_datetime(train['date']) to
# test['date'], silently overwriting the test dates with (index-aligned)
# train dates. Parse test's own date column instead.
test['date'] = pd.to_datetime(test['date'])
test['Quarter'] = test['date'].dt.quarter
# isocalendar().week replaces the removed Series.dt.week (see train above).
test['Week'] = test['date'].dt.isocalendar().week.astype(int)
test['DayofWeek'] = test['date'].dt.dayofweek
test['isWeekend'] = np.where(test['DayofWeek'].isin([5,6]),1,0)
test['Month'] = test['date'].dt.month
test['Year'] = test['date'].dt.year
test['Day'] = test['date'].dt.day
# Same season bucketing as for train.
test['season'] = test['Month'].apply(lambda x: 0 if x in [2,3] else 1 if x in [4,5,6] else 2 if x in [7,8] else 3 if x in [9,10,11] else 4)

# Countdown features: days remaining until Thanksgiving (Nov 24) and
# Christmas (Dec 24) of each row's own year; negative once the date is past.
# ("Days_to_Thansksgiving" typo is kept — model_features references it.)
for _frame in (train, test):
    _row_dates = pd.to_datetime(_frame["date"], format="%Y-%m-%d")
    _years = _frame["Year"].astype(str)
    for _col, _monthday in (("Days_to_Thansksgiving", "-11-24"),
                            ("Days_to_Christmas", "-12-24")):
        _holiday = pd.to_datetime(_years + _monthday, format="%Y-%m-%d")
        _frame[_col] = (_holiday - _row_dates).dt.days.astype(int)

# --- Ordinal encoding of categorical columns ---
# Hand-picked orderings (a larger integer = assumed stronger effect on sales).
storetype_values = {'A':5, 'D':4, 'B':3, 'E':2, 'C':1}
holidaytype_values = {'Additional':6, 'Transfer':5, 'Bridge':4, 'Event':3, 'Holiday':2, 'Work Day':1}
locale_values = {'National':4, 'Regional':3, 'Local':2, '':1}

# Apply the same mappings to both frames so encodings stay consistent.
for _df in (train, test):
    _df['store_type'] = _df['store_type'].map(storetype_values)
    _df['holiday_type'] = _df['holiday_type'].map(holidaytype_values)
    _df['locale'] = _df['locale'].map(locale_values)

# --- Label encoding of the remaining categorical columns ---
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
feature = ['family','city','state','locale_name','transferred']
# BUG FIX: the original ran fit_transform independently on train and test,
# so the same category could map to DIFFERENT integers in the two frames
# (and unseen test categories would shift every code). Fit each column's
# encoder on the union of train and test values, then transform both.
for _col in feature:
    le = preprocessing.LabelEncoder()
    le.fit(pd.concat([train[_col], test[_col]], ignore_index=True))
    train[_col] = le.transform(train[_col])
    test[_col] = le.transform(test[_col])

#建模（回归分析）
from sklearn.linear_model import ElasticNet, Lasso, Ridge
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import LabelEncoder
#import pandas.util.testing as tm
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

# Predictor columns shared by every model below (target column is 'sales').
model_features = ['store_nbr', 'family', 'onpromotion', 'city',
       'state', 'store_type', 'cluster', 'dcoilwtico',
       'holiday_type', 'locale', 'locale_name', 'transferred',
       'isWeekend', 'Year', 'season',
       'Days_to_Thansksgiving', 'Days_to_Christmas', 'Month', 'Quarter',
       'Week', 'DayofWeek','Day']

train.date = pd.to_datetime(train.date)

# Hold out everything after 2017-07-15 as a time-based evaluation frame.
# (pandas parses the dotted "2017.07.15" string when comparing to datetimes.)
split_date = "2017.07.15"
df_train_model = train.loc[train.date <= split_date].copy()
df_test_model = train.loc[train.date > split_date].copy()  # NOTE(review): not used below in this section

df_test_model.set_index('date', inplace=True)
df_train_model.set_index('date', inplace=True)

# Random row split of the pre-split_date data for model evaluation.
# NOTE(review): randomly splitting time-series rows leaks future information
# into training; a chronological split would give a more honest score.
Y = df_train_model['sales']
X = df_train_model[model_features]
test_size = .3
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state = 3)

def rmsle(h, y):
    """Root Mean Squared Logarithmic Error of predictions *h* vs targets *y*.

    Negative predictions are clipped to 0 before taking logs: the original
    np.log(h + 1) produced NaN (with a RuntimeWarning) whenever a linear
    model predicted a negative sales value, making the score useless.
    np.log1p(x) equals log(x + 1) but is accurate for small x.
    """
    h = np.maximum(np.asarray(h, dtype=float), 0)
    y = np.asarray(y, dtype=float)
    return np.sqrt(np.square(np.log1p(h) - np.log1p(y)).mean())

def reg_analysis(model, X_train, y_train, X_test, y_test):
    """Fit `model` on the training split, print its R^2 and RMSLE on the
    held-out split, and return the fitted model."""
    model.fit(X_train, y_train)
    preds = model.predict(X_test)
    print ('Model score = ',r2_score(y_test, preds))
    print ('RMSLE score = ',rmsle(preds,y_test))
    return model
# Fit and evaluate a set of baseline regressors with default settings.
ada_model = AdaBoostRegressor()
dt_model = DecisionTreeRegressor()
lasso_model = Lasso()
elastic_model = ElasticNet()
ridge_model = Ridge()

for _label, _estimator in (
    ('AdaBoostRegressor', ada_model),
    ('DecisionTreeRegressor', dt_model),
    ('Lasso', lasso_model),
    ('ElasticNet', elastic_model),
    ('Ridge', ridge_model),
):
    print(_label)
    reg_analysis(_estimator, X_train, y_train, X_test, y_test)

# --- Random forest ---
# Build train / test frames for the random-forest experiments.
# NOTE(review): .sample(500000) has no random_state, so this subsample (and
# every result downstream) is not reproducible between runs.
df_train_rf = train.sample(500000).reset_index(drop=True).copy()
df_test_rf = train.loc[train.date > "2017.07.15"].copy()  # NOTE(review): never used below

# Random split of the subsample for RF modelling (reuses the global names).
Y = df_train_rf['sales']
X = df_train_rf[model_features]
test_size = .3
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state = 3)

# Baseline model with default hyper-parameters, scored before tuning.
model = RandomForestRegressor()
model.fit(X_train,y_train)
prediction = model.predict(X_test)
print ('调参前Model score = ',r2_score(y_test, prediction))
print ('调参前RMSLE score =  ',rmsle(prediction,y_test))

# Plot the baseline forest's feature importances, most important first.
importance_df = pd.DataFrame(
    {'feature': X_train.columns, 'importance': model.feature_importances_}
).sort_values('importance', ascending=False)

plt.figure(figsize=(16,10))
plt.title('Feature Importance')
sns.barplot(data=importance_df, x='importance', y='feature')
plt.show()

# --- Hyper-parameter tuning helpers ---
def test_params(**params):
    """Train a forest with the given hyper-parameter overrides and return
    (training RMSLE, validation RMSLE) on the global splits."""
    forest = RandomForestRegressor(random_state=42, n_jobs=-1, **params)
    forest.fit(X_train, y_train)
    return (rmsle(forest.predict(X_train), y_train),
            rmsle(forest.predict(X_test), y_test))

def test_param_and_plot(param_name, param_values):
    """Sweep one hyper-parameter over `param_values` and plot the
    training-vs-validation RMSLE overfitting curve."""
    scores = [test_params(**{param_name: value}) for value in param_values]
    train_errors = [s[0] for s in scores]
    val_errors = [s[1] for s in scores]
    plt.figure(figsize=(16,8))
    plt.title('Overfitting curve: ' + param_name)
    plt.plot(param_values, train_errors, 'b-o')
    plt.plot(param_values, val_errors, 'r-o')
    plt.xlabel(param_name)
    plt.ylabel('RMSLE')
    plt.legend(['Training', 'Validation'])
    plt.show()
# NOTE(review): the hyper-parameter sweep below is disabled by wrapping it in
# a bare triple-quoted string (a no-op expression statement). Kept verbatim;
# re-enable by removing the quotes. Prefer '#'-commented lines or an
# `if RUN_SWEEPS:` guard over string-literal commenting.
'''
#max_depth（树的最大生长深度）
test_param_and_plot('max_depth', [5, 10, 15, 20, 25, 30, 35])
#n_estimators（子树的数量）
test_param_and_plot('n_estimators', [10, 30, 50, 70, 90, 100])
#min_samples_leaf（叶子的最小样本数量）
test_param_and_plot('min_samples_split', [2, 3, 4, 5, 6, 7, 8, 9, 10])
#min_samples_split(分支节点的最小样本数量）
test_param_and_plot('min_samples_leaf', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
#max-samples
test_param_and_plot('max_samples', [0.2, 0.4, 0.6, 0.8, 1])
#max_features（最大选择特征数）
test_param_and_plot('max_features', [8, 9, 10, 11, 12, 13, 14, 15, 16])
'''
# Final model with the tuned hyper-parameters.
# (Redundant re-import removed: RandomForestRegressor is imported above.)
model_tun = RandomForestRegressor(n_jobs=-1, max_depth=35, n_estimators=130, min_samples_split=2, min_samples_leaf=1, max_samples=0.80, max_features=20, random_state=123)
model_tun.fit(X_train, y_train)
train_pred = model_tun.predict(X_train)
test_pred = model_tun.predict(X_test)
# BUG FIX: the original scored the tuned model on the TRAINING data
# (y_train vs train_pred), which overstates quality and is not comparable to
# the pre-tuning scores above; test_pred was computed but never used.
# Report held-out performance instead.
print ('调参后Model score = ',r2_score(y_test, test_pred))
print ('调参后RMSLE score =  ',rmsle(test_pred,y_test))
