import gc


def reduce_mem_usage(df):
    """Downcast numeric columns of *df* to the smallest dtype that holds them.

    Scans every non-object column's min/max and reassigns the column to the
    narrowest int / float dtype whose range covers it, printing the memory
    usage before and after.

    Parameters
    ----------
    df : pandas.DataFrame
        Modified in place (columns are reassigned) and also returned.

    Returns
    -------
    pandas.DataFrame
        The same DataFrame with downcast numeric columns.
    """
    start_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
    for col in df.columns:
        col_type = df[col].dtype
        if col_type != object:  # skip string/object columns
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                # Inclusive bounds: a column whose max is exactly
                # iinfo(int8).max still fits in int8 (the previous strict
                # </> comparisons wrongly excluded boundary values and
                # could even skip the final int64 branch entirely).
                if c_min >= np.iinfo(np.int8).min and c_max <= np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min >= np.iinfo(np.int16).min and c_max <= np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min >= np.iinfo(np.int32).min and c_max <= np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                else:
                    df[col] = df[col].astype(np.int64)
            else:
                # NOTE(review): any non-object column whose dtype name does
                # not start with 'int' lands here (floats, but also unsigned
                # ints) and is cast to a float type. float16 loses precision
                # beyond ~3 significant decimal digits — accepted here as a
                # memory/precision trade-off; confirm it is OK for the target
                # column.
                if c_min >= np.finfo(np.float16).min and c_max <= np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min >= np.finfo(np.float32).min and c_max <= np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df



import pandas as pd
# Read in the data
# data=pd.read_csv('train_V2.csv')
# print (origin_data.shape)
#origin_data.head()

import matplotlib.pyplot as plt
import seaborn as sns  # data visualization
import numpy as np

# Check how many matches there are

# 1. Data preprocessing and building the train/test sets
def BuildFeature(is_train=True):
    """Build group-level aggregate features from the raw PUBG csv files.

    Reads 'train_V2.csv' (when is_train) or 'test_V2.csv', shrinks its
    memory footprint with reduce_mem_usage, then aggregates the numeric
    feature columns per (matchId, groupId): mean / max / min plus each
    aggregate's percentile rank within the match, the group size, the
    match-wide feature means and the match size.

    Parameters
    ----------
    is_train : bool
        True  -> labels y are the per-group mean of 'winPlacePerc' and
                 the output has one row per group.
        False -> test Ids are captured in test_idx and the output has
                 one row per original test row.

    Returns
    -------
    tuple (X, y, feature_names, test_idx)
        X : pandas.DataFrame of features.
        y : numpy.ndarray of float64 labels (train only, otherwise None).
        feature_names : list of X's column names.
        test_idx : the test file's 'Id' column (test only, otherwise None).
    """
    y = None
    test_idx = None

    if is_train:
        print("Reading train.csv")
        df = pd.read_csv('train_V2.csv')
        # Drop degenerate matches with at most one placement slot.
        df = df[df['maxPlace'] > 1]
    else:
        print("Reading test.csv")
        df = pd.read_csv('test_V2.csv')
        test_idx = df.Id

    # Reduce the memory usage
    df = reduce_mem_usage(df)

    print("Delete Unuseful Columns")
    target = 'winPlacePerc'
    features = list(df.columns)
    # Identifier and categorical columns are excluded from aggregation.
    features.remove("Id")
    features.remove("matchId")
    features.remove("groupId")
    features.remove("matchType")

    if is_train:
        print("Read Labels")
        # One label per (matchId, groupId): the group mean of the target.
        y = np.array(df.groupby(['matchId', 'groupId'])[target].agg('mean'), dtype=np.float64)
        features.remove(target)

    print("Read Group mean features")
    agg = df.groupby(['matchId', 'groupId'])[features].agg('mean')
    # Percentile rank of each group's aggregate within its own match.
    agg_rank = agg.groupby('matchId')[features].rank(pct=True).reset_index()
    if is_train:
        df_out = agg.reset_index()[['matchId', 'groupId']]
    else:
        df_out = df[['matchId', 'groupId']]
    # Two-step suffix trick: the first merge brings the aggregate columns in
    # unsuffixed (no overlap, so suffixes=["", ""] is inert); the second
    # merge then overlaps on exactly those names, so pandas renames the pair
    # to *_mean / *_mean_rank in one go. The max/min sections below repeat
    # the same pattern.
    df_out = df_out.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
    df_out = df_out.merge(agg_rank, suffixes=["_mean", "_mean_rank"], how='left', on=['matchId', 'groupId'])

    print("Read Group max features")
    agg = df.groupby(['matchId', 'groupId'])[features].agg('max')
    agg_rank = agg.groupby('matchId')[features].rank(pct=True).reset_index()
    df_out = df_out.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
    df_out = df_out.merge(agg_rank, suffixes=["_max", "_max_rank"], how='left', on=['matchId', 'groupId'])

    print("Read Group min features")
    agg = df.groupby(['matchId', 'groupId'])[features].agg('min')
    agg_rank = agg.groupby('matchId')[features].rank(pct=True).reset_index()
    df_out = df_out.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
    df_out = df_out.merge(agg_rank, suffixes=["_min", "_min_rank"], how='left', on=['matchId', 'groupId'])

    print("Read Group size features")
    # Number of rows (players) in each group.
    agg = df.groupby(['matchId', 'groupId']).size().reset_index(name='group_size')
    df_out = df_out.merge(agg, how='left', on=['matchId', 'groupId'])

    print("Read Match mean features")
    # NOTE(review): by this point df_out only holds suffixed feature names,
    # so there is no column overlap and these match-level means keep their
    # bare names — the "_match_mean" suffix is never actually applied.
    agg = df.groupby(['matchId'])[features].agg('mean').reset_index()
    df_out = df_out.merge(agg, suffixes=["", "_match_mean"], how='left', on=['matchId'])

    print("Read Match size features")
    # Number of rows (players) in each match.
    agg = df.groupby(['matchId']).size().reset_index(name='match_size')
    df_out = df_out.merge(agg, how='left', on=['matchId'])

    df_out.drop(["matchId", "groupId"], axis=1, inplace=True)
    X = df_out
    feature_names = list(df_out.columns)
    # Free the large intermediates before returning.
    del df, df_out, agg, agg_rank
    gc.collect()

    return X, y, feature_names, test_idx

# NOTE(review): the "case 4" section below rebuilds these exact features
# again (each BuildFeature call re-reads the large csv files), so with all
# of cases 1-3 commented out this pair of calls does duplicate, unused
# work — consider removing it or reusing its results.
X_train, y_train, train_columns, _ = BuildFeature(is_train=True)
X_test, _, _ , test_idx = BuildFeature(is_train=False)

#[CASE1]----LINEAR REGRESSION
# from sklearn.linear_model import LinearRegression
# LR_model = LinearRegression(n_jobs=4, normalize=True)
# LR_model.fit(X_train,y_train)
#
# y_pred_train = LR_model.predict(X_train)
# y_pred_test = LR_model.predict(X_test)
#
# # 2. Score on the training set
# LR_model.score(X_train,y_train)
# # 3. Visualize the prediction results:
# y_pred_train[y_pred_train>1] = 1
# y_pred_train[y_pred_train<0] = 0
#
# # f, ax = plt.subplots(figsize=(10,10))
# # plt.scatter(y_train, y_pred_train)
# # plt.xlabel("y")
# # plt.ylabel("y_pred_train")
# # plt.xlim([0,1])
# # plt.ylim([0,1])
# # plt.show()
# print(LR_model.score(X_train,y_train))

# df_test=pd.DataFrame()
# df_test['Id']=test_idx
# df_test['winPlacePerc'] = y_pred_test
# submission = df_test[['Id', 'winPlacePerc']]
# submission.to_csv('submission_lr.csv', index=False)

# # case 2: neural network (MAE only 0.0452, validation score: 0.946906)
# Xtrain, ytrain, features,_ = BuildFeature(is_train=True)
# Xtest, _, _,test_idx = BuildFeature(is_train=False)
# from sklearn.neural_network import MLPRegressor
#
# clf = MLPRegressor(hidden_layer_sizes=(300, 200, 100, 50, ), activation='relu',
#                    solver='adam', alpha=0.0001, batch_size=128,  learning_rate='constant',
#                    learning_rate_init=0.001, max_iter=10, shuffle=True, verbose=True,
#                    early_stopping=True, validation_fraction=0.2)
# clf.fit(Xtrain, ytrain)
# yPred = clf.predict(Xtrain)
# print(yPred)
# yPred[yPred > 1] = 1
# yPred[yPred < 0] = 0
# plt.figure(figsize=(15, 15))
# plt.scatter(ytrain, yPred)
# plt.xlabel("y")
# plt.ylabel("Predict y")
# plt.show()
#
#
# Xtrain = None
# ytrain = None
# yPred = clf.predict(Xtest)
# yPred[yPred > 1] = 1
# yPred[yPred < 0] = 0
#
# df_test = reduce_mem_usage(pd.read_csv('test_V2.csv'))
# df_test['winPlacePerc'] = yPred
# submission = df_test[['Id', 'winPlacePerc']]
# submission.to_csv('submission_network.csv', index=False)

#case 3:GBR
# from sklearn.ensemble import GradientBoostingRegressor
# GBR = GradientBoostingRegressor(loss='ls',learning_rate=0.1,
#                                 n_estimators=100,max_depth=3)
# GBR.fit(X_train,y_train)
# print(GBR.score(X_train,y_train))
#
# y_pred_train = GBR.predict(X_train)
# print(y_pred_train)
#
# y_pred_train[y_pred_train>1] = 1
# y_pred_train[y_pred_train<0] = 0
#
# f, ax = plt.subplots(figsize=(10,10))
# plt.scatter(y_train, y_pred_train)
# plt.xlabel("y")
# plt.ylabel("y_pred_train")
# plt.xlim([0,1])
# plt.ylim([0,1])
# plt.show()
#
# y_pred_test = GBR.predict(X_test)
# y_pred_test[y_pred_test > 1] = 1
# y_pred_test[y_pred_test < 0] = 0
# print(y_pred_test)
# df_test = reduce_mem_usage(pd.read_csv('test_V2.csv'))
# df_test['winPlacePerc'] = y_pred_test
# submission = df_test[['Id', 'winPlacePerc']]
# submission.to_csv('submission_gbr.csv', index=False)

#case 4
# Build the group-level features and labels for the LightGBM model.
Xtrain, ytrain, features,_ = BuildFeature(is_train=True)
Xtest, _, _,test_idx = BuildFeature(is_train=False)
Xtrain=np.array(Xtrain)
ytrain=np.array(ytrain)
import lightgbm as lgb
print(Xtrain)
print(features)
# 80/20 random train/validation split over the group rows.
num = Xtrain.shape[0]
splitnum = int(num*0.8)
idx = np.arange(num)
# NOTE(review): the shuffle is unseeded, so the split (and hence the
# trained model) differs between runs — seed if reproducibility matters.
np.random.shuffle(idx)

trainX = Xtrain[idx[:splitnum]]
trainy = ytrain[idx[:splitnum]]
validX = Xtrain[idx[splitnum:]]
validy = ytrain[idx[splitnum:]]
gc.collect()

def lgbModel(trainX, trainy, validX, validy, testX):
    """Train a LightGBM MAE regressor with early stopping and predict testX.

    Parameters
    ----------
    trainX, trainy : array-like
        Training features and labels.
    validX, validy : array-like
        Validation split used for early stopping.
    testX : array-like
        Features to predict.

    Returns
    -------
    tuple (yPredTest, model)
        Predictions at the best iteration and the trained Booster.
    """
    # The original passed 'n_estimators' and 'early_stopping_rounds' both
    # inside params (as aliases) and as lgb.train arguments; pass each
    # setting exactly once to avoid conflicting/duplicated configuration.
    params = {
        "objective": "regression",
        "metric": "mae",          # competition metric
        "num_leaves": 31,
        "learning_rate": 0.05,
        "bagging_fraction": 0.7,
        "bagging_seed": 0,
        "num_threads": 4,
        "colsample_bytree": 0.7,
    }
    lgbTrain = lgb.Dataset(trainX, label=trainy)
    lgbVal = lgb.Dataset(validX, label=validy)
    model = lgb.train(params, lgbTrain,
                      num_boost_round=20000,
                      valid_sets=[lgbTrain, lgbVal],
                      early_stopping_rounds=200, verbose_eval=1000)
    yPredTest = model.predict(testX, num_iteration=model.best_iteration)
    return yPredTest, model
# Train LightGBM and predict winPlacePerc for the test set.
yPred, model = lgbModel(trainX, trainy, validX, validy, Xtest)

# winPlacePerc is a fraction in [0, 1]; clip out-of-range predictions before
# writing the submission (cases 1-3 above all do this, case 4 forgot to).
yPred = np.clip(yPred, 0, 1)

df_test = reduce_mem_usage(pd.read_csv('test_V2.csv'))
df_test['winPlacePerc'] = yPred
submission = df_test[['Id', 'winPlacePerc']]
submission.to_csv('submission_lgb.csv', index=False)