import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, LabelEncoder
from sklearn.metrics import log_loss
import gc
from scipy import sparse
import warnings
warnings.filterwarnings('ignore')

# Movie metadata: one row per movie with its id, release year, and title.
# The title text is never used downstream, so it is dropped right away.
movie_col_tag = ['movieId', 'movieYear', 'movieName']
movie_titles_df = pd.read_csv(
    'C:\\Users\\lyf\\Desktop\\赵易聪\\大数据分析B\\HW2\\Project2-data\\movie_titles.txt',
    names=movie_col_tag,
    encoding='ISO-8859-1',
)
movie_titles_df = movie_titles_df.drop(columns='movieName')


data_col_tag = ['userId', 'movieId', 'rating', 'timestamp']
rating_data_train = pd.read_table(
    'C:\\Users\\lyf\\Desktop\\赵易聪\\大数据分析B\\HW2\\Project2-data\\netflix_train.txt',
    sep=r"\s+",
    names=data_col_tag,
)

# NOTE(review): the original per-row loop assigned through chained indexing
# (df.iloc[i]['col'] = ...), which writes into a temporary copy and never
# modifies the frame — none of the transformations actually stuck — and it
# was O(rows * movies). The vectorized passes below apply the intended
# transformations correctly.

# Split the 'YYYY-MM-DD' timestamp into numeric year/month/day features
# (kept in this column order so the final layout matches the original intent).
_ts_parts = rating_data_train['timestamp'].str.split('-', expand=True).astype(int)
rating_data_train['timestampYear'] = _ts_parts[0]
rating_data_train['timestampMonth'] = _ts_parts[1]
rating_data_train['timestampDay'] = _ts_parts[2]

# Look up each movie's release year from the metadata table (movieId assumed
# unique there — holds for the Netflix movie_titles file).
rating_data_train['movieYear_'] = rating_data_train['movieId'].map(
    movie_titles_df.set_index('movieId')['movieYear']
)

# Binarize the label: ratings 4-5 -> 1 ("liked"), 1-3 -> 0.
rating_data_train['rating'] = (rating_data_train['rating'] > 3).astype(int)

rating_data_train.drop('timestamp', axis=1, inplace=True)
# Move the label to the first column.
rating_data_train.insert(0, 'rating', rating_data_train.pop('rating'))

rating_data_train.to_csv('C:\\Users\\lyf\\Desktop\\赵易聪\\大数据分析B\\HW2\\Project2-data\\gbdt+lr_train.csv')




rating_data_test = pd.read_table(
    'C:\\Users\\lyf\\Desktop\\赵易聪\\大数据分析B\\HW2\\Project2-data\\netflix_test.txt',
    sep=r"\s+",
    names=data_col_tag,
)

# NOTE(review): same fix as for the training partition — the original loop's
# chained-indexing assignments never modified the frame. Vectorized instead.

# Split the 'YYYY-MM-DD' timestamp into numeric year/month/day features.
_ts_parts = rating_data_test['timestamp'].str.split('-', expand=True).astype(int)
rating_data_test['timestampYear'] = _ts_parts[0]
rating_data_test['timestampMonth'] = _ts_parts[1]
rating_data_test['timestampDay'] = _ts_parts[2]

# Look up each movie's release year from the metadata table.
rating_data_test['movieYear_'] = rating_data_test['movieId'].map(
    movie_titles_df.set_index('movieId')['movieYear']
)

# Binarize the label: ratings 4-5 -> 1, 1-3 -> 0.
rating_data_test['rating'] = (rating_data_test['rating'] > 3).astype(int)

rating_data_test.drop('timestamp', axis=1, inplace=True)
# Move the label to the first column.
rating_data_test.insert(0, 'rating', rating_data_test.pop('rating'))

rating_data_test.to_csv('C:\\Users\\lyf\\Desktop\\赵易聪\\大数据分析B\\HW2\\Project2-data\\gbdt+lr_test.csv')


category_fea = ['timestampMonth', 'timestampDay']
continuous_fea = ['userId', 'movieId', 'timestampYear', 'movieYear_']

# NOTE(review): gbdt_lr_model separates train rows from test rows by
# rating == -1, but after preprocessing BOTH partitions carry real 0/1
# labels, so the test slice came out empty. Mask the test labels explicitly
# before concatenating so the model can tell the partitions apart.
_test_unlabeled = rating_data_test.copy()
_test_unlabeled['rating'] = -1

# ignore_index gives the combined frame a clean 0..n-1 index; the original
# concat kept duplicate indices, which breaks index-aligned operations later.
rating_data = pd.concat([rating_data_train, _test_unlabeled], ignore_index=True)
rating_data.fillna(-1, inplace=True)

def gbdt_lr_model(data, category_fea, continuous_fea):  # recorded val-logloss ~= 0.43616
    """Train a GBDT+LR model and predict positive-class probabilities.

    A LightGBM GBDT is fit on the raw features; the leaf index each sample
    lands in (one per tree) is one-hot encoded and fed, together with the
    min-max-scaled raw features, into a logistic regression.

    Parameters
    ----------
    data : pd.DataFrame
        Train and test rows concatenated; test rows are marked by rating == -1.
    category_fea : list of str
        Discrete columns to one-hot encode before the GBDT stage.
    continuous_fea : list of str
        Numeric columns to min-max scale before the LR stage.

    Returns
    -------
    numpy.ndarray
        Predicted positive-class probability for each test row (also printed,
        as in the original implementation).
    """
    # One-hot encode the discrete features.
    for col in category_fea:
        onehot_feats = pd.get_dummies(data[col], prefix=col)
        data = pd.concat([data.drop(columns=col), onehot_feats], axis=1)

    # Split back into labelled train rows and unlabelled (-1) test rows.
    # reset_index(drop=True) + .copy(): the later axis=1 concats join against
    # RangeIndex-ed leaf-feature frames, and the original slices kept the
    # source index, silently misaligning / NaN-filling those concats; .copy()
    # also avoids SettingWithCopy on pop/drop over a view.
    train = data[data['rating'] != -1].reset_index(drop=True).copy()
    target = train.pop('rating')
    test = data[data['rating'] == -1].reset_index(drop=True).copy()
    test.drop(['rating'], axis=1, inplace=True)

    # Hold-out split used for early stopping of the GBDT.
    x_train, x_val, y_train, y_val = train_test_split(train, target, test_size=0.2, random_state=2020)

    gbm = lgb.LGBMClassifier(objective='binary',
                             subsample=0.8,
                             min_child_weight=0.5,
                             colsample_bytree=0.7,
                             num_leaves=100,
                             max_depth=12,
                             learning_rate=0.01,
                             n_estimators=1000,
                             )

    # The callbacks API works on lightgbm 3.x and 4.x alike; the
    # early_stopping_rounds= fit kwarg was removed in lightgbm 4.0.
    gbm.fit(x_train, y_train,
            eval_set=[(x_train, y_train), (x_val, y_val)],
            eval_names=['train', 'val'],
            eval_metric='binary_logloss',
            callbacks=[lgb.early_stopping(stopping_rounds=100)],
            )

    model = gbm.booster_

    # Leaf index of every sample in every tree -> new categorical features.
    gbdt_feats_train = model.predict(train, pred_leaf=True)
    gbdt_feats_test = model.predict(test, pred_leaf=True)
    gbdt_feats_name = ['gbdt_leaf_' + str(i) for i in range(gbdt_feats_train.shape[1])]
    df_train_gbdt_feats = pd.DataFrame(gbdt_feats_train, columns=gbdt_feats_name)
    df_test_gbdt_feats = pd.DataFrame(gbdt_feats_test, columns=gbdt_feats_name)

    # Row-by-row joins: both sides now share a clean 0..n-1 index.
    train = pd.concat([train, df_train_gbdt_feats], axis=1)
    test = pd.concat([test, df_test_gbdt_feats], axis=1)
    train_len = train.shape[0]
    data = pd.concat([train, test], ignore_index=True)
    del train, test
    gc.collect()

    # Min-max scale the continuous features.
    scaler = MinMaxScaler()
    for col in continuous_fea:
        data[col] = scaler.fit_transform(data[col].values.reshape(-1, 1))

    # One-hot encode the leaf-index features for the linear model.
    for col in gbdt_feats_name:
        onehot_feats = pd.get_dummies(data[col], prefix=col)
        data = pd.concat([data.drop(columns=col), onehot_feats], axis=1)

    # Positional split back into train/test (index was reset above).
    train = data[:train_len]
    test = data[train_len:]
    del data
    gc.collect()

    # Fresh split for evaluating the LR stage.
    x_train, x_val, y_train, y_val = train_test_split(train, target, test_size=0.3, random_state=2018)

    lr = LogisticRegression()
    lr.fit(x_train, y_train)
    tr_logloss = log_loss(y_train, lr.predict_proba(x_train)[:, 1])
    print('tr-logloss: ', tr_logloss)
    val_logloss = log_loss(y_val, lr.predict_proba(x_val)[:, 1])
    print('val-logloss: ', val_logloss)
    y_pred = lr.predict_proba(test)[:, 1]
    print(y_pred)
    # Backward-compatible addition: the original computed y_pred but returned
    # None; callers that ignore the return value are unaffected.
    return y_pred


# Train the GBDT+LR model and predict on the test partition.
gbdt_lr_model(rating_data.copy(), category_fea, continuous_fea)





