import numpy as np
import pandas as pd
import tensorflow as tf
import argparse
from tensorflow.keras.optimizers import Adam
from deepctr.models import DeepFM, xDeepFM, DIN, DIEN
from deepctr.feature_column import SparseFeat, DenseFeat, get_feature_names, VarLenSparseFeat
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, log_loss
from sklearn.preprocessing import OrdinalEncoder, MinMaxScaler
from util import set_imbalance_other, get_hist
import warnings

warnings.filterwarnings('ignore')

# Input CSV locations for the two data variants (site / app).
train_site_path = "./data/new_train_site.csv"
train_app_path = "./data/new_train_app.csv"
test_site_path = "./data/new_valid_site.csv"
test_app_path = "./data/new_valid_app.csv"

# Command line: which data variant to load and which model to train.
parser = argparse.ArgumentParser()
for flag, options in (("--data_type", ['app', 'site']),
                      ("--model_name", ['DeepFM', 'DIN', 'DIEN'])):
    parser.add_argument(flag, type=str, choices=options)
args = parser.parse_args()

def read_data(data_type):
    """Read train/test CSVs for the given data type with reduced-precision dtypes.

    A small sample is read first to infer per-column dtypes, so the full files
    can then be loaded with a smaller memory footprint (int32 instead of int64,
    float32 instead of float64, 'category' for strings).

    Args:
        data_type: 'site' or 'app', selecting which CSV pair to load.

    Returns:
        (data_train, data_test) tuple of DataFrames.

    Raises:
        ValueError: if data_type is neither 'site' nor 'app'.
    """
    if data_type == 'site':
        train_path, test_path = train_site_path, test_site_path
    elif data_type == 'app':
        train_path, test_path = train_app_path, test_app_path
    else:
        # Fail loudly instead of hitting an UnboundLocalError below.
        raise ValueError("data_type must be 'site' or 'app', got {!r}".format(data_type))

    # Infer dtypes from a sample of the file family we are actually loading;
    # the original sampled the app file even for site data, so site columns
    # could be mistyped or missed.
    data_sample = pd.read_csv(test_path, nrows=20)

    # Explicit overrides: 'user' fits in int32, 'id' needs the full uint64 range.
    column_types = {'user': np.int32, 'id': np.uint64}

    for key in data_sample.select_dtypes(include=['object']).columns:
        column_types[key] = 'category'

    for key in data_sample.select_dtypes(include=['int']).columns:
        column_types[key] = np.int32

    # Keep the 'user' override intact; errors='ignore' avoids a KeyError when
    # 'user' is not among the float64 columns of the sample.
    for key in data_sample.select_dtypes(include=['float64']).columns.drop(['user'], errors='ignore'):
        column_types[key] = 'float32'

    print("Reading {} data...".format(data_type))
    data_train = pd.read_csv(train_path, dtype=column_types)
    data_test = pd.read_csv(test_path, dtype=column_types)

    return data_train, data_test

def train(data_train, data_test, args):
    """Feature engineering, model construction, training and prediction.

    Pipeline:
      1. hash high-cardinality id/category columns into bounded integer ranges;
      2. bucket rare values of imbalanced columns (set_imbalance_other);
      3. ordinal-encode sparse features (encoder fit on the training split only);
      4. log-transform + min-max scale the count features;
      5. build DeepCTR feature columns / model inputs for the model selected by
         args.model_name (DeepFM / DIN / DIEN);
      6. train with early stopping, report test LogLoss/AUC, save weights and
         write predictions to ./result/.

    Args:
        data_train: training DataFrame; must contain the 'click' label column.
        data_test: test DataFrame; also contains 'click' here (it is the
            held-out validation file), so metrics can be computed on it.
        args: parsed CLI namespace with .model_name and .data_type.
    """

    # Keep the raw ids for the prediction file before they are hashed below.
    test_id = data_test['id'].values
    category_cols = [
        'device_id', 'device_ip', 'device_model',
        'pub_id', 'pub_domain', 'pub_category'
    ]
    # TODO: hash category_cols to further reduce memory usage.
    # NOTE(review): hash() on str is salted per process unless PYTHONHASHSEED
    # is fixed, so these encodings are not reproducible across runs — confirm
    # this is acceptable. `% 1e8` yields floats; cast back to int32 just below.
    for col in category_cols:
        data_train[col] = data_train[col].apply(lambda x: hash(x) % 1e8)
        data_test[col] = data_test[col].apply(lambda x: hash(x) % 1e8)
    for col in category_cols:
        data_train[col] = data_train[col].astype(np.int32)
        data_test[col] = data_test[col].astype(np.int32)

    # 'user' is hashed into [0, 1e6) to match the fixed vocabulary_size of
    # 1,000,000 used for the 'user' SparseFeat further down.
    id_cols = ['id', 'user']

    for col in id_cols:
        data_train[col] = data_train[col].apply(lambda x: hash(x) % 1e6)
        data_test[col] = data_test[col].apply(lambda x: hash(x) % 1e6)
    for col in id_cols:
        data_train[col] = data_train[col].astype(np.int32)
        data_test[col] = data_test[col].astype(np.int32)

    # TODO: group feature values that occur fewer than 10 times into "other".
    imbalance_cols = [
        "pub_id", "pub_domain", "device_model", "C14", "C17", "C20"
    ]
    for col in imbalance_cols:
        set_imbalance_other(data_train, col)
        set_imbalance_other(data_test, col)

    # Dense count features; everything else (minus labels/raw ids) is sparse.
    count_feats = ['device_id_count', 'device_ip_count', 'user_count', 'user_hour_count', 'pub_user_cnt']
    sparse_feats = data_train.columns.drop(['id', 'user', 'click', 'hour', 'device_ip', 'device_id'] + count_feats)
    id_feats = ['id', 'user']



    # TODO: split train/validation, using the last ~10% of rows as validation.
    # Note: -n//10 floors toward -inf, so the validation slice is ceil(n/10)
    # rows when n is not a multiple of 10; the two slices still partition n.
    all_idx = np.arange(data_train.shape[0])
    train_idx, valid_idx = all_idx[:-data_train.shape[0]//10], all_idx[-data_train.shape[0]//10:]

    # TODO: ordinal-encode the sparse features.
    # Fit on the training split only; categories unseen there are mapped to -1
    # and, after the +1 shift below, land on index 0.
    lbe = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1, dtype=np.int32)
    lbe.fit(data_train.loc[train_idx, sparse_feats])

    data_train[sparse_feats] = lbe.transform(data_train[sparse_feats])
    data_test[sparse_feats] = lbe.transform(data_test[sparse_feats])
    del lbe

    # Shift codes so that -1 (unknown) becomes 0 and all embedding indices
    # are non-negative.
    for feat in sparse_feats:
        data_train[feat] = data_train[feat] + 1
        data_test[feat] = data_test[feat] + 1

    # TODO: log-transform then min-max normalize the count features.
    # The scaler is fit on the training split only to avoid leakage.
    scaler = MinMaxScaler()
    data_train[count_feats] = np.log(data_train[count_feats] + 1e-5)
    data_test[count_feats] = np.log(data_test[count_feats] + 1e-5)
    scaler.fit(data_train.loc[train_idx, count_feats])
    data_train[count_feats] = scaler.transform(data_train[count_feats])
    data_test[count_feats] = scaler.transform(data_test[count_feats])

    if args.model_name in ['DIN', 'DIEN']:
        # Behavior-history arrays are built over train+test concatenated, then
        # sliced back apart: rows [0, len(data_train)) cover train+valid and
        # the remainder is test (valid_idx[-1] + 1 == data_train.shape[0]).
        all_hist_pub_id, all_hist_pub_domain, all_hist_pub_category = get_hist(
            pd.concat([data_train, data_test], ignore_index=True))

        train_hist_pub_id, valid_hist_pub_id, test_hist_pub_id = all_hist_pub_id[train_idx, :], all_hist_pub_id[
                                                                                                valid_idx,
                                                                                                :], all_hist_pub_id[
                                                                                                    valid_idx[
                                                                                                        -1] + 1:, ]
        train_hist_pub_domain, valid_hist_pub_domain, test_hist_pub_domain = all_hist_pub_domain[train_idx,
                                                                             :], all_hist_pub_domain[valid_idx,
                                                                                 :], all_hist_pub_domain[
                                                                                     valid_idx[-1] + 1:, :]
        train_hist_pub_category, valid_hist_pub_category, test_hist_pub_category = all_hist_pub_category[train_idx,
                                                                                   :], all_hist_pub_category[valid_idx,
                                                                                       :], all_hist_pub_category[
                                                                                           valid_idx[-1] + 1:, :]
    X_train = data_train.loc[train_idx].reset_index(drop=True)
    X_valid = data_train.loc[valid_idx].reset_index(drop=True)
    target = ['click']

    # TODO: build the DeepCTR feature columns.
    # +1 on nunique() leaves headroom for the unknown/0 index introduced above.
    fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=X_train[feat].nunique() + 1, embedding_dim=10)
                              for i, feat in enumerate(sparse_feats)
                              ] + [SparseFeat('user', vocabulary_size=1000000, embedding_dim=10)] + [
                                 DenseFeat(feat, 1, ) for feat in count_feats]

    if args.model_name == 'DIN':
        # DIN consumes fixed-length (maxlen=4) behavior-history sequences; the
        # hist_* inputs come from get_hist, not from the DataFrame columns.
        behavior_feature_list = ['pub_id', 'pub_domain', 'pub_category']
        marker_list = ["hist_" + x for x in behavior_feature_list]
        varlen_feature_columns = [
            VarLenSparseFeat(
                SparseFeat("hist_{}".format(feat), vocabulary_size=X_train[feat].nunique() + 1, embedding_dim=10,
                           embedding_name=feat), maxlen=4)
            for feat in behavior_feature_list
        ]

        feature_columns = fixlen_feature_columns + varlen_feature_columns
        feature_names = get_feature_names(feature_columns)

        train_model_input = {name: X_train[name] for name in feature_names if name not in marker_list}
        train_model_input['hist_pub_id'] = train_hist_pub_id
        train_model_input['hist_pub_domain'] = train_hist_pub_domain
        train_model_input['hist_pub_category'] = train_hist_pub_category

        valid_model_input = {name: X_valid[name] for name in feature_names if name not in marker_list}
        valid_model_input['hist_pub_id'] = valid_hist_pub_id
        valid_model_input['hist_pub_domain'] = valid_hist_pub_domain
        valid_model_input['hist_pub_category'] = valid_hist_pub_category

        test_model_input = {name: data_test[name] for name in feature_names if name not in marker_list}
        test_model_input['hist_pub_id'] = test_hist_pub_id
        test_model_input['hist_pub_domain'] = test_hist_pub_domain
        test_model_input['hist_pub_category'] = test_hist_pub_category
    elif args.model_name == 'DIEN':
        # DIEN additionally needs an explicit sequence-length input; every
        # history here is padded/truncated to exactly 4 entries.
        train_behavior_length = np.array([4 for i in range(X_train.shape[0])])
        valid_behavior_length = np.array([4 for i in range(X_valid.shape[0])])
        test_behavior_length = np.array([4 for i in range(data_test.shape[0])])
        behavior_feature_list = ['pub_id', 'pub_domain', 'pub_category']
        marker_list = ["hist_" + x for x in behavior_feature_list]
        varlen_feature_columns = [
            VarLenSparseFeat(
                SparseFeat("hist_{}".format(feat), vocabulary_size=X_train[feat].nunique() + 1, embedding_dim=10,
                           embedding_name=feat), maxlen=4, length_name='seq_length')
            for feat in behavior_feature_list
        ]

        feature_columns = fixlen_feature_columns + varlen_feature_columns
        feature_names = get_feature_names(feature_columns)

        train_model_input = {name: X_train[name] for name in feature_names if name not in marker_list + ['seq_length']}
        train_model_input['hist_pub_id'] = train_hist_pub_id
        train_model_input['hist_pub_domain'] = train_hist_pub_domain
        train_model_input['hist_pub_category'] = train_hist_pub_category
        train_model_input['seq_length'] = train_behavior_length

        valid_model_input = {name: X_valid[name] for name in feature_names if name not in marker_list + ['seq_length']}
        valid_model_input['hist_pub_id'] = valid_hist_pub_id
        valid_model_input['hist_pub_domain'] = valid_hist_pub_domain
        valid_model_input['hist_pub_category'] = valid_hist_pub_category
        valid_model_input['seq_length'] = valid_behavior_length

        test_model_input = {name: data_test[name] for name in feature_names if name not in marker_list + ['seq_length']}
        test_model_input['hist_pub_id'] = test_hist_pub_id
        test_model_input['hist_pub_domain'] = test_hist_pub_domain
        test_model_input['hist_pub_category'] = test_hist_pub_category
        test_model_input['seq_length'] = test_behavior_length
    elif args.model_name == 'DeepFM':
        # DeepFM uses the same fixed-length columns for both linear and DNN parts.
        dnn_feature_columns = fixlen_feature_columns
        linear_feature_columns = fixlen_feature_columns
        feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)

        train_model_input = {name:X_train[name] for name in feature_names}
        valid_model_input = {name:X_valid[name] for name in feature_names}
        test_model_input = {name:data_test[name] for name in feature_names}

    # NOTE(review): duplicate of the `target` assignment above.
    target = ['click']
    # NOTE(review): ".hdfs" looks like a typo for ".hdf5"; with this extension
    # the weights are written in TF-checkpoint rather than HDF5 format — confirm.
    checkpoint_path = "./tmp/checkpoint.hdfs"
    if args.model_name == 'DeepFM':

        model = DeepFM(linear_feature_columns, dnn_feature_columns,
                       task='binary',
                       l2_reg_linear=1e-6,
                       l2_reg_dnn=1e-6,
                       dnn_use_bn=True
                       )

    elif args.model_name == 'DIN':
        model = DIN(feature_columns, behavior_feature_list,
                    l2_reg_dnn=1e-5,
                    l2_reg_embedding=1e-5,
                    dnn_use_bn=True
                    )
    elif args.model_name == 'DIEN':
        model = DIEN(feature_columns, behavior_feature_list,
                     gru_type='AUGRU',
                     #              use_negsampling=True,
                     l2_reg_dnn=1e-5,
                     l2_reg_embedding=1e-5,
                     use_bn=True
                     )
    print("Training {} Model...".format(args.model_name))

    # NOTE(review): a learning rate of 1e-6 is unusually small for Adam —
    # confirm this is intentional and not a leftover from tuning.
    model.compile(Adam(1e-6), "binary_crossentropy",
                  metrics=['binary_crossentropy'], )



    # EarlyStopping monitors val_loss by default; best weights are restored.
    earlyStoppingCallbacks = tf.keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)
    checkpointCallbacks = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True)
    history = model.fit(train_model_input, X_train[target].values,
                        batch_size=512, epochs=1000, verbose=1,
                        validation_data=(valid_model_input, X_valid[target].values),
                        callbacks=[earlyStoppingCallbacks, checkpointCallbacks])
    print("Training finished, start predicting...")

    # data_test carries the 'click' label here (it is the validation file),
    # so test metrics can be computed directly.
    pred_ans = model.predict(test_model_input, batch_size=256)
    test_log_loss = log_loss(data_test[target].values, pred_ans)
    test_auc = roc_auc_score(data_test[target].values, pred_ans)
    print("test LogLoss", round(test_log_loss, 4))
    print("test AUC", round(test_auc, 4))

    model.save_weights("./tmp/model_weights/{}_{:.4}_{}.h5".format(args.model_name, test_auc, args.data_type))

    # Emit predictions keyed by the original (pre-hash) ids captured above.
    prediction = pd.DataFrame(columns=['id', 'click'])
    prediction['id'] = test_id
    prediction['click'] = pred_ans

    prediction.to_csv("./result/prediction_{}_{}.csv".format(args.data_type, args.model_name), index=False)

if __name__ == '__main__':
    # Load the requested dataset variant and run the full training pipeline.
    train(*read_data(args.data_type), args)
