# -*- coding: utf-8 -*-
# my package
import os

import numpy as np
import pandas as pd
# tensorflow packages
import tensorflow as tf
# other packages
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from tensorflow import keras
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences

from models import custom_objects
from otherpkg.utils import timeit, choose_gpu, load_pickle


def load_dataset(emb_matrix_list, his_clk_items, his_clk_time, his_clk_times,
                 age, gender, repeat=1, drop_remainder=False,
                 shuffle_buffer_size=10000, batch_size=32):
    """Build a shuffled, batched tf.data pipeline over click histories.

    Args:
        emb_matrix_list: list of embedding tensors passed to ``process_data``.
        his_clk_items, his_clk_time, his_clk_times: aligned feature arrays.
        age, gender: label arrays, grouped as the second dataset element.
        repeat: number of passes over the data (``None`` repeats indefinitely).
        drop_remainder: whether to drop the final partial batch.
        shuffle_buffer_size: size of the shuffle buffer.
        batch_size: examples per batch.

    Returns:
        A prefetching ``tf.data.Dataset`` of ``(features, (age, gender))``.
    """
    features = (his_clk_items, his_clk_time, his_clk_times)
    labels = (age, gender)
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    # Embedding expansion runs per example, before batching.
    dataset = dataset.map(
        lambda x, y: (process_data(x, emb_matrix_list), y),
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.repeat(repeat)
    dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
    return dataset.prefetch(1)


@tf.function
def process_data(data, emb_matrix_list):
    """Append per-position embedding tensors for one (unbatched) example.

    For each embedding matrix, looks up the item-id sequence ``data[0]`` and
    splits the result into one tensor per sequence position; these are
    appended after the raw feature tensors.  Graph-mode counterpart of
    ``process_data_by_np``.

    Args:
        data: tuple of tensors; ``data[0]`` holds the item-id sequence used
            for the embedding lookup.
        emb_matrix_list: list of embedding tensors indexed by item id.

    Returns:
        tuple: the original tensors in ``data`` followed by the per-position
        embedding tensors, matrix by matrix.
    """
    emb_list = list(map(lambda emb_matrix: tf.nn.embedding_lookup(emb_matrix, data[0]), emb_matrix_list))
    # One piece per sequence position, split along axis 0.
    split = list(map(lambda x: tf.split(x, x.shape[0], axis=0), emb_list))
    result = list(data)
    temp = []
    for l in split:
        # NOTE(review): the NumPy twin (process_data_by_np) squeezes the axis
        # it split on, but here the split axis is 0 while the squeeze is
        # axis=-1 — this only succeeds if each piece carries a trailing
        # singleton dim; confirm the embedding matrices' shapes.
        temp.extend(list(map(lambda x: tf.squeeze(x, axis=-1), l)))
    result.extend(temp)
    return tuple(result)


def process_data_by_np(data, emb_matrix_list):
    """Append per-position embedding slices for a batched input (NumPy path).

    For each embedding matrix, gathers rows for the id matrix ``data[0]``
    and splits the gathered block into one 2-D array per sequence position,
    appending them after the raw feature arrays.  NumPy counterpart of
    ``process_data``.

    Args:
        data: tuple of arrays; ``data[0]`` is the (batch, seq) id matrix.
        emb_matrix_list: list of embedding matrices indexed by id.

    Returns:
        list: the original arrays followed by the per-position embeddings.
    """
    result = list(data)
    for emb_matrix in emb_matrix_list:
        gathered = np.take(emb_matrix, data[0], axis=0)
        # Split the sequence axis and drop it, yielding (batch, emb) slices.
        for piece in np.split(gathered, gathered.shape[1], axis=1):
            result.append(np.squeeze(piece, axis=1))
    return result


def get_word_emb_matrix(max_words, word_index, word_vec, dtype='float32'):
    """Assemble an embedding matrix from a word->vector mapping.

    Rows are indexed by ``word_index``; words missing from ``word_vec`` or
    with index >= ``max_words`` are left as zeros.

    Args:
        max_words: number of rows in the output matrix.
        word_index: mapping word -> integer row index.
        word_vec: mapping word -> embedding (list or ndarray); all entries
            must share one shape.
        dtype: dtype of the output matrix.

    Returns:
        np.ndarray of shape ``(max_words,) + embedding_shape``.

    Raises:
        ValueError: if ``word_vec`` is empty (the embedding shape cannot be
            inferred).
    """
    # Fix: the original used a leftover debug print and relied on the loop
    # variable `v` leaking out of a `for ... break` to infer the shape,
    # which raised NameError for an empty word_vec.
    if not word_vec:
        raise ValueError("word_vec must not be empty")
    sample = next(iter(word_vec.values()))
    shape = (len(sample),) if isinstance(sample, list) else sample.shape
    emb_matrix = np.zeros((max_words,) + shape, dtype=dtype)
    for word, i in word_index.items():
        if i < max_words:
            embedding_vec = word_vec.get(word)
            if embedding_vec is not None:
                emb_matrix[i] = embedding_vec
    return emb_matrix


def tokenizer_transform(item_tokenizer, data, max_len):
    """Tokenize clicked-item histories and pad every history column.

    Args:
        item_tokenizer: fitted Keras-style tokenizer for creative ids.
        data: DataFrame with ``his_creative_id``, ``his_time`` and
            ``his_click_times`` columns holding per-user sequences.
        max_len: common padded length for all three outputs.

    Returns:
        Tuple ``(his_clk_items, his_clk_time, his_clk_times)`` of padded
        2-D arrays.
    """
    def _pad(sequences):
        # Shared fixed length so the three arrays stay position-aligned.
        return pad_sequences(sequences, maxlen=max_len)

    his_clk_items = _pad(item_tokenizer.texts_to_sequences(data['his_creative_id'].values))
    his_clk_time = _pad(data['his_time'].values)
    his_clk_times = _pad(data['his_click_times'].values)
    return his_clk_items, his_clk_time, his_clk_times


def process_item_profile(data, sparse_features, dense_features):
    """Encode sparse features and min-max scale dense features of the ad table.

    Args:
        data: DataFrame containing ``creative_id`` plus the listed feature
            columns.
        sparse_features: categorical column names; each is label-encoded and
            shifted to 1..n (0 stays free, e.g. for padding/OOV).
        dense_features: numeric column names; NaNs filled with 0, then scaled
            to [0, 1] per column.

    Returns:
        A new DataFrame restricted to ``creative_id`` + feature columns; the
        caller's frame is left untouched.
    """
    # Fix: take an explicit copy — assigning into the result of a column
    # selection risks pandas' SettingWithCopyWarning and unpredictable
    # mutation of the caller's DataFrame.
    data = data[['creative_id'] + sparse_features + dense_features].copy()
    for feat in sparse_features:
        lbe = LabelEncoder()
        # +1 keeps 0 reserved.
        data[feat] = lbe.fit_transform(data[feat]) + 1
    data[dense_features] = data[dense_features].fillna(0)
    mms = MinMaxScaler(feature_range=(0, 1))
    data[dense_features] = mms.fit_transform(data[dense_features])
    return data


@timeit
def main(file_flag):
    """Optionally fine-tune, then run inference for the age/gender model.

    Restores the item tokenizer, embedding matrices and best checkpoint for
    the run identified by ``file_flag``; if the ``training`` switch below is
    enabled, fine-tunes on the preliminary train/valid splits first; finally
    predicts age and gender for the test users and writes a submission CSV.

    Args:
        file_flag: integer suffix substituted into the data/artifact path
            templates below to select one preprocessed split.
    """
    choose_gpu(num=0)
    batch_size = 128
    # batch_size = 4096

    # flag = "e4_h4_n2_r0_smooth_count_feat_{}".format(file_flag)
    flag = "e4_h4_n2_r0_{}".format(file_flag)
    # artifact path templates, specialized with the run flag below
    item_toke_path = './models/item_tokenizer_{}.pickle'
    item_emb_matrix_list_path = "./models/item_emb_matrix_list_{}.pkl"
    checkpoint_dir = "./checkpoints/{}_cpt"
    log_dir = "./logs/{}_train_logs"
    item_toke_path = item_toke_path.format(flag)
    item_emb_matrix_list_path = item_emb_matrix_list_path.format(flag)
    checkpoint_dir = checkpoint_dir.format(flag)
    log_dir = log_dir.format(flag)

    # recover item tokenizer
    item_tokenizer = load_pickle(item_toke_path)

    max_len = 123  # from data analysis: 0.99 quantile of train history length
    # max_len = 2  # test

    # recover item_emb_matrix_list
    item_emb_matrix_list = load_pickle(item_emb_matrix_list_path)

    # recover best model
    checkpoint_path = os.path.join(checkpoint_dir, "weights.hdf5")
    model = load_model(checkpoint_path, custom_objects=custom_objects)

    # data paths
    base_dir = './input'
    train_dir = base_dir + '/train_preliminary'
    test_dir = base_dir + '/test'
    # set True to fine-tune before predicting; False jumps straight to inference
    training = False
    if training:
        train_user = pd.read_csv(train_dir + '/user.csv')
        train_data = pd.read_csv(base_dir + '/train_user_action_{}.csv'.format(file_flag))
        valid_data = pd.read_csv(base_dir + '/valid_user_action_{}.csv'.format(file_flag))

        # train_data = pd.read_csv('./sample/train_user_action_{}.csv'.format(file_flag))
        # valid_data = pd.read_csv('./sample/valid_user_action_{}.csv'.format(file_flag))

        # history columns are stored as space-separated ints; parse into lists
        for df in [train_data, valid_data]:
            for c in ['his_creative_id', 'his_time', 'his_click_times']:
                df[c] = df[c].apply(lambda x: list(map(int, x.split(" "))))

        # merge user labels
        train_data = pd.merge(train_data, train_user, on='user_id', how='left')
        valid_data = pd.merge(valid_data, train_user, on='user_id', how='left')

        # process item profile
        train_ad = pd.read_csv(train_dir + '/ad.csv')
        test_ad = pd.read_csv(test_dir + '/ad.csv')
        item_profile = pd.concat([train_ad, test_ad]).drop_duplicates()
        count_features = pd.read_csv(os.path.join(base_dir, 'count_feature.csv'))
        age_cols = [c for c in count_features.columns if 'age' in c]
        gender_cols = [c for c in count_features.columns if 'gender' in c]
        overall_count = count_features[age_cols+gender_cols].apply(np.sum, axis=0)
        smooth_count = overall_count.values
        enough_show = 300 # threshold derived from prior data analysis
        smooth_count = smooth_count / sum(smooth_count) * 2 * enough_show
        # impression-count smoothing: add global-prior pseudo-counts per row
        count_features[age_cols+gender_cols] = count_features[age_cols+gender_cols].apply(lambda x: x+smooth_count, axis=1)
        # normalize each column group into per-row distributions
        count_features[age_cols] = count_features[age_cols].apply(lambda x: x/sum(x), axis=1)
        count_features[gender_cols] = count_features[gender_cols].apply(lambda x: x/sum(x), axis=1)
        # merge dense features
        item_profile = pd.merge(item_profile, count_features, on='creative_id', how='left')
        # process oov item: fill missing count features with column means
        item_profile[age_cols+gender_cols] = item_profile[age_cols+gender_cols].fillna(item_profile[age_cols+gender_cols].mean())
        # sparse_features = ['ad_id', 'product_id', 'product_category', 'advertiser_id', 'industry']
        sparse_features = ['product_id', 'product_category', 'advertiser_id', 'industry']
        dense_features = age_cols + gender_cols
        item_profile = process_item_profile(item_profile, sparse_features, dense_features)
        print("item_profile shape:", item_profile.shape)

        # process train data: tokenize + pad histories to max_len
        train_his_clk_items, train_his_clk_time, train_his_clk_times = tokenizer_transform(item_tokenizer, train_data, max_len)
        valid_his_clk_items, valid_his_clk_time, valid_his_clk_times = tokenizer_transform(item_tokenizer, valid_data, max_len)
        print("train_his_clk_items shape:", train_his_clk_items.shape)
        print("valid_his_clk_items shape:", valid_his_clk_items.shape)

        # keep the (large) embedding constants in host memory
        emb_matrix_list = []
        with tf.device("/CPU:0"):
            for m in item_emb_matrix_list:
                emb_matrix_list.append(tf.constant(m))

        # callbacks: keep only the best checkpoint, stop on val_loss plateau
        callbacks = [
            ModelCheckpoint(checkpoint_path,
                            monitor="val_loss",
                            save_best_only=True),
            EarlyStopping(patience=5, monitor="val_loss"),
            TensorBoard(log_dir=log_dir)
        ]

        # age labels 1..10 -> one-hot over 10 classes
        train_age = train_data['age'] - 1
        valid_age = valid_data['age'] - 1
        train_age = keras.utils.to_categorical(train_age, 10)
        valid_age = keras.utils.to_categorical(valid_age, 10)

        # gender labels 1/2 -> binary column vectors of 0/1
        train_gender = train_data['gender'] - 1
        train_gender = train_gender.values.reshape((-1, 1))
        valid_gender = valid_data['gender'] - 1
        valid_gender = valid_gender.values.reshape((-1, 1))

        # repeat=None repeats the training set indefinitely;
        # each epoch is bounded by steps_per_epoch below
        train_dataset = load_dataset(emb_matrix_list, train_his_clk_items, train_his_clk_time,
                                     train_his_clk_times, train_age, train_gender, repeat=None, batch_size=batch_size)
        valid_dataset = load_dataset(emb_matrix_list, valid_his_clk_items, valid_his_clk_time,
                                     valid_his_clk_times, valid_age, valid_gender, batch_size=batch_size)

        epochs = 50
        model.fit(train_dataset,
                  steps_per_epoch=(train_data.shape[0]) // batch_size,
                  epochs=epochs,
                  validation_data=valid_dataset,
                  verbose=2, callbacks=callbacks)

        # load best model
        model = load_model(checkpoint_path, custom_objects=custom_objects)

    # load test data
    test_data = pd.read_csv(base_dir + '/test_user_action.csv')
    for c in ['his_creative_id', 'his_time', 'his_click_times']:
        test_data[c] = test_data[c].apply(lambda x: list(map(int, x.split(" "))))
    test_his_clk_items, test_his_clk_time, test_his_clk_times = tokenizer_transform(item_tokenizer, test_data, max_len)
    # inference path pre-computes embeddings in NumPy instead of via tf.data
    test_dataset = process_data_by_np((test_his_clk_items, test_his_clk_time, test_his_clk_times), item_emb_matrix_list)
    age_preds, gender_preds = model.predict(test_dataset, batch_size=2048)
    sub = pd.DataFrame({'user_id': test_data['user_id'].values})
    # back to 1-based labels
    sub['predicted_age'] = np.argmax(age_preds, axis=1) + 1
    # NOTE(review): 60261/90000 looks like a train-set class prior used as the
    # gender decision threshold — confirm where this constant comes from
    sub['predicted_gender'] = np.where(gender_preds.flatten() > 60261/90000, 2, 1)
    sub.to_csv('./output/sub_with__count_feat_{}.csv'.format(file_flag), index=False)


if __name__ == "__main__":
    file_flag = 1
    main(file_flag)
