# -*- coding: utf-8 -*-
# my package
import copy
import os
import shutil
from graph_emb.word2vec import CustomWord2Vec
import gc

import numpy as np
import pandas as pd
# tensorflow packages
import tensorflow as tf
# other packages
from deepctr.inputs import SparseFeat
from sklearn.preprocessing import LabelEncoder
from tensorflow import keras
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer

from models import custom_objects
from models.my_model import E2EModel
from otherpkg.utils import timeit, choose_gpu, save_by_pickle, load_rec_without_date


def load_w2v(table_name):
    """Load item embedding vectors from the given table.

    The table is expected to have an ``id`` column and a ``vector`` column
    holding comma-separated floats.

    :param table_name: name of the table to read via ``load_rec_without_date``.
    :return: dict mapping int item id -> list[float] embedding vector.
    """
    records = load_rec_without_date(table_name)
    # Parse the comma-separated vector strings into float lists.
    records['vector'] = records['vector'].apply(
        lambda s: [float(v) for v in s.split(",")])
    records = records.astype({'id': int})
    return dict(zip(records['id'], records['vector']))


def load_dataset(emb_matrix_list, his_clk_items, his_clk_time, his_clk_times,
                 age, gender, repeat=1, drop_remainder=False,
                 shuffle_buffer_size=10000, batch_size=32):
    """Build a shuffled, batched tf.data pipeline for training/validation.

    Each example is expanded with embedding lookups (see ``process_data``)
    before batching.

    :param emb_matrix_list: list of embedding matrices (tf constants).
    :param his_clk_items: padded item-id sequences.
    :param his_clk_time: padded click-time sequences.
    :param his_clk_times: padded (sqrt-transformed) click-count sequences.
    :param age: one-hot age labels.
    :param gender: binary gender labels.
    :param repeat: dataset repeat count (None = repeat forever).
    :param drop_remainder: drop the final partial batch if True.
    :return: prefetched ``tf.data.Dataset`` of ((features...), (age, gender)).
    """
    features = (his_clk_items, his_clk_time, his_clk_times)
    labels = (age, gender)
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    dataset = dataset.map(
        lambda x, y: (process_data(x, emb_matrix_list), y),
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.repeat(repeat)
    dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
    return dataset.prefetch(1)


@tf.function
def process_data(data, emb_matrix_list):
    """Expand one un-batched example with embedding lookups.

    :param data: tuple whose first element is the item-id sequence
        (presumably shape ``(max_len,)`` — matches how ``load_dataset``
        maps before batching; confirm against the caller).
    :param emb_matrix_list: list of lookup matrices. The first is the dense
        item-embedding matrix; the rest appear to be one-column sparse-feature
        id matrices (see ``main``), so each lookup yields shape
        ``(max_len, 1)`` — TODO confirm.
    :return: tuple of the original inputs, the dense item embeddings, and one
        squeezed per-position tensor for every sparse-feature slice.
    """
    # Look up the item-id sequence (data[0]) in every matrix.
    emb_list = list(map(lambda emb_matrix: tf.nn.embedding_lookup(emb_matrix, data[0]), emb_matrix_list))
    # For the sparse-feature lookups (all but the first), split into
    # x.shape[0] single-row slices along axis 0.
    split = list(map(lambda x: tf.split(x, x.shape[0], axis=0), emb_list[1:]))
    result = list(data)
    # Dense item embeddings are passed through unsplit.
    result.append(emb_list[0])
    temp = []
    for l in split:
        # Drop the trailing singleton feature axis of each slice.
        temp.extend(list(map(lambda x: tf.squeeze(x, axis=-1), l)))
    result.extend(temp)
    return tuple(result)


def process_data_by_np(data, emb_matrix_list):
    """NumPy counterpart of ``process_data`` for batched inference input.

    :param data: tuple/list of batched arrays; the first element holds the
        item-id sequences used for the lookups.
    :param emb_matrix_list: lookup matrices; the first is the dense item
        embedding matrix, the rest are sparse-feature matrices.
    :return: list of the original arrays, the dense lookup result, then one
        squeezed slice per sequence position for every sparse matrix.
    """
    looked_up = [np.take(matrix, data[0], axis=0) for matrix in emb_matrix_list]
    outputs = list(data)
    # Dense item embeddings are kept whole.
    outputs.append(looked_up[0])
    # Sparse-feature lookups are sliced per sequence position (axis 1) and
    # the singleton position axis removed from each slice.
    for emb in looked_up[1:]:
        for piece in np.split(emb, emb.shape[1], axis=1):
            outputs.append(np.squeeze(piece, axis=1))
    return outputs


def get_word_emb_matrix(max_words, word_index, word_vec, dtype='float32'):
    """Build a dense embedding matrix from a {word: vector} mapping.

    Row ``i`` of the result is the vector for the word whose tokenizer index
    is ``i``; rows with no vector (including row 0, the padding index) stay
    all-zero.

    :param max_words: number of rows (vocabulary size incl. padding index 0).
    :param word_index: dict mapping word -> integer index.
    :param word_vec: dict mapping word -> vector (list or np.ndarray);
        all vectors must share one shape.
    :param dtype: dtype of the resulting matrix.
    :return: np.ndarray of shape ``(max_words,) + vector_shape``.
    :raises ValueError: if ``word_vec`` is empty (no shape to infer).
    """
    # Infer the per-word vector shape from any one entry. The original
    # looped-and-broke (leaving `v` undefined and raising NameError on an
    # empty dict) and printed a debug value; both fixed here.
    if not word_vec:
        raise ValueError("word_vec must not be empty")
    sample = next(iter(word_vec.values()))
    shape = (len(sample),) if isinstance(sample, list) else sample.shape
    emb_matrix = np.zeros((max_words,) + shape, dtype=dtype)
    for word, i in word_index.items():
        # Indices at or beyond max_words fall outside the matrix and are skipped.
        if i < max_words:
            embedding_vec = word_vec.get(word)
            if embedding_vec is not None:
                emb_matrix[i] = embedding_vec
    return emb_matrix


def tokenizer_transform(item_tokenizer, data, max_len):
    """Turn click-history columns into fixed-length padded arrays.

    :param item_tokenizer: fitted keras ``Tokenizer`` for creative ids.
    :param data: DataFrame with ``his_creative_id``, ``his_time`` and
        ``his_click_times`` list columns.
    :param max_len: target sequence length for padding/truncation.
    :return: (item-id array, click-time array, sqrt click-count array).
    """
    item_seqs = item_tokenizer.texts_to_sequences(data['his_creative_id'].values)
    padded_items = pad_sequences(item_seqs, maxlen=max_len)
    padded_time = pad_sequences(data['his_time'].values, maxlen=max_len)
    padded_counts = pad_sequences(data['his_click_times'].values, maxlen=max_len)
    # Square-root transform dampens the influence of heavy repeat clickers.
    padded_counts = np.power(padded_counts, 0.5)
    return padded_items, padded_time, padded_counts


def process_item_profile(data, sparse_features):
    """Select and label-encode the sparse item-profile columns.

    :param data: item-profile DataFrame containing ``creative_id`` and all
        of ``sparse_features``.
    :param sparse_features: column names to label-encode in place.
    :return: new DataFrame with ``creative_id`` plus the encoded columns;
        encoded values start at 1 so 0 stays free (e.g. for padding).
    """
    # .copy() makes an independent frame: assigning columns to the bare
    # slice raises SettingWithCopyWarning and may silently not write back.
    data = data[['creative_id'] + sparse_features].copy()
    for feat in sparse_features:
        lbe = LabelEncoder()
        # Shift by +1 to reserve index 0.
        data[feat] = lbe.fit_transform(data[feat]) + 1
    return data


@timeit
def main(file_flag):
    """End-to-end train + predict pipeline for the age/gender model.

    Loads click-history splits selected by ``file_flag``, builds item
    embedding matrices from pretrained word2vec models and label-encoded
    item-profile features, trains ``E2EModel`` with checkpointing/early
    stopping, then writes test-set predictions to ``./output``.

    :param file_flag: integer suffix selecting the train/valid CSV split.
    """
    choose_gpu(num=0)
    batch_size = 256

    flag = "final_net_finetune_{}".format(file_flag)
    # Artifact path templates, specialized with the run flag below.
    item_toke_path = './models/item_tokenizer_{}.pickle'
    item_emb_matrix_list_path = "./models/item_emb_matrix_list_{}.pkl"
    checkpoint_dir = "./checkpoints/{}_cpt"
    log_dir = "./logs/{}_train_logs"
    item_toke_path = item_toke_path.format(flag)
    item_emb_matrix_list_path = item_emb_matrix_list_path.format(flag)
    checkpoint_dir = checkpoint_dir.format(flag)
    log_dir = log_dir.format(flag)

    # Input data locations.
    base_dir = './input'
    train_dir = base_dir + '/train_preliminary'
    test_dir = base_dir + '/test'
    train_user = pd.read_csv(train_dir + '/user.csv')
    train_data = pd.read_csv(base_dir + '/train_user_action_{}.csv'.format(file_flag))
    valid_data = pd.read_csv(base_dir + '/valid_user_action_{}.csv'.format(file_flag))

    # train_data = pd.read_csv('./sample/train_user_action_{}.csv'.format(file_flag))
    # valid_data = pd.read_csv('./sample/valid_user_action_{}.csv'.format(file_flag))

    # History columns are stored as space-separated int strings; parse to lists.
    for df in [train_data, valid_data]:
        for c in ['his_creative_id', 'his_time', 'his_click_times']:
            df[c] = df[c].apply(lambda x: list(map(int, x.split(" "))))

    # Merge user labels (age/gender) onto the action histories.
    train_data = pd.merge(train_data, train_user, on='user_id', how='left')
    valid_data = pd.merge(valid_data, train_user, on='user_id', how='left')

    # Item profile: union of train and test ad metadata.
    train_ad = pd.read_csv(train_dir + '/ad.csv')
    test_ad = pd.read_csv(test_dir + '/ad.csv')
    item_profile = pd.concat([train_ad, test_ad]).drop_duplicates()
    # Fit the item tokenizer on the full creative-id vocabulary.
    item_tokenizer = Tokenizer(lower=False, oov_token="OOV_TOKEN")
    item_tokenizer.fit_on_texts([item_profile['creative_id'].values.tolist()])
    item_index = item_tokenizer.word_index
    # +1 reserves index 0 for padding.
    max_items = len(item_index) + 1
    # Persist the tokenizer for inference-time reuse.
    save_by_pickle(item_tokenizer, item_toke_path)

    # Load the pretrained word2vec models and build one dense embedding
    # matrix per id column, then concatenate them feature-wise.
    dims = [32, 32]
    item_dim = sum(dims)
    item_emb_matrix_list = []
    cols = ['creative_id', 'ad_id']
    for c, s in zip(cols, dims):
        # Map creative_id -> value of column c (identity for creative_id itself).
        d = dict(zip(item_profile[cols[0]], item_profile[c]))
        w2v_save_path = base_dir + "/" + c + "_word2vec.model"
        word2vec = CustomWord2Vec(min_count=1, window=10, size=s, workers=15, sg=1, load_path=w2v_save_path)
        item_vec = {}
        for i in item_index.keys():
            # "-1" acts as the fallback key for ids missing from the profile.
            item_vec[i] = word2vec.get(str(d.get(i, "-1")))
        item_emb_matrix_list.append(get_word_emb_matrix(max_items, item_index, item_vec))
    item_emb_matrix_list = [np.concatenate(item_emb_matrix_list, axis=1)]

    # Derive the padded sequence length from the training-history lengths.
    train_data['his_len'] = train_data['his_creative_id'].apply(lambda x: len(x))
    max_len = int(train_data['his_len'].quantile(0.999))  # from data analysis: 0.999 quantile of history length; could be raised later
    print("max_len:", max_len)
    train_his_clk_items, train_his_clk_time, train_his_clk_times = tokenizer_transform(item_tokenizer, train_data, max_len)
    valid_his_clk_items, valid_his_clk_time, valid_his_clk_times = tokenizer_transform(item_tokenizer, valid_data, max_len)
    print("train_his_clk_items shape:", train_his_clk_items.shape)
    print("valid_his_clk_items shape:", valid_his_clk_items.shape)

    # item_profile['c_id'] = item_profile['creative_id']
    # sparse_features = ['c_id', 'ad_id', 'product_id', 'product_category', 'advertiser_id', 'industry']
    # sparse_features = ['c_id', 'product_id', 'product_category', 'advertiser_id', 'industry']
    # sparse_features = ['product_id', 'product_category', 'advertiser_id', 'industry']
    sparse_features = ['product_category', 'advertiser_id']
    item_profile = process_item_profile(item_profile, sparse_features)
    print("item_profile shape:", item_profile.shape)
    gc.collect()

    embedding_dim = 16
    fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=item_profile[feat].max()+1,
                                         embedding_dim=embedding_dim) for feat in sparse_features]
    linear_feature_columns = copy.deepcopy(fixlen_feature_columns)
    dnn_feature_columns = copy.deepcopy(fixlen_feature_columns)

    # Build one {creative_id -> encoded value} dict per sparse feature.
    # Columns are assumed ordered creative_id first, then sparse_features
    # in declaration order (see process_item_profile) — TODO confirm.
    item_profile_list = [{} for _ in range(len(fixlen_feature_columns))]
    for row in item_profile.values:
        start = 1
        for i in range(0, len(item_profile_list)):
            if isinstance(row[start], np.ndarray) or isinstance(row[start], list):
                item_profile_list[i][row[0]] = row[start]
            else:
                # Wrap scalars as length-1 slices so every value is array-like.
                item_profile_list[i][row[0]] = row[start:start + 1]
            start += 1

    # Append one int lookup matrix per sparse feature to the embedding list.
    for i, d in enumerate(item_profile_list):
        item_emb_sparse_matrix = get_word_emb_matrix(max_items, item_index, d, 'int32')
        item_emb_matrix_list.append(item_emb_sparse_matrix)

    # Persist the embedding matrix list for inference-time reuse.
    save_by_pickle(item_emb_matrix_list, item_emb_matrix_list_path)

    # Keep the large lookup constants on CPU memory.
    emb_matrix_list = []
    with tf.device("/CPU:0"):
        for m in item_emb_matrix_list:
            emb_matrix_list.append(tf.constant(m))

    # 91 presumably covers the number of distinct days in the log — TODO confirm.
    max_time = 91
    e2e_model = E2EModel(max_len, max_time + 1)
    model = e2e_model.build_model_3(linear_feature_columns, dnn_feature_columns, sparse_features, max_items,
                                    dnn_hidden_units_1=(32, 16), dnn_hidden_units_2=(32, 16), item_dim=item_dim,
                                    num_layers=2, pos_trainable=True, l2_reg=1e-5, dnn_activation='relu', dnn_dropout=0.2)
    # Two heads: multiclass age (categorical CE) and binary gender (binary CE),
    # weighted equally.
    model.compile(optimizer='adam', loss=['categorical_crossentropy', 'binary_crossentropy'],
                  metrics=['acc'], loss_weights=[0.5, 0.5])
    print(model.summary())

    # Callbacks: fresh checkpoint/log dirs per run (old contents discarded).
    shutil.rmtree(checkpoint_dir, ignore_errors=True)
    os.makedirs(checkpoint_dir, exist_ok=True)
    shutil.rmtree(log_dir, ignore_errors=True)
    os.makedirs(log_dir, exist_ok=True)
    checkpoint_path = os.path.join(checkpoint_dir, "weights.hdf5")
    callbacks = [
        ModelCheckpoint(checkpoint_path,
                        monitor="val_loss",
                        save_best_only=True),
        EarlyStopping(patience=5, monitor="val_loss"),
        TensorBoard(log_dir=log_dir)
    ]

    # Age labels are 1-based in the data; shift to 0-based and one-hot over 10 classes.
    train_age = train_data['age'] - 1
    valid_age = valid_data['age'] - 1
    train_age = keras.utils.to_categorical(train_age, 10)
    valid_age = keras.utils.to_categorical(valid_age, 10)

    # Gender labels 1/2 -> 0/1 column vectors for the binary head.
    train_gender = train_data['gender'] - 1
    train_gender = train_gender.values.reshape((-1, 1))
    valid_gender = valid_data['gender'] - 1
    valid_gender = valid_gender.values.reshape((-1, 1))

    # repeat=None makes the training set repeat indefinitely; epochs are
    # bounded by steps_per_epoch below.
    train_dataset = load_dataset(emb_matrix_list, train_his_clk_items, train_his_clk_time,
                                 train_his_clk_times, train_age, train_gender, repeat=None, batch_size=batch_size)
    valid_dataset = load_dataset(emb_matrix_list, valid_his_clk_items, valid_his_clk_time,
                                 valid_his_clk_times, valid_age, valid_gender, batch_size=batch_size)

    epochs = 50
    model.fit(train_dataset,
              steps_per_epoch=(train_data.shape[0]) // batch_size,
              epochs=epochs,
              validation_data=valid_dataset,
              verbose=2, callbacks=callbacks)

    # Reload the best (lowest val_loss) checkpoint for prediction.
    model = load_model(checkpoint_path, custom_objects=custom_objects)

    # Load and preprocess test data the same way as train/valid.
    test_data = pd.read_csv(base_dir + '/test_user_action.csv')
    for c in ['his_creative_id', 'his_time', 'his_click_times']:
        test_data[c] = test_data[c].apply(lambda x: list(map(int, x.split(" "))))
    test_his_clk_items, test_his_clk_time, test_his_clk_times = tokenizer_transform(item_tokenizer, test_data, max_len)
    # Embedding lookups are done in NumPy here (no tf.data pipeline for inference).
    test_dataset = process_data_by_np((test_his_clk_items, test_his_clk_time, test_his_clk_times), item_emb_matrix_list)
    age_preds, gender_preds = model.predict(test_dataset, batch_size=2048)
    sub = pd.DataFrame({'user_id': test_data['user_id'].values})
    # Undo the label shift: argmax index 0..9 -> age 1..10.
    sub['predicted_age'] = np.argmax(age_preds, axis=1) + 1
    # Sigmoid output > 0.5 -> gender 2, otherwise 1.
    sub['predicted_gender'] = np.where(gender_preds.flatten() > 0.5, 2, 1)
    sub.to_csv('./output/sub_{}.csv'.format(flag), index=False)
    # Also save the raw prediction probabilities (for later ensembling/analysis).
    sub_pred = pd.DataFrame({'user_id': test_data['user_id'].values})
    sub_pred['pred_age'] = list(map(lambda x: " ".join(map(lambda y: str(y), x)), age_preds))
    sub_pred['pred_gender'] = gender_preds
    sub_pred.to_csv('./output/raw_sub_{}.csv'.format(flag), index=False)


if __name__ == "__main__":
    # Run the pipeline on data split 0.
    split_flag = 0
    main(split_flag)
