# Flag for recall evaluation; when no evaluation is performed, recall simply runs on the full data set.
import collections
import math
import os
import pickle
import random

import faiss
import numpy as np
import pandas as pd
import tensorflow as tf
from deepctr.feature_column import SparseFeat, VarLenSparseFeat
from deepmatch.models import YoutubeDNN
from deepmatch.utils import sampledsoftmaxloss
from pandas import DataFrame, Series
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.indexes.numeric import IntegerIndex
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm

# Recall-evaluation switch: when True, each user's last click is held out and
# used to score the recall results; when False, recall runs on the full data.
metric_recall = True


def get_all_click_sample(sample_nums=10000) -> DataFrame:
    """
    Sample a subset of the training click log for quick debugging.

    :param sample_nums: number of distinct users to sample; capped at the
        number of users actually present, because ``np.random.choice`` with
        ``replace=False`` raises when asked for more items than the population.
    :return: de-duplicated click records of the sampled users.
    """
    all_click: DataFrame = pd.read_csv("../data/train_click_log.csv")
    all_user_ids = all_click["user_id"].unique()
    # Cap the sample size so choice(replace=False) cannot raise ValueError.
    sample_size = min(sample_nums, len(all_user_ids))
    sample_user_ids = np.random.choice(all_user_ids, size=sample_size, replace=False)
    all_click = all_click[all_click["user_id"].isin(sample_user_ids)]
    # Identical (user, article, timestamp) rows are duplicates; keep one.
    all_click = all_click.drop_duplicates(subset=['user_id', 'click_article_id', 'click_timestamp'])
    return all_click


def get_all_click_df(offline=True) -> DataFrame:
    """
    Read the click logs.

    For an online submission the test-set clicks should be merged into the
    full data; for offline validation of the model or features only the
    training set is used.

    :param offline: if True read only the training log, otherwise concatenate
        the training and testA logs.
    :return: de-duplicated click records.
    """
    if offline:
        all_click = pd.read_csv("../data/train_click_log.csv")
    else:
        trn_click = pd.read_csv("../data/train_click_log.csv")
        tst_click = pd.read_csv("../data/testA_click_log.csv")
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat is the supported equivalent.
        all_click = pd.concat([trn_click, tst_click], ignore_index=True)
    all_click = all_click.drop_duplicates(subset=['user_id', 'click_article_id', 'click_timestamp'])
    return all_click


def get_item_info_df() -> DataFrame:
    """Load article metadata and align its id column with the click log."""
    articles = pd.read_csv("../data/articles.csv")
    # The click log calls the article column 'click_article_id'; rename so the
    # two frames share a common join key.
    return articles.rename(columns={'article_id': 'click_article_id'})


def get_item_emb_dict() -> dict:
    """Load article embeddings and return {article_id: L2-normalised vector}."""
    emb_df: DataFrame = pd.read_csv("../data/articles_emb.csv")
    emb_cols = [col for col in emb_df.columns if "emb" in col]
    vectors = np.ascontiguousarray(emb_df[emb_cols])
    # Normalise each row to unit length so dot products act as cosine similarity.
    vectors = vectors / np.linalg.norm(vectors, axis=1, keepdims=True)
    return dict(zip(emb_df["article_id"], vectors))


# Min-max normalisation helper; used for the association-rule time weights.
max_min_scaler = lambda x: (x - np.min(x)) / (np.max(x) - np.min(x))

# Sampled data (debugging)
# all_click_df = get_all_click_sample()
# Full training data
all_click_df = get_all_click_df(offline=False)

# Normalise click timestamps into [0, 1]; they serve as weights when the
# association rules are computed later.
all_click_df["click_timestamp"] = all_click_df[["click_timestamp"]].apply(max_min_scaler)

item_info_df = get_item_info_df()


# item_emb_dict = get_item_emb_dict()


def get_user_item_time(click_df: DataFrame) -> dict:
    """
    Build each user's click sequence ordered by time:
    {user1: [(item1, time1), (item2, time2), ...], ...}
    """
    ordered = click_df.sort_values("click_timestamp")
    grouped = ordered.groupby("user_id")[["click_article_id", "click_timestamp"]]
    # For every user, pair up article ids with their click timestamps.
    pairs = grouped.apply(lambda g: list(zip(g["click_article_id"], g["click_timestamp"])))
    pairs = pairs.reset_index().rename(columns={0: "item_time_list"})
    return dict(zip(pairs["user_id"], pairs["item_time_list"]))


def get_item_user_time_dict(click_df: DataFrame) -> dict:
    """
    Build each article's click sequence ordered by time:
    {item1: [(user1, time1), (user2, time2), ...], ...}
    """
    click_df = click_df.sort_values("click_timestamp")

    def make_user_time_pair(df: DataFrame) -> list:
        return list(zip(df["user_id"], df["click_timestamp"]))

    # Select columns with a list: groupby(...)["a", "b"] (tuple indexing) was
    # deprecated and has been removed in modern pandas.
    click_byiid: DataFrameGroupBy = click_df.groupby("click_article_id")[["user_id", "click_timestamp"]]
    item_user_time_df: DataFrame = click_byiid.apply(lambda x: make_user_time_pair(x))
    item_user_time_df = item_user_time_df.reset_index().rename(columns={0: "user_time_list"})
    item_user_time_dict = dict(zip(item_user_time_df["click_article_id"], item_user_time_df["user_time_list"]))
    return item_user_time_dict


def get_hist_and_last_click(all_click: DataFrame) -> (DataFrame, DataFrame):
    """
    Split the clicks into history and each user's final click.

    Users with a single click keep that click in the history as well, so no
    user disappears from the training data.
    """
    ordered = all_click.sort_values(['user_id', 'click_timestamp'])
    click_last_df = ordered.groupby('user_id').tail(1)
    # Drop the final click from each user's history unless it is the only one.
    click_hist_df = (ordered.groupby('user_id')
                     .apply(lambda g: g if len(g) == 1 else g[:-1])
                     .reset_index(drop=True))
    return click_hist_df, click_last_df


def get_item_info_dict(item_info_df: DataFrame) -> (dict, dict, dict):
    """
    Turn article attributes into lookup dicts for the recall and cold-start
    stages: category, word count, and (normalised) creation time per article.

    Note: normalises 'created_at_ts' in place on the passed DataFrame.
    """
    item_info_df["created_at_ts"] = item_info_df[["created_at_ts"]].apply(max_min_scaler)
    ids = item_info_df["click_article_id"]
    item_type_dict = dict(zip(ids, item_info_df["category_id"]))
    item_words_dict = dict(zip(ids, item_info_df["words_count"]))
    item_created_time_dict = dict(zip(ids, item_info_df["created_at_ts"]))
    return item_type_dict, item_words_dict, item_created_time_dict


def get_user_hist_item_info_dict(all_click: DataFrame) -> (dict, dict, dict, dict):
    """
    Summarise each user's click history.

    Returns four dicts keyed by user_id:
      * set of clicked category ids
      * set of clicked article ids
      * mean word count of clicked articles
      * (normalised) creation time of the last clicked article
    """
    # user -> set of clicked article categories
    typs = all_click.groupby("user_id")["category_id"].agg(func=set).reset_index()
    user_hist_item_typs_dict = dict(zip(typs['user_id'], typs['category_id']))

    # user -> set of clicked article ids
    ids = all_click.groupby("user_id")["click_article_id"].agg(func=set).reset_index()
    user_hist_item_ids_dict = dict(zip(ids['user_id'], ids['click_article_id']))

    # user -> mean word count over clicked articles
    words = all_click.groupby("user_id")["words_count"].agg(func='mean').reset_index()
    user_hist_item_words_dict = dict(zip(words['user_id'], words['words_count']))

    # user -> creation time of the most recently clicked article
    all_click = all_click.sort_values('click_timestamp')
    last_time = all_click.groupby('user_id')["created_at_ts"].apply(lambda s: s.iloc[-1]).reset_index()
    last_time["created_at_ts"] = last_time[['created_at_ts']].apply(max_min_scaler)
    user_last_item_created_time_dict = dict(zip(last_time['user_id'], last_time['created_at_ts']))
    return user_hist_item_typs_dict, user_hist_item_ids_dict, user_hist_item_words_dict, user_last_item_created_time_dict


def get_item_topk_click(click_df: DataFrame, k: int) -> pd.Index:
    """
    Return the ids of the k most-clicked articles (most recent window).

    Annotated with ``pd.Index`` instead of ``IntegerIndex``: the latter lived
    in ``pandas.core.indexes.numeric`` and was removed in pandas 2.0.
    """
    click_counts: Series = click_df['click_article_id'].value_counts()
    # value_counts sorts descending, so the first k index labels are the top-k.
    return click_counts.index[:k]


# Article attributes as dicts for fast lookup.
item_type_dict, item_words_dict, item_created_time_dict = get_item_info_dict(item_info_df)

# Multi-channel recall: each channel stores its results under its own key.
user_multi_recall_dict = {'itemcf_sim_itemcf_recall': {},
                          'embedding_sim_item_recall': {},
                          'youtubednn_recall': {},
                          'youtubednn_usercf_recall': {},
                          'cold_start_recall': {}}


# For recall evaluation each user's last click is held out; when no evaluation
# is wanted, recall runs on the full training data (offline model check) and
# the last click need not be extracted.
# trn_hist_click_df, trn_last_click_df = get_hist_and_last_click(all_click_df)


def metrics_recall(user_recall_items_dict: dict, trn_last_click_df: DataFrame, topk=5) -> dict:
    """
    Evaluate the hit rate of the first 10, 20, ..., topk recalled articles.

    :param user_recall_items_dict: {user_id: [(article_id, score), ...]},
        sorted best-first.
    :param trn_last_click_df: one held-out last click per user.
    :param topk: largest cut-off evaluated; note that with the default of 5
        the range (10, topk + 1, 10) is empty and nothing is evaluated.
    :return: {k: (hit_num, hit_rate)} — previously the metrics were printed
        and discarded; returning them keeps the printing intact while letting
        callers use the values programmatically.
    """
    last_click_item_dict = dict(zip(trn_last_click_df['user_id'], trn_last_click_df['click_article_id']))
    user_num = len(user_recall_items_dict)
    results = {}
    for k in range(10, topk + 1, 10):
        hit_num = 0
        for user, item_list in user_recall_items_dict.items():
            # A hit means the held-out click appears within the first k recalls.
            tmp_recall_items = [x[0] for x in item_list[:k]]
            if last_click_item_dict[user] in set(tmp_recall_items):
                hit_num += 1
        hit_rate = round(hit_num / user_num, ndigits=5)
        print(' topk: ', k, ' : ', 'hit_num: ', hit_num, 'hit_rate: ', hit_rate, 'user_num : ', user_num)
        results[k] = (hit_num, hit_rate)
    return results


def itemcf_sim(df: DataFrame, item_created_time_dict: dict) -> dict:
    """
    Compute the item-item similarity matrix.

    Approach: item-based collaborative filtering combined with
    association-rule weights:
    1. click-time weight (clicks close together in time count more)
    2. click-order weight (forward transitions weigh more than backward ones)
    3. article-creation-time weight (articles created close together count more)

    :param df: click log
    :param item_created_time_dict: article_id -> normalised creation timestamp
    :return: {item_i: {item_j: similarity}}
    """
    user_item_time_dict = get_user_item_time(df)
    i2i_sim = {}
    item_cnt = collections.defaultdict(int)
    for user, item_time_list in tqdm(user_item_time_dict.items()):
        for loc1, (i, i_click_time) in enumerate(item_time_list):
            item_cnt[i] += 1
            i2i_sim.setdefault(i, {})
            for loc2, (j, j_click_time) in enumerate(item_time_list):
                if i == j:
                    continue
                # Forward order (i clicked before j) gets full weight, backward 0.7;
                # the weight decays by a factor 0.9 per position of separation.
                loc_alpha = 1.0 if loc1 < loc2 else 0.7
                loc_weight = loc_alpha * (0.9 ** (np.abs(loc1 - loc2) - 1))
                # Smaller time gaps produce larger weights (0.x ** gap shrinks
                # with the gap, so exp of it shrinks toward 1).
                click_time_weight = np.exp(0.7 ** np.abs(i_click_time - j_click_time))
                created_time_weight = np.exp(0.8 ** np.abs(item_created_time_dict[i] - item_created_time_dict[j]))
                i2j_sim: dict = i2i_sim[i]
                i2j_sim.setdefault(j, 0)
                # Penalise very active users (long click lists) logarithmically.
                i2j_sim[j] += loc_weight * click_time_weight * created_time_weight / (
                    math.log(len(item_time_list) + 1))
    # Cosine-style normalisation by the items' click counts, damping popular items.
    for i, related_items in i2i_sim.items():
        for j, wij in related_items.items():
            i2i_sim[i][j] = wij / math.sqrt(item_cnt[i] * item_cnt[j])
    return i2i_sim


# i2i_sim = itemcf_sim(all_click_df, item_created_time_dict=item_created_time_dict)


def get_user_activate_degree_dict(all_click_df: DataFrame) -> dict:
    """
    User activity degree: per-user click count, min-max scaled into [0, 1].

    The scaling is done with plain pandas arithmetic instead of constructing a
    sklearn ``MinMaxScaler`` for a single column, consistent with the module's
    ``max_min_scaler``. When every user has the same click count the degree is
    0.0 — matching MinMaxScaler, which maps a zero-range feature to its minimum.

    :return: {user_id: activity in [0, 1]}
    """
    counts = all_click_df.groupby('user_id')['click_article_id'].count()
    span = counts.max() - counts.min()
    if span == 0:
        # Degenerate case: all users equally active.
        scaled = counts * 0.0
    else:
        scaled = (counts - counts.min()) / span
    return dict(zip(counts.index, scaled))


def usercf_sim(all_click_df: DataFrame, user_activate_degree_dict: dict) -> dict:
    """
    Compute the user-user similarity matrix.

    Approach: user-based collaborative filtering plus an activity-based
    association-rule weight.

    :param all_click_df: click log
    :param user_activate_degree_dict: user_id -> normalised activity degree
    :return: {user_u: {user_v: similarity}}
    """
    item_user_time_dict = get_item_user_time_dict(all_click_df)
    u2u_sim = {}
    user_cnt = collections.defaultdict(int)
    for item, user_time_list in tqdm(item_user_time_dict.items()):
        num_clickers = len(user_time_list)
        for u, _u_time in user_time_list:
            user_cnt[u] += 1
            sim_row = u2u_sim.setdefault(u, {})
            for v, _v_time in user_time_list:
                if u == v:
                    continue
                # Weight each co-click pair by the mean activity of both users,
                # damped by how many users clicked this item.
                activate_weight = 100 * 0.5 * (user_activate_degree_dict[u] + user_activate_degree_dict[v])
                sim_row[v] = sim_row.get(v, 0) + activate_weight / (math.log(num_clickers + 1))
    # Cosine-style normalisation by each user's total click count.
    for u, related_users in u2u_sim.items():
        for v, wuv in related_users.items():
            u2u_sim[u][v] = wuv / math.sqrt(user_cnt[u] * user_cnt[v])
    return u2u_sim


# user_activate_degree_dict = get_user_activate_degree_dict(all_click_df)
# u2u_sim = usercf_sim(all_click_df, user_activate_degree_dict)


def embdding_sim(item_emb_df: DataFrame, topk: int) -> dict:
    """
    Content-based article similarity from embeddings.

    For every article, retrieve the topk most similar articles by inner
    product; faiss is used because the corpus is too large for a dense
    similarity matrix.

    Improvements: the similarity dict is now returned (before, it was only
    pickled and callers had to reload the file); the defaultdict is used
    directly instead of the redundant ``dict.get(...).get(...)`` chain; and
    the pickle file handle is closed deterministically.

    :param item_emb_df: frame with an 'article_id' column plus one column per
        embedding dimension (column name containing 'emb').
    :param topk: neighbours to query per article (position 0 is the article
        itself and is skipped).
    :return: {article_id: {similar_article_id: similarity}}
    """
    item_idx_2_rawid_dict = dict(zip(item_emb_df.index, item_emb_df["article_id"]))
    item_emb_cols = [x for x in item_emb_df.columns if 'emb' in x]
    item_emb_np: np.ndarray = np.ascontiguousarray(item_emb_df[item_emb_cols].values, dtype=np.float32)
    # Unit-normalise so inner-product search behaves like cosine similarity.
    item_emb_np = item_emb_np / np.linalg.norm(item_emb_np, axis=1, keepdims=True)
    item_index: faiss.Index = faiss.index_factory(item_emb_np.shape[1], 'PCA32,IVF100,PQ8')
    # item_index: faiss.IndexFlatIP = faiss.IndexFlatIP(item_emb_np.shape[1])
    item_index.train(item_emb_np)
    item_index.add(item_emb_np)
    sim, idx = item_index.search(item_emb_np, topk)
    item_sim_dict = collections.defaultdict(dict)
    for target_idx, sim_value_list, rele_idx_list in tqdm(zip(range(len(item_emb_np)), sim, idx),
                                                          total=len(item_emb_np)):
        target_raw_id = item_idx_2_rawid_dict[target_idx]
        # Position 0 is the query article itself; accumulate the rest.
        for rele_idx, sim_value in zip(rele_idx_list[1:], sim_value_list[1:]):
            rele_raw_id = item_idx_2_rawid_dict[rele_idx]
            row = item_sim_dict[target_raw_id]
            row[rele_raw_id] = row.get(rele_raw_id, 0) + sim_value
    with open('emb_i2i_sim.pkl', 'wb') as f:
        pickle.dump(item_sim_dict, f)
    return item_sim_dict


# item_emb_df = pd.read_csv('../data/articles_emb.csv')
# embdding_sim(item_emb_df, 10)

# item_sim_dict: dict = pickle.load(open('emb_i2i_sim.pkl', 'rb'))
# print(dict)

def gen_data_set(data: DataFrame, negsample=0) -> (list, list):
    """
    Build train/test samples for the two-tower recall model.

    Each sample is (user_id, hist_sequence, target_item, label, hist_len);
    histories come from a sliding window over the time-ordered click sequence,
    and each user's final position is held out as the test sample.

    :param negsample: number of negative items drawn per positive sample.
    :return: (train_set, test_set)
    """
    data.sort_values("click_timestamp", inplace=True)
    item_ids = data['click_article_id'].unique()
    train_set, test_set = [], []
    for user_id, user_df in tqdm(data.groupby('user_id')):
        pos_list = user_df['click_article_id'].tolist()
        neg_list = []
        if negsample > 0:
            # Negatives are sampled from items this user never clicked.
            candidate_set = list(set(item_ids) - set(pos_list))
            neg_list = np.random.choice(a=candidate_set, size=len(pos_list) * negsample, replace=True)
        if len(pos_list) == 1:
            # Single-click users appear in both sets so they are not dropped.
            sample = (user_id, [pos_list[0]], pos_list[0], 1, len(pos_list))
            train_set.append(sample)
            test_set.append(sample)
        for i in range(1, len(pos_list), 1):
            hist = pos_list[:i][::-1]  # most recent click first
            if i != len(pos_list) - 1:
                train_set.append((user_id, hist, pos_list[i], 1, len(hist)))
                for negi in range(negsample):
                    train_set.append((user_id, hist, neg_list[i * negsample + negi], 0, len(hist)))
            else:
                # The last position of every user is the held-out test sample.
                test_set.append((user_id, hist, pos_list[i], 1, len(hist)))
    random.shuffle(train_set)
    random.shuffle(test_set)
    return train_set, test_set


def gen_model_input(train_set: list, user_profile: DataFrame, seq_max_len: int) -> (dict, np.ndarray):
    """
    Pad the history sequences to a common length and pack the features into
    the model's input dict.

    :param train_set: list of (user_id, hist_seq, item_id, label, hist_len).
    :param user_profile: unused here; kept for interface compatibility.
    :param seq_max_len: length every history sequence is padded/truncated to.
    :return: (model_input_dict, label_array)
    """
    uids = np.array([row[0] for row in train_set])
    seqs = [row[1] for row in train_set]
    iids = np.array([row[2] for row in train_set])
    labels = np.array([row[3] for row in train_set])
    hist_lens = np.array([row[4] for row in train_set])
    # 'post' keeps the start of each sequence and pads/cuts at the end.
    padded = pad_sequences(seqs, maxlen=seq_max_len, padding='post', truncating='post', value=0)
    model_input = {'user_id': uids, 'click_article_id': iids, 'hist_article_id': padded,
                   'hist_len': hist_lens}
    return model_input, labels


def youtubednn_u2i_dict_inner(data: DataFrame, topk=20):
    """
    Train a YoutubeDNN two-tower model and build user->item recall lists.

    Side effects: pickles the raw-id user/item embedding dicts
    ('user_youtube_emb.pkl', 'item_youtube_emb.pkl') and the recall dict
    ('youtube_u2i_dict.pkl'). Nothing is returned; callers load the pickle.

    :param data: click log with 'user_id', 'click_article_id',
        'click_timestamp' columns. Mutated in place by the LabelEncoder below.
    :param topk: number of items retrieved per user from the faiss index.
    """
    sparse_features = ['click_article_id', 'user_id']
    SEQ_LEN = 30  # maximum click-history length fed to the model
    # Snapshot the raw ids BEFORE LabelEncoder overwrites the columns below.
    user_profile_ = data[['user_id']].drop_duplicates('user_id')
    item_profile_ = data[['click_article_id']].drop_duplicates('click_article_id')
    features = ['click_article_id', 'user_id']
    feature_max_idx = {}
    for feature in features:
        # Encode ids to a contiguous 0..n-1 range (in place on `data`).
        lbe = LabelEncoder()
        data[feature] = lbe.fit_transform(data[feature])
        feature_max_idx[feature] = data[feature].max() + 1
    user_profile = data[['user_id']].drop_duplicates('user_id')
    item_profile = data[['click_article_id']].drop_duplicates('click_article_id')
    # Encoded-id -> raw-id maps; relies on drop_duplicates producing the same
    # row order before and after encoding (fit_transform keeps row positions).
    user_index_2_rawid = dict(zip(user_profile['user_id'], user_profile_['user_id']))
    item_index_2_rawid = dict(zip(item_profile['click_article_id'], item_profile_['click_article_id']))
    train_set, test_set = gen_data_set(data, 0)
    train_model_input, train_label = gen_model_input(train_set, user_profile, SEQ_LEN)
    test_model_input, test_label = gen_model_input(test_set, user_profile, SEQ_LEN)
    embedding_dim = 16
    user_feature_columns = [SparseFeat('user_id', feature_max_idx['user_id'], embedding_dim),
                            VarLenSparseFeat(
                                SparseFeat('hist_article_id', feature_max_idx['click_article_id'], embedding_dim,
                                           embedding_name='click_article_id'), SEQ_LEN, 'mean', 'hist_len'), ]
    item_feature_columns = [SparseFeat('click_article_id', feature_max_idx['click_article_id'], embedding_dim)]

    # deepmatch's sampled-softmax path needs TF1-style graph mode under TF2.
    if tf.__version__ >= '2.0.0':
        tf.compat.v1.disable_eager_execution()

    model = YoutubeDNN(user_feature_columns=user_feature_columns, item_feature_columns=item_feature_columns,
                       num_sampled=5, user_dnn_hidden_units=(64, embedding_dim))
    model.compile(optimizer='adam', loss=sampledsoftmaxloss)
    history = model.fit(x=train_model_input, y=train_label, batch_size=256, epochs=1, verbose=1, validation_split=0.0)
    test_user_model_input = test_model_input
    all_item_model_input = {'click_article_id': item_profile['click_article_id'].values, }
    # Separate sub-models expose the two towers for embedding extraction.
    user_embedding_model = Model(inputs=model.user_input, outputs=model.user_embedding)
    item_embedding_model = Model(inputs=model.item_input, outputs=model.item_embedding)
    user_embs = user_embedding_model.predict(test_user_model_input, batch_size=2 ** 12)
    item_embs = item_embedding_model.predict(all_item_model_input, batch_size=2 ** 12)
    # user_embs = user_embs / np.linalg.norm(user_embs, axis=1, keepdims=True)
    # item_embs = item_embs / np.linalg.norm(item_embs, axis=1, keepdims=True)
    # NOTE(review): user_embs has one row per TEST SAMPLE (in shuffled
    # test_set order), while user_profile has one row per unique user in
    # first-occurrence order — the zip below only pairs embeddings with the
    # right users if those orders coincide; verify against gen_data_set.
    raw_user_id_emb_dict = {user_index_2_rawid[k]: v for k, v in zip(user_profile['user_id'], user_embs)}
    raw_item_id_emb_dict = {item_index_2_rawid[k]: v for k, v in zip(item_profile['click_article_id'], item_embs)}
    pickle.dump(raw_user_id_emb_dict, open('user_youtube_emb.pkl', 'wb'))
    pickle.dump(raw_item_id_emb_dict, open('item_youtube_emb.pkl', 'wb'))

    ui_index: faiss.Index = faiss.index_factory(embedding_dim, 'IVF100,PQ8')
    ui_index.train(item_embs)
    ui_index.add(item_embs)
    sim, idx = ui_index.search(np.ascontiguousarray(user_embs), topk)
    user_recall_items_dict = collections.defaultdict(dict)
    # NOTE(review): `idx` holds ROW POSITIONS in item_embs, which are looked
    # up in item_index_2_rawid keyed by ENCODED ids; these only agree if the
    # encoded ids equal the item_profile row positions — confirm.
    for target_idx, sim_value_list, rele_idx_list in tqdm(zip(test_user_model_input['user_id'], sim, idx)):
        target_raw_id = user_index_2_rawid[target_idx]
        for rele_idx, sim_value in zip(rele_idx_list[1:], sim_value_list[1:]):
            rele_raw_id = item_index_2_rawid[rele_idx]
            user_recall_items_dict[target_raw_id][rele_raw_id] = user_recall_items_dict.get(target_raw_id, {}).get(
                rele_raw_id,
                0) + sim_value
    # Sort each user's recalled items by accumulated similarity, best first.
    user_recall_items_dict = {k: sorted(v.items(), key=lambda x: x[1], reverse=True) for k, v in
                              user_recall_items_dict.items()}
    pickle.dump(user_recall_items_dict, open('youtube_u2i_dict.pkl', 'wb'))


def youtubednn_u2i_dict(data: DataFrame, topk=20) -> dict:
    """
    Return the YoutubeDNN user->item recall dict, computing and caching it on
    disk ('youtube_u2i_dict.pkl') only when the cache file does not yet exist.
    """
    cache_path = 'youtube_u2i_dict.pkl'
    if not os.path.exists(cache_path):
        # Trains the model and writes the pickle as a side effect.
        youtubednn_u2i_dict_inner(data, topk)
    with open(cache_path, 'rb') as cache_file:
        return pickle.load(cache_file)


# Run the YoutubeDNN recall; when evaluating, score it against each user's
# held-out last click.
if not metric_recall:
    user_multi_recall_dict['youtubednn_recall'] = youtubednn_u2i_dict(all_click_df, topk=20)
else:
    trn_hist_click_df, trn_last_click_df = get_hist_and_last_click(all_click_df)
    # NOTE(review): recall is still computed on the FULL click log, so the
    # held-out last clicks leak into training; passing trn_hist_click_df here
    # instead looks intended — confirm.
    user_multi_recall_dict['youtubednn_recall'] = youtubednn_u2i_dict(all_click_df, topk=20)
    metrics_recall(user_multi_recall_dict['youtubednn_recall'], trn_last_click_df, topk=20)
