import numpy as np
import pandas as pd
import torch
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from keras.preprocessing.sequence import pad_sequences

from preprocessing.inputs import SparseFeat, DenseFeat, VarLenSparseFeat
from model.dssm import DSSM
from preprocessing.utils import Cosine_Similarity


def data_process(data_path, samp_rows=40000):
    """Load the MovieLens sample, binarize ratings and split chronologically.

    Ratings greater than 3 become positive labels (1), everything else 0.
    Rows are ordered by timestamp so the split is temporal: the first 80%
    of interactions form the training set, the last 20% the test set.

    Returns a ``(train, test, full_data)`` tuple of DataFrames.
    """
    frame = pd.read_csv(data_path, nrows=samp_rows)
    frame['rating'] = (frame['rating'] > 3).astype(int)
    frame = frame.sort_values(by='timestamp', ascending=True)
    split_at = int(len(frame) * 0.8)
    return frame.iloc[:split_at].copy(), frame.iloc[split_at:].copy(), frame


def get_user_feature(data):
    """Attach per-user features ``user_hist`` and ``user_mean_rating``.

    ``user_hist`` is a '|'-joined string of the movie ids the user rated
    positively (rating == 1); ``user_mean_rating`` is the user's average
    label. Note: the inner merges drop users that have no positive rating.
    """
    positives = data[data['rating'] == 1]
    hist = (positives[['user_id', 'movie_id']]
            .groupby('user_id')
            .agg(list)
            .reset_index())
    hist['user_hist'] = hist['movie_id'].apply(
        lambda movies: '|'.join(str(m) for m in movies))
    data = pd.merge(hist.drop('movie_id', axis=1), data, on='user_id')
    means = (data[['user_id', 'rating']]
             .groupby('user_id')
             .agg('mean')
             .reset_index()
             .rename(columns={'rating': 'user_mean_rating'}))
    data = pd.merge(means, data, on='user_id')
    return data


def get_item_feature(data):
    """Attach ``item_mean_rating``: each movie's average label."""
    means = (data[['movie_id', 'rating']]
             .groupby('movie_id')
             .agg('mean')
             .reset_index()
             .rename(columns={'rating': 'item_mean_rating'}))
    return pd.merge(means, data, on='movie_id')


def get_var_feature(data, col):
    """Encode a '|'-separated multi-valued column into padded index lists.

    Builds the vocabulary on the fly (indices start at 1; 0 is reserved as
    the padding value), maps every row to its list of indices, then
    right-pads all rows to the length of the longest one.

    Returns ``(key2index, padded_2d_array, max_len)``.
    """
    key2index = {}

    def encode(raw):
        tokens = raw.split('|')
        for token in tokens:
            if token not in key2index:
                # 0 is the padding value, so valid ids start at 1.
                key2index[token] = len(key2index) + 1
        return [key2index[token] for token in tokens]

    encoded = [encode(raw) for raw in data[col].values]
    lengths = np.array([len(row) for row in encoded])
    max_len = max(lengths)
    # Pad every row with trailing zeros up to the longest sequence.
    padded = pad_sequences(encoded, maxlen=max_len, padding='post', )
    return key2index, padded, max_len


def get_test_var_feature(data, col, key2index, max_len):
    """Encode a test-set multi-valued column with a pre-built vocabulary.

    Tokens unseen during training are appended to ``key2index``; rows are
    right-padded (or truncated) to ``max_len``.
    """
    print("user_hist_list: \n")

    def encode(raw):
        tokens = raw.split('|')
        for token in tokens:
            if token not in key2index:
                # Notice : input value 0 is a special "padding",
                # so we do not use 0 to encode valid feature for sequence input
                key2index[token] = len(key2index) + 1
        return [key2index[token] for token in tokens]

    sequences = [encode(raw) for raw in data[col].values]
    # Pad with trailing zeros to the training-time maximum length.
    return pad_sequences(sequences, maxlen=max_len, padding='post')


if __name__ == '__main__':
    # %%
    data_path = './data/movielens.txt'
    train, test, data = data_process(data_path, samp_rows=40000)
    # Add the user_hist and user_mean_rating columns.
    train = get_user_feature(train)
    # Add the item_mean_rating column.
    train = get_item_feature(train)

    # Sparse (categorical) features and dense (continuous) features.
    sparse_features = ['user_id', 'movie_id', 'gender', 'age', 'occupation']
    dense_features = ['user_mean_rating', 'item_mean_rating']
    target = ['rating']

    user_sparse_features, user_dense_features = ['user_id', 'gender', 'age', 'occupation'], ['user_mean_rating']
    item_sparse_features, item_dense_features = ['movie_id', ], ['item_mean_rating']

    # 1. Label-encode the sparse features. The encoder is fit on the full
    #    data so train and test share one consistent encoding.
    for feat in sparse_features:
        # Label encoding example:
        """
        enc=preprocessing.LabelEncoder()   #获取一个LabelEncoder
        enc=enc.fit(['小猫','小狗','兔子'])  #训练LabelEncoder
        data=enc.transform(data)       #使用训练好的LabelEncoder对原数据进行编码
        print(data) # [2 2 1 1 0 0]
        """
        lbe = LabelEncoder()
        lbe.fit(data[feat])
        train[feat] = lbe.transform(train[feat])
        test[feat] = lbe.transform(test[feat])
    # Min-max scale the dense features into [0, 1]; fit on train only so
    # the test set is transformed with training statistics.
    """
    竖着进行归一化
    见 https://blog.csdn.net/wyssailing/article/details/100626703
    mms = MinMaxScaler(feature_range=(0, 1), copy=True)
    print(mms.fit_transform([[1., -1., 2.],  # [[0.5        0.         1.        ]
                             [2., 0., 0.],  # [1.         0.5        0.33333333]
                             [0., 1., -1.]]))  # [0.         1.         0.        ]]
    """
    mms = MinMaxScaler(feature_range=(0, 1), copy=True)
    mms.fit(train[dense_features])
    train[dense_features] = mms.transform(train[dense_features])

    # 2. Preprocess the variable-length sequence features.
    # Encode the movie-genres feature.
    """
    genres_key2index: 风格对应的index值 {'Action': 1 ...}
    train_genres_list: feature, [[1,2,3,0,0,0]] , 将 genres 进行标签化成一个列表, 元素个数为6
    genres_maxlen: 最长的元素风格数
    """
    genres_key2index, train_genres_list, genres_maxlen = get_var_feature(train, 'genres')
    user_key2index, train_user_hist, user_maxlen = get_var_feature(train, 'user_hist')

    user_feature_columns = [SparseFeat(feat, data[feat].nunique(), embedding_dim=4)
                            for i, feat in enumerate(user_sparse_features)] + [DenseFeat(feat, 1, ) for feat in
                                                                               user_dense_features]
    item_feature_columns = [SparseFeat(feat, data[feat].nunique(), embedding_dim=4)
                            for i, feat in enumerate(item_sparse_features)] + [DenseFeat(feat, 1, ) for feat in
                                                                               item_dense_features]

    # NOTE(review): the vocabulary_size values below are hard-coded magic
    # numbers; presumably they must cover len(genres_key2index) + 1 and
    # len(user_key2index) + 1 respectively — confirm against the data.
    item_varlen_feature_columns = [VarLenSparseFeat(SparseFeat('genres', vocabulary_size=1000, embedding_dim=4),
                                                    maxlen=genres_maxlen, combiner='mean', length_name=None)]

    user_varlen_feature_columns = [VarLenSparseFeat(SparseFeat('user_hist', vocabulary_size=3470, embedding_dim=4),
                                                    maxlen=user_maxlen, combiner='mean', length_name=None)]

    # 3. For each tower, combine the sparse, dense and sequence feature columns.
    user_feature_columns += user_varlen_feature_columns
    item_feature_columns += item_varlen_feature_columns

    # Assemble the model input as a dict of feature name -> values.
    train_model_input = {name: train[name] for name in sparse_features + dense_features}
    train_model_input["genres"] = train_genres_list
    train_model_input["user_hist"] = train_user_hist
    '''
    train_model_input 字典 
    keys
    ['user_id', 'movie_id', 'gender', 'age', 'occupation', 'user_mean_rating', 
     'item_mean_rating', 'genres', 'user_hist']
    '''
    # 4. Build the model, then train, predict and evaluate.
    device = 'cpu'
    use_cuda = True
    if use_cuda and torch.cuda.is_available():
        print('显卡可用...')
        device = 'cuda:0'

    model = DSSM(user_feature_columns, item_feature_columns, device=device)
    model.to(device)
    print(model)
    # Optimizer, loss and metric configuration.
    model.compile("adam", "binary_crossentropy", metrics=['auc', 'accuracy'])
    # Start training.
    model.fit(train_model_input, train[target].values, batch_size=256, epochs=10, verbose=2, validation_split=0.2)

    # Save the trained weights; moved to CPU first so the checkpoint is
    # device-independent, then moved back for inference below.
    torch.save(model.cpu().state_dict(), './save_model/model.pt')
    model.to(device)
    # 5. Prepare the test data: user/item statistics come from the training
    #    set; unseen users/items fall back to 0.5 (and '1' for the history).
    test = pd.merge(test, train[['movie_id', 'item_mean_rating']].drop_duplicates(), on='movie_id', how='left').fillna(
        0.5)
    test = pd.merge(test, train[['user_id', 'user_mean_rating']].drop_duplicates(), on='user_id', how='left').fillna(
        0.5)
    test = pd.merge(test, train[['user_id', 'user_hist']].drop_duplicates(), on='user_id', how='left').fillna('1')
    test[dense_features] = mms.transform(test[dense_features])

    test_genres_list = get_test_var_feature(test, 'genres', genres_key2index, genres_maxlen)
    test_user_hist = get_test_var_feature(test, 'user_hist', user_key2index, user_maxlen)

    test_model_input = {name: test[name] for name in sparse_features + dense_features}
    test_model_input["genres"] = test_genres_list
    test_model_input["user_hist"] = test_user_hist

    # %%
    # 6. Evaluate on the training set.
    eval_tr = model.evaluate(train_model_input, train[target].values)
    print(eval_tr)

    # %%
    pred_ts = model.predict(test_model_input, batch_size=2000)
    print("test LogLoss", round(log_loss(test[target].values, pred_ts), 4))
    print("test AUC", round(roc_auc_score(test[target].values, pred_ts), 4))

    # %%
    # 7. Inspect the tower embeddings produced during the last forward pass.
    print("user embedding shape: ", model.user_dnn_embedding[:2])
    print("item embedding shape: ", model.item_dnn_embedding[:2])

    # %%
    # 8. Extract each single tower from the trained two-tower model.
    dict_trained = model.state_dict()  # trained model
    trained_lst = list(dict_trained.keys())

    # user tower: build a user-only DSSM and copy the matching trained weights.
    model_user = DSSM(user_feature_columns, [], device=device)
    model_user.to(device)
    dict_user = model_user.state_dict()
    for key in dict_user:
        dict_user[key] = dict_trained[key]
    model_user.load_state_dict(dict_user)  # load trained model parameters of user tower
    user_feature_name = user_sparse_features + user_dense_features
    user_model_input = {name: test[name] for name in user_feature_name}
    user_model_input["user_hist"] = test_user_hist
    user_embedding = model_user.predict(user_model_input, batch_size=2000)
    print("single user embedding shape: ", user_embedding[:2])

    # item tower: same extraction for the item side.
    model_item = DSSM([], item_feature_columns, device=device)
    model_item.to(device)
    dict_item = model_item.state_dict()
    for key in dict_item:
        dict_item[key] = dict_trained[key]
    model_item.load_state_dict(dict_item)  # load trained model parameters of item tower
    item_feature_name = item_sparse_features + item_dense_features
    item_model_input = {name: test[name] for name in item_feature_name}
    item_model_input["genres"] = test_genres_list
    item_embedding = model_item.predict(item_model_input, batch_size=2000)  # (2000,128)
    print("single item embedding shape: ", item_embedding[:2])  # (2,128)

    # Top-k recall: build a single-user input and score it against all items.
    temp_user = {}
    for key in user_model_input.keys():
        temp_user[key] = []
        # Scalar features become a one-element Series; array-valued features
        # (the padded history) become a one-row 2-D array.
        if user_model_input[key][0].shape == ():
            temp_user[key].append(user_model_input[key][0])
            temp_user[key] = pd.Series(data=temp_user[key])
        else:
            # NOTE(review): hard-codes 'user_hist' instead of using `key`;
            # this only works because user_hist is the sole array-valued
            # input in user_model_input — confirm before adding more.
            temp_user[key] = np.array([user_model_input['user_hist'][0]])

    user_embedding_temp = model_user.predict(temp_user, batch_size=1)
    score_list = Cosine_Similarity(torch.tensor(user_embedding_temp), torch.tensor(item_embedding))
    # Indices of the top-100 recommended items.
    item_id_list = torch.topk(score_list, 100, largest=True, sorted=True, out=None)[1].numpy()
    # [1129  960   72  668  495  132  813 1848 1282  500 ...]
    # Map indices back to movie ids (duplicates possible, hence the set).
    print(set([item_model_input['movie_id'][index] for index in item_id_list]))
    # {132, 1547, 2319, 1171, 1050, 1563, 2335, 2336, 2856, 2602, 2478, 2235, 188, 319, 710, 2630, 75, 2385, 2898, 1244, 2274, 2279, 1898, 1391, 2546, 2805, 2939}
