# encoding: utf-8


"""

@author: tongzhenguo

@time: 2021/4/27 下午2:54

@desc:

https://zhuanlan.zhihu.com/p/55960239

算法输入1 idlist:每一行代表的是一个session的行为item的唯一索引id(从0计数)
算法输入2 namelist:每一行代表的是一个session的行为item
算法输出：id和对应的向量
"""
import codecs
import json
import math
import random

import numpy as np
from tensorflow.python.keras import Input
from tensorflow.python.keras.layers import Dense, Embedding, Reshape, dot
from tensorflow.python.keras.models import Model

# Input corpora: one JSON object per line; each object has a 'movie_names' list.
DoulistFile = '../../data/doulist_0804_09.json'
MovieFile = '../../data/movie_0804_09.json'
# Derived corpus paths: one session per line of movie ids / movie names.
DoulistCorpusIdFile = DoulistFile.replace('json', 'movie_id')
DoulistCorpusNameFile = DoulistFile.replace('json', 'movie_name')

# network conf
min_word_freq__ = 10  # frequency threshold: movies seen fewer times are filtered out
emb_size = 100  # embedding dimensionality
context_window_size = 2  # skip-gram context window size
epochs = 1  # number of training epochs (set to 1 here for convenience)
batch_size = 256  # training batch size


def get_movie_freq(doulist_file=DoulistFile, min_word_freq=0):
    """Count how often each movie name occurs across all doulist sessions.

    :param doulist_file: path to the json corpus (one doulist per line).
    :param min_word_freq: minimum occurrence count; rarer movies are dropped.
    :return: dict mapping movie name -> occurrence count.
    """
    counts = {}
    with codecs.open(doulist_file, encoding="utf8") as fin:
        for raw_line in fin:  # one doulist session per line
            record = json.loads(raw_line.strip())
            for name in record['movie_names']:
                counts[name] = counts.get(name, 0) + 1
    # Keep only movies that meet the minimum frequency.
    return {name: cnt for name, cnt in counts.items() if cnt >= min_word_freq}


# NOTE: the "id" here is an ordinal index assigned by frequency rank, not a movie_id.
def get_movie_name_id_dict(doulist_file=DoulistFile, min_word_freq=0):
    """Build the movie-name -> index vocabulary (movie2idx).

    Ids are assigned by descending frequency, ties broken alphabetically, and a
    '<unk>' entry is appended with the next free id.

    :param doulist_file: path to the json corpus.
    :param min_word_freq: minimum occurrence count for a movie to get an id.
    :return: dict mapping movie name -> int id (0-based).
    """
    movie_freq = get_movie_freq(doulist_file, min_word_freq)
    # Sort by count descending, then name ascending for deterministic ids.
    ordered = sorted(movie_freq.items(), key=lambda kv: (-kv[1], kv[0]))
    # BUG FIX: the original `movies, _ = list(zip(*ordered))` raised ValueError
    # on an empty corpus; enumerating the sorted pairs handles that case.
    movie_name_id_dict = {name: idx for idx, (name, _) in enumerate(ordered)}
    movie_name_id_dict['<unk>'] = len(ordered)  # out-of-vocabulary bucket
    print('movie_name_id_dict is %d from [%s]' % (len(movie_name_id_dict), doulist_file))
    return movie_name_id_dict


def get_movie_id_name_dict(doulist_file=DoulistFile):
    """Build the inverse vocabulary: int id -> movie name (idx2movie)."""
    name_to_id = get_movie_name_id_dict(doulist_file)
    movie_id_name_dict = {idx: name for name, idx in name_to_id.items()}
    print('movie_id_name_dict is %d from [%s]' % (len(movie_id_name_dict), doulist_file))
    return movie_id_name_dict


def process2corpus():
    """Materialize the training corpora from the raw doulist json.

    Writes two parallel files, one session per line:
    - DoulistCorpusNameFile: tab-separated movie names
    - DoulistCorpusIdFile: space-separated movie ids
    """
    movie_name_id_dict = get_movie_name_id_dict()  # movie2idx
    print('total movie is %d from [%s], [%s]' % (len(movie_name_id_dict), DoulistFile, MovieFile))
    # BUG FIX: `unk_id` was assigned 0 and never used; a direct dict lookup
    # would KeyError on any name missing from the vocabulary (possible when a
    # min-frequency cut is applied), so fall back to the '<unk>' id instead.
    unk_id = movie_name_id_dict['<unk>']
    with codecs.open(DoulistFile, encoding="utf8") as fopen, \
            codecs.open(DoulistCorpusNameFile, 'w', encoding="utf8") as fwrite, \
            codecs.open(DoulistCorpusIdFile, 'w', encoding="utf8") as fwrite_1:
        for line in fopen:
            doulist_dict = json.loads(line.strip())
            doulist_movies = list(doulist_dict['movie_names'])
            doulist_movie_ids = [str(movie_name_id_dict.get(_, unk_id)) for _ in doulist_movies]
            fwrite.write('%s\n' % ('\t'.join(doulist_movies)))
            fwrite_1.write('%s\n' % (' '.join(doulist_movie_ids)))


def shuffle(reader, buf_size):
    """Wrap a reader so its output comes out in shuffled order.

    Items from the wrapped reader are collected into a buffer of size
    buf_size; each full buffer (and the final partial one) is shuffled
    before being emitted.

    :param reader: callable returning an iterable of items.
    :type reader: callable
    :param buf_size: number of items buffered between shuffles.
    :type buf_size: int
    :return: a new reader yielding the same items, shuffled per buffer.
    :rtype: callable
    """

    def data_reader():
        buffer = []
        for item in reader():
            buffer.append(item)
            if len(buffer) >= buf_size:
                random.shuffle(buffer)
                yield from buffer
                buffer = []
        # Flush whatever is left in the final, partially filled buffer.
        if buffer:
            random.shuffle(buffer)
            yield from buffer

    return data_reader


def skipgram_model(vocab_size, embedding_dim=100):
    """Build and compile the skip-gram network.

    :param vocab_size: number of distinct items (rows of the embedding table).
    :param embedding_dim: dimensionality of each item vector.
    :return: compiled keras Model taking [target, context] id inputs and
        predicting the probability that the pair is a true context pair.
    """
    target_in = Input(shape=(1,), name='target')
    context_in = Input(shape=(1,), name='context')
    # One embedding table shared by both inputs, so target and context
    # vectors live in the same space.
    shared_embedding = Embedding(vocab_size, embedding_dim, input_length=1,
                                 name='shared_embedding')
    target_vec = shared_embedding(target_in)
    context_vec = shared_embedding(context_in)

    # Pair score is the dot product of the two embedded vectors.
    similarity = dot([target_vec, context_vec], axes=-1)
    flattened = Reshape((1,), input_shape=(1, 1))(similarity)
    prediction = Dense(1, input_shape=(1,), activation='sigmoid')(flattened)

    model = Model(inputs=[target_in, context_in], outputs=prediction)
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model


def get_negatives_with_weight(target, context_list, sampling_weights):
    """Yield one weighted negative sample per positive context item.

    :param target: target item id.
    :param context_list: ids of the true context items (never sampled).
    :param sampling_weights: per-id sampling weights; index i is the weight
        of item id i.
    :yield: ((target, negative_id), 0) tuples, one per context item.
    """
    population = list(range(len(sampling_weights)))
    for _ in range(len(context_list)):  # 1:1 negative sampling
        # BUG FIX: random.choice takes a single sequence; weighted sampling
        # with a sample size needs random.choices(..., weights=..., k=...).
        neg_candidates = random.choices(population, weights=sampling_weights, k=int(1e5))
        i = 0
        # K far exceeds the context size, so a non-context candidate is all
        # but guaranteed; the bound still prevents an IndexError worst case.
        while i < len(neg_candidates) - 1 and neg_candidates[i] in context_list:
            i += 1
        yield ((target, neg_candidates[i]), 0)


def get_negatives(target, context_list, vocabulary_size):
    """Yield one uniformly drawn negative sample per positive context item.

    :param target: target item id.
    :param context_list: ids of the true context items (never sampled).
    :param vocabulary_size: exclusive upper bound for sampled ids.
    :yield: ((target, negative_id), 0) tuples, one per context item.
    """
    for _ in range(len(context_list)):  # one negative per positive
        candidate = random.randrange(0, vocabulary_size)
        while candidate in context_list:  # redraw if we hit a true context item
            candidate = random.randrange(0, vocabulary_size)
        yield ((target, candidate), 0)


def skipgram_reader_generator(movie_dict, file_name=DoulistCorpusNameFile, context_window=2):
    """Create a reader of skip-gram training pairs from the name corpus.

    :param movie_dict: movie name -> id mapping (must contain '<unk>').
    :param file_name: corpus file, one session of tab-separated names per line.
    :param context_window: how many neighbours on each side count as context.
    :return: callable yielding ((target_id, other_id), label) tuples — label 1
        for true context pairs, label 0 for uniform negatives (1:1 ratio).
    """
    def reader():
        vocabulary_size = len(movie_dict)
        with codecs.open(file_name, encoding="utf8") as fopen:
            for line in fopen:  # one session per line
                line_list = line.strip().split('\t')  # movie names
                movie_ids = [movie_dict.get(_, movie_dict['<unk>']) for _ in line_list]
                for i in range(len(movie_ids)):
                    target = movie_ids[i]
                    # Positive samples: every in-bounds neighbour within the window.
                    context_list = []
                    j = i - context_window
                    while j <= i + context_window and j < len(movie_ids):
                        if j >= 0 and j != i:
                            context_list.append(movie_ids[j])
                            yield ((target, movie_ids[j]), 1)
                        j += 1
                    # BUG FIX: the generator returned by get_negatives was
                    # previously created and discarded, so no negative samples
                    # were ever emitted; delegate to it with `yield from`.
                    yield from get_negatives(target, context_list, vocabulary_size)

    return reader


# 热度打压概率(context词/正样本)：1 - math.sqrt(t / count * total_cnt), t = 0.0005,小于t的将会有概率被丢弃
# 负样本采样概率： math.sqrt(count) / total_cnt
# https://tangshusen.me/Dive-into-DL-PyTorch/#/chapter10_natural-language-processing/10.3_word2vec-pytorch
# Subsampling probability for frequent items (context/positive side):
#   1 - sqrt(t / count * total_cnt); items above the threshold t are randomly dropped.
# Negative sampling probability: sqrt(count) / total_cnt
# https://tangshusen.me/Dive-into-DL-PyTorch/#/chapter10_natural-language-processing/10.3_word2vec-pytorch
def skipgram_reader_generator_v2(movie_dict, movie_freq, file_name=DoulistCorpusNameFile, context_window=2):
    """Create a skip-gram pair reader with frequency subsampling and
    frequency-weighted negative sampling.

    :param movie_dict: movie name -> id mapping (must contain '<unk>').
    :param movie_freq: movie name -> occurrence count.
    :param file_name: corpus file, one session of tab-separated names per line.
    :param context_window: how many neighbours on each side count as context.
    :return: callable yielding ((target_id, other_id), label) tuples.
    """
    total_cnt = sum(movie_freq.values())

    def discard(name, t=1e-4):
        # NOTE(review): names absent from movie_freq (i.e. below the min-freq
        # cut) are KEPT here and later mapped to '<unk>', although the original
        # comment claimed they should be dropped — confirm intended behavior.
        if name not in movie_freq:
            return False
        cnt = movie_freq[name]
        # Drop frequent items with probability 1 - sqrt(t / freq_ratio).
        return random.uniform(0, 1) < 1 - math.sqrt(t / cnt * total_cnt)

    def reader():
        # Negative-sampling weights: pow(count, 0.5) / total_cnt.
        # BUG FIX: the original built this list in movie_freq iteration order,
        # which does not match the id order assigned by get_movie_name_id_dict
        # (frequency-sorted), so weights were attributed to the wrong ids.
        # Index the list by movie id instead; '<unk>' and filtered names get
        # weight 0 and are therefore never sampled as negatives.
        sampling_weights = [0.0] * len(movie_dict)
        for name, idx in movie_dict.items():
            sampling_weights[idx] = movie_freq.get(name, 0) ** 0.5 / total_cnt
        with codecs.open(file_name, encoding="utf8") as fopen:
            for line in fopen:  # one session per line
                line_list = line.strip().split('\t')  # movie names
                # Subsample frequent items before windowing.
                movie_ids = [movie_dict.get(_, movie_dict['<unk>']) for _ in line_list if
                             not discard(_, t=5e-5)]
                for i in range(len(movie_ids)):
                    target = movie_ids[i]
                    # Positive samples: every in-bounds neighbour within the window.
                    context_list = []
                    j = i - context_window
                    while j <= i + context_window and j < len(movie_ids):
                        if j >= 0 and j != i:
                            context_list.append(movie_ids[j])
                            yield ((target, movie_ids[j]), 1)
                        j += 1
                    # BUG FIX: the generator returned by get_negatives_with_weight
                    # was previously created and discarded, so no negative samples
                    # were ever emitted; delegate to it with `yield from`.
                    yield from get_negatives_with_weight(target, context_list, sampling_weights)

    return reader


def train_skipgram_base_model(doulist_file, file):
    """Train the skip-gram item2vec model and dump the learned embeddings.

    :param doulist_file: json corpus, one doulist session per line.
    :param file: output path; each line is "<id> <v0> <v1> ..." per item.
    """
    # BUG FIX: doulist_file was previously ignored here (the default corpus
    # was always used for the vocabulary); pass it through explicitly.
    word_dict = get_movie_name_id_dict(doulist_file, min_word_freq=min_word_freq__)  # movie2idx
    movie_freq = get_movie_freq(doulist_file, min_word_freq__)
    dict_size = len(word_dict)  # vocabulary size
    model = skipgram_model(dict_size, emb_size)  # build the skip-gram graph
    for epoch_id in range(epochs):
        # Train batch by batch; sample_id counts individual training pairs.
        sample_id = 0
        x_batch = [[], []]
        y_batch = []
        loss_list = []
        for movie_ids, label in shuffle(
                skipgram_reader_generator_v2(word_dict, movie_freq, context_window=context_window_size), 10000)():
            sample_id += 1
            x_batch[0].append(movie_ids[0])
            x_batch[1].append(movie_ids[1])
            y_batch.append(label)
            if sample_id % (batch_size * 1000) == 0:  # log every 1000 batches
                # Guard against np.mean([]) producing a warning and nan noise.
                mean_loss = np.mean(loss_list) if loss_list else float('nan')
                print('[epoch #%d] batch #%d, train loss:%s' % (epoch_id, sample_id, mean_loss))
                loss_list = []
            if sample_id % batch_size == 0:  # a full batch is ready: train on it
                X = [np.array(x_batch[0]), np.array(x_batch[1])]
                loss = model.train_on_batch(X, np.array(y_batch))
                loss_list.append(loss)
                x_batch = [[], []]
                y_batch = []
        if y_batch:
            # BUG FIX: the trailing partial batch was previously dropped.
            X = [np.array(x_batch[0]), np.array(x_batch[1])]
            model.train_on_batch(X, np.array(y_batch))
    print('model train done')
    # Store the embedding table; look the layer up by name instead of the
    # fragile positional index model.layers[2].
    with codecs.open(file, 'w', encoding="utf8") as fwrite:
        embedding_matrix = model.get_layer('shared_embedding').get_weights()[0]
        for idx, vec in enumerate(embedding_matrix.tolist()):
            fwrite.write('%d %s\n' % (idx, ' '.join(str(_) for _ in vec)))


if __name__ == '__main__':
    # Guarded entry point: importing this module no longer kicks off training.
    train_skipgram_base_model(DoulistFile, "/Users/tongzhenguo/code/models/data/item2vec_out")