# -*- coding: utf-8 -*-
'''
Created on 2016-12-19

@author: ZhuJiahui
'''

from word_embedding.input_data import TextLoader

import numpy as np
import tensorflow as tf
import time
import math

def word2vec_skipgram(text_corpus, embedding_dimension, batch_size, skip_window):
    """Train skip-gram word2vec embeddings with a sampled (NCE-style) loss.

    Args:
        text_corpus: path to the text corpus handed to ``TextLoader``.
        embedding_dimension: size of each word vector.
        batch_size: number of (input, label) pairs per training step.
        skip_window: context window radius used when generating training pairs.

    Returns:
        Tuple ``(final_embeddings, vocabulary, words)``: the row-normalized
        embedding matrix evaluated after training, plus the loader's
        vocabulary mapping and word list.
    """
    negative_size = 64  # number of negative samples drawn per batch
    epoch_num = 10

    # Prepare the training data.
    # FIX: the original hard-coded skip_window=3 here, silently ignoring the
    # function's skip_window parameter.
    text_dataset = TextLoader(text_corpus, batch_size,
                              skip_window=skip_window, min_frequency=5)
    vocabulary_size = text_dataset.vocabulary_size

    # Model definition (TF1 graph mode).
    graph = tf.Graph()
    with graph.as_default():

        # Placeholders for a batch of center words and their context labels;
        # fed with concrete data at session time.
        train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
        train_labels = tf.placeholder(tf.int32, shape=[batch_size])

        # Model parameters.
        with tf.variable_scope('word2vector' + 'skip-gram'):

            # Uniform initialization in [-1, 1).
            embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_dimension], -1.0, 1.0))
            # Row-wise L2 normalization (a graph op, re-applied every forward pass).
            embeddings = tf.nn.l2_normalize(embeddings, 1)
            # Output-layer weights, scaled by 1/sqrt(dim).
            nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_dimension], stddev=1.0 / math.sqrt(embedding_dimension)))
            # Output-layer biases.
            nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

        # Look up the vectors for this batch's input words.
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)
        labels = tf.expand_dims(train_labels, axis=1)  # shape [batch, 1]

        if labels.dtype != tf.int64:
            labels = tf.cast(labels, tf.int64)
        labels_flat = tf.reshape(labels, [-1])

        # Part 1: draw negatives and score positive/negative candidates.
        sampled, true_expected_count, sampled_expected_count = tf.nn.log_uniform_candidate_sampler(
            true_classes=labels,
            num_true=1,
            num_sampled=negative_size,
            unique=True,
            range_max=vocabulary_size
            )

        all_ids = tf.concat([labels_flat, sampled], 0)

        all_w = tf.nn.embedding_lookup(nce_weights, all_ids)
        all_b = tf.nn.embedding_lookup(nce_biases, all_ids)

        true_w = tf.slice(all_w, tf.stack([0, 0]), [batch_size, embedding_dimension])
        true_b = tf.slice(all_b, [0], [batch_size])
        # FIX: the positive logit is the row-wise dot product of each input
        # embedding with ITS OWN label's weight vector -> shape [batch, 1].
        # The original used a full matmul (shape [batch, batch]) and then
        # marked the whole matrix as positive targets, which wrongly treats
        # every other example's label in the batch as a positive for every
        # input (this matches TF's reference _compute_sampled_logits).
        true_logits = (tf.reduce_sum(tf.multiply(embed, true_w), 1, keep_dims=True)
                       + tf.expand_dims(true_b, 1))

        sampled_w = tf.slice(all_w, tf.stack([batch_size, 0]), [negative_size, embedding_dimension])
        sampled_b = tf.slice(all_b, [batch_size], [negative_size])
        # Negatives are shared across the batch, so a matmul is correct here:
        # [batch, dim] x [dim, neg] -> [batch, neg].
        sampled_logits = tf.matmul(embed, sampled_w, transpose_b=True) + sampled_b

        # Subtract the log prior frequency of each candidate (the sampled-
        # softmax correction for the log-uniform sampling distribution).
        subtract_log_q = True  # named flag; was a bare `if True:`
        if subtract_log_q:
            true_logits -= tf.log(true_expected_count)      # [batch, 1]
            sampled_logits -= tf.log(sampled_expected_count)  # broadcasts [neg]
        out_logits = tf.concat([true_logits, sampled_logits], 1)
        out_targets = tf.concat([tf.ones_like(true_logits), tf.zeros_like(sampled_logits)], 1)

        # Part 2: numerically stable sigmoid cross-entropy of the candidate
        # logits against the 1 (positive) / 0 (negative) targets.
        loss_batchs = tf.nn.relu(out_logits) - out_logits * out_targets + tf.log(1 + tf.exp(-tf.abs(out_logits)))
        loss = tf.reduce_mean(tf.reduce_sum(loss_batchs, 1))
        optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)

        # Row-normalized embedding matrix to return after training.
        embeddings_norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / embeddings_norm

    # Model training.
    with tf.Session(graph=graph) as sess:

        tf.global_variables_initializer().run()

        for e in range(epoch_num):

            text_dataset.reset_batch_pointer()
            start = time.time()
            loss_all = 0
            for b in range(text_dataset.batch_num):
                # Feed one batch of training data to the model.
                batch_inputs, batch_labels = text_dataset.next_batch()
                feed = {train_inputs: batch_inputs, train_labels: batch_labels}
                loss_val, _ = sess.run([loss, optimizer], feed)
                loss_all += loss_val

            end = time.time()
            print("{}/{}, train_loss = {:.3f}, time/batch = {:.3f}" .format(e, epoch_num, loss_all/text_dataset.batch_num, end - start))
        final_embeddings = normalized_embeddings.eval()

    return final_embeddings, text_dataset.vocabulary, text_dataset.words

if __name__ == '__main__':
    # No demo wired up; import this module and call
    # word2vec_skipgram(corpus_path, embedding_dimension, batch_size, skip_window).
    pass
