# -*- coding: utf-8 -*-

"""
document words to index
save word dict
"""
import tensorflow.examples.tutorials.word2vec
import codecs
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.preprocessing import text


def read_file_to_dict(file_path, sentence_size=10, min_word_freq=0):
    """
    Build a word vocabulary from a whitespace-tokenized text file.

    Usage notes for tf.contrib.learn's VocabularyProcessor:
      - save:    vocab_processor.save(filename)
      - restore: text.VocabularyProcessor.restore(filename)
      - fit() trains the vocabulary, transform() converts text to indices
      - examples: https://github.com/tensorflow/tensorflow/tree/r1.3/tensorflow/contrib/learn/python/learn/preprocessing/tests
      - transform sentences: vocab_processor.transform(["a b c", "b c a"])
        returns a sequence of numpy.ndarray
      - look up one word: vocab_processor.vocabulary_.get("word")
      - len(vocab_processor.vocabulary_) is one larger than the word
        count because indices start at 1

    :param file_path: corpus file, one sentence per line, tokens
                      separated by spaces
    :param sentence_size: maximum sentence length kept by the processor
    :param min_word_freq: words occurring no more than this are dropped
    :return: a fitted text.VocabularyProcessor
    """
    # Explicit UTF-8: the corpus contains non-ASCII (Chinese) tokens and
    # codecs.open without an encoding falls back to a plain byte stream,
    # which would feed undecoded bytes into the vocabulary.
    with codecs.open(file_path, encoding="utf-8") as f:
        vocab_processor = text.VocabularyProcessor(sentence_size, min_frequency=min_word_freq)
        # fit() consumes an iterator of space-separated token strings.
        vocab_processor.fit(f)
    return vocab_processor


def create_input_output(file_path):
    """
    Yield (input, output) word pairs from a text file, i.e. x and y.

    For every line, each adjacent token pair (w[i], w[i+1]) produces one
    training example [w[i+1], w[i]]: the next word is the input (x) and
    the current word is the output (y).

    :param file_path: corpus file, tokens separated by single spaces
    :return: generator of two-element lists [x_word, y_word]
    """
    # Explicit UTF-8 so non-ASCII corpora decode consistently with the
    # rest of the pipeline.
    with codecs.open(file_path, encoding="utf-8") as f:
        for line in f:
            words = line.strip().split(" ")
            for i in range(len(words) - 1):
                yield [words[i + 1], words[i]]


def word_to_vec(vocab_processor, embedding_size, word_iter):
    """
    Build (part of) a word2vec NCE training graph.

    NOTE(review): this function looks unfinished — it constructs the
    embedding/NCE graph and a loss node but never creates a session,
    optimizer or feed; `word_iter` is never consumed and the loop below
    only prints the iteration number.

    :param vocab_processor: fitted VocabularyProcessor (provides the
                            vocabulary size)
    :param embedding_size: dimensionality of the embedding vectors
    :param word_iter: iterator of word pairs — currently unused
    :return: None
    """
    vocabulary_size = len(vocab_processor.vocabulary_)
    # Embedding matrix initialized uniformly in [-1, 1).
    embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    # NCE output weights scaled by 1/sqrt(embedding_size); biases start at 0.
    nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size],
                                                  stddev=1.0 / np.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
    inputs = tf.placeholder(tf.int32, shape=[None, 1])
    # NOTE(review): tf.nn.nce_loss expects labels of shape
    # [batch_size, num_true]; shape [None, vocabulary_size] here looks
    # inconsistent with that contract — confirm before using this graph.
    y_target = tf.placeholder(tf.int32, shape=[None, vocabulary_size])
    embed = tf.nn.embedding_lookup(embeddings, inputs)
    # 3 negative samples per positive example.
    loss = tf.reduce_mean(tf.nn.nce_loss(nce_weights,
                                         nce_biases,
                                         y_target,
                                         embed,
                                         3,
                                         vocabulary_size))

    # run
    # Placeholder training loop: only prints "iteration i", no sess.run.
    iter_num = 2
    for i in range(iter_num):
        print("第" + str(i) + "次迭代:\n")


def a(vocab_processor_, word_iter):
    """
    Small end-to-end demo: embed word pairs and evaluate an NCE loss.

    Converts the (input, output) word pairs from `word_iter` to integer
    indices, builds a toy skip-gram graph (embedding lookup + NCE loss),
    runs it in a session and prints the intermediate tensors.

    :param vocab_processor_: fitted VocabularyProcessor used to map
                             words to integer indices
    :param word_iter: iterator of [input_word, output_word] pairs
    :return: None (results are printed)
    """
    embedding_size = 5
    word_pairs = list(word_iter)
    input_words_index = list()
    output_words_index = list()
    for pair in word_pairs:
        print(pair)
        input_words_index.append(vocab_processor_.vocabulary_.get(pair[0]))
        # nce_loss wants labels of shape [batch, 1], hence the nesting.
        output_words_index.append([vocab_processor_.vocabulary_.get(pair[1])])
    print(input_words_index)
    vocabulary_size = len(vocab_processor_.vocabulary_)
    inputs = tf.placeholder(tf.int32, shape=[None])
    outputs = tf.placeholder(tf.int32, shape=[None, 1])

    # Fixed seed keeps the demo output reproducible across runs.
    embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0, seed=1))
    embed = tf.nn.embedding_lookup(embeddings, inputs)

    nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size],
                                                  stddev=1.0 / np.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
    # 3 negative samples per positive example.
    loss = tf.reduce_mean(tf.nn.nce_loss(nce_weights,
                                         nce_biases,
                                         outputs,
                                         embed,
                                         3, vocabulary_size))
    # global_variables_initializer replaces the long-deprecated
    # tf.initialize_all_variables (removed after TF 1.x era releases).
    init = tf.global_variables_initializer()

    # Run the graph; `with` guarantees the session is closed even if a
    # run call raises (the original leaked the session).
    with tf.Session() as sess:
        sess.run(init)
        temp = sess.run(embeddings)         # numpy.ndarray
        print(temp)
        print(sess.run(embed, feed_dict={inputs: input_words_index[0:2]}))
        print(sess.run(loss, feed_dict={inputs: input_words_index[0:2], outputs: output_words_index[0:2]}))
        print(type(temp))





def fw(vocabulary_size, batch_size, num_sampled, embedding_size=10):
    """
    Stub: assemble the forward part of a word2vec graph.

    Only the embedding table, the input/label placeholders, the lookup
    op and the NCE weight matrix are created; `num_sampled` is accepted
    but not used yet and nothing is returned.

    :param vocabulary_size: number of rows in the embedding table
    :param batch_size: fixed batch dimension of the placeholders
    :param num_sampled: reserved for the NCE loss (unused so far)
    :param embedding_size: embedding vector dimensionality
    :return: None
    """
    embedding_table = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    x_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    y_target = tf.placeholder(tf.int32, shape=[batch_size, 1])
    looked_up = tf.nn.embedding_lookup(embedding_table, x_inputs)

    weight_stddev = 1.0 / np.sqrt(embedding_size)
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size], stddev=weight_stddev))

    pass


def make_train_data():
    """
    Build the training data file (not implemented yet).

    Intended record layout: ``index1 \t index2`` per line, where index1
    is the input word index and index2 is the output word index.

    :return: None
    """
    pass


if __name__ == "__main__":
    f_path = "/home/elliottqian/Documents/PycharmProjects/nlp_study_py/nlpTools/resource/rt-polarity.neg"
    test_path = "/mnt/D/Ubuntu/PycharmProjects/nlp_study_py/nlpTools/resource/test"
    vocab_processor_ = read_file_to_dict(test_path, sentence_size=5)
    word_iter = create_input_output(test_path)
    a(vocab_processor_, word_iter)
    pass
