# -*- coding: utf-8 -*-
"""
------------------------------------------------------------------------------
    File Name:  corpora_and_vector_spaces
    Author   :  wanwei1029
    Date     :  2018/10/28
    Desc     :  本节内容主要是熟悉类Dictionary和词袋（bow)模型，词典是根据已有文档
    内容产生的一个dict，而词袋模型，指的是字典的doc2bow方法，能够基于Dictionary,将
    文档（doc)转换成bow模型,也就是（词项ID,词出现次数）的向量组合。
    Dictionary只是一个工具，最终的目的还是为了将原语料转换成词袋模型（bow)
------------------------------------------------------------------------------
"""
import logging
import os
import tempfile
from collections import Counter, defaultdict
from pprint import pprint

from gensim import corpora


# Use a per-user temp directory so the script runs on any OS; the previous
# hard-coded Windows path (D:\nas\gensim\temp) only worked on one machine.
TEMP_FOLDER = os.path.join(tempfile.gettempdir(), "gensim")
# save()/save_as_text()/serialize() below require the folder to exist.
os.makedirs(TEMP_FOLDER, exist_ok=True)
print('Folder "{}" will be used to save temporary dictionary and corpus.'.format(TEMP_FOLDER))
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)


def dictionary_demo():
    """
    Demonstrate gensim's Dictionary: a token -> integer-id mapping.

    Mapping rule: each document (inner list) is scanned in order and every
    previously-unseen token receives the next free id, so token ids depend
    only on the order of first appearance, not on any global sort.

    add_documents() extends an existing dictionary in place without changing
    ids that were already assigned.

    save() writes a binary (pickled) format; save_as_text() writes a readable
    dump meant for inspection: the first line is the document count, and each
    following line is ``id<TAB>token<TAB>document_frequency``.  Note that
    document_frequency is the number of documents the token appears in, not
    its total occurrence count.

    :return: None (prints diagnostics and writes two files to TEMP_FOLDER).
    """
    words = [["张三", "李四", "王五", "张三", "王五", "张三", "大家"],
             ["张三", "李四", "王五", "张三", "王五", "张三", "大家"]]
    dictionary = corpora.Dictionary(words)
    print(dictionary.token2id)
    dictionary.add_documents([["王五", "赵六", "赵六", "赵六", "赵六", "赵六", "问题", "处理"], ["孙七", "孙八"]])
    print(dictionary.token2id)
    print("字典长度为：{0}".format(len(dictionary)))
    print(dictionary.id2token)
    print(dictionary.num_docs)  # total number of documents processed
    print(dictionary.num_nnz)   # total non-zero BOW entries, i.e. sum over docs of unique tokens per doc
    print(dictionary.num_pos)   # total number of token positions, duplicates included
    # Fix: make sure the output folder exists, otherwise save() raises.
    os.makedirs(TEMP_FOLDER, exist_ok=True)
    dictionary.save(os.path.join(TEMP_FOLDER, 'deer_test.dict'))
    dictionary.save_as_text(os.path.join(TEMP_FOLDER, 'deer_test.txt'))


def load_dictionary():
    """
    Load the dictionary saved by dictionary_demo() from both formats.

    load_from_text() rebuilds the dictionary from the text dump and loses
    num_nnz / num_pos (they are not stored in that format), while load()
    restores the full pickled state including those counters.

    :return: None (prints diagnostics for both loaded dictionaries).
    """
    def _show(dct):
        # Print the common diagnostic attributes of a Dictionary.
        print(dct.token2id)
        print(dct.id2token)
        print(dct.num_docs)
        print(dct.num_nnz)
        print(dct.num_pos)

    loaded_dct = corpora.Dictionary.load_from_text(os.path.join(TEMP_FOLDER, 'deer_test.txt'))
    _show(loaded_dct)
    loaded_dct2 = corpora.Dictionary.load(os.path.join(TEMP_FOLDER, 'deer_test.dict'))
    _show(loaded_dct2)
    print(loaded_dct2.dfs)


def demo():
    """
    Build a bag-of-words corpus from a small document collection.

    Pipeline: lowercase + whitespace tokenization, stopword removal, removal
    of tokens that occur only once across the whole collection, Dictionary
    construction, doc2bow conversion of every document into a list of
    (token_id, count) pairs, and serialization of the corpus in Matrix
    Market format.

    :return: None (prints diagnostics and writes files to TEMP_FOLDER).
    """
    documents = ["Human machine interface for lab abc computer applications",
                 "A survey of user opinion of computer system response time",
                 "The EPS user interface management system",
                 "System and human system engineering testing of EPS",
                 "Relation of user perceived response time to error measurement",
                 "The generation of random binary unordered trees",
                 "The intersection graph of paths in trees",
                 "Graph minors IV Widths of trees and well quasi ordering",
                 "Graph minors A survey"]
    stoplist = set('for a of the and to in'.split())
    texts = [[word for word in document.lower().split() if word not in stoplist] for document in documents]
    # Collection-wide token frequencies in one pass (Counter replaces the
    # manual defaultdict accumulation loop).
    frequency = Counter(token for text in texts for token in text)
    # Keep only tokens that appear more than once in the whole collection.
    texts = [[token for token in text if frequency[token] > 1] for text in texts]
    dictionary = corpora.Dictionary(texts)
    print(dictionary)
    # Fix: make sure the output folder exists, otherwise save() raises.
    os.makedirs(TEMP_FOLDER, exist_ok=True)
    dictionary.save(os.path.join(TEMP_FOLDER, 'deerwester.dict'))
    print(dictionary.token2id)
    new_doc = "Human computer interaction computer"
    # return_missing=True yields (bow_vector, {out_of_vocabulary_token: count}).
    new_vec = dictionary.doc2bow(new_doc.lower().split(), return_missing=True)
    print(new_vec)
    # With the dictionary in hand, convert every document to its BOW vector.
    # NOTE: the in-memory corpus list can still be extended here, but a corpus
    # loaded back from disk is an MmCorpus and cannot be appended to.
    corpus = [dictionary.doc2bow(text) for text in texts]
    pprint(corpus)
    # Persist the BOW corpus in Matrix Market format.
    corpora.MmCorpus.serialize(os.path.join(TEMP_FOLDER, 'deer_test.mm'), corpus)


if __name__ == '__main__':
    # Select which demo to run; unknown names are silently ignored,
    # matching the original if/elif chain.
    test_method = "demo"
    dispatch = {
        "demo": demo,
        "dictionary_demo": dictionary_demo,
        "load_dictionary": load_dictionary,
    }
    selected = dispatch.get(test_method)
    if selected is not None:
        selected()

