
# 02. Parse the token file and build the vocabulary with word frequencies
"""
Tokenize each caption, count the frequency of every word, and store the
results in a file. That file serves two purposes:
(1) later loading to build two mappings: word -> word id, and word id -> word;
(2) filtering out words whose frequency is too low.
"""
import os
import sys
import pprint
import codecs

def count_vocab(input_description_file):
    """Build a word-frequency vocabulary from a caption/token file.

    Each line is expected to have the form ``image_id<TAB>description``.
    The description is tokenized by whitespace.

    Args:
        input_description_file: path to the token file, one caption per line.

    Returns:
        dict mapping word -> number of occurrences across all captions.

    Side effects:
        Prints the maximum sentence length and pretty-prints the
        {sentence_length: number_of_sentences} histogram.
    """
    max_length_of_sentences = 0  # longest sentence (in words) seen so far
    length_dict = {}             # {sentence length: number of sentences}
    vocab_dict = {}              # {word: frequency}
    # Open with an explicit encoding: token files are UTF-8, and relying on
    # the platform default (e.g. cp1252 on Windows) breaks on non-ASCII text.
    # Iterate the file lazily instead of readlines() to avoid loading the
    # whole corpus into memory at once.
    with open(input_description_file, encoding='utf-8') as f:
        for line in f:
            image_id, description = line.strip('\n').split('\t')
            words = description.strip(' ').split()  # whitespace tokenization
            # e.g. words == ['Two', 'young', 'guys', 'with', 'shaggy', ...]

            max_length_of_sentences = max(max_length_of_sentences, len(words))
            length_dict[len(words)] = length_dict.get(len(words), 0) + 1

            # Accumulate word counts.
            for word in words:
                vocab_dict[word] = vocab_dict.get(word, 0) + 1

    print(max_length_of_sentences)
    pprint.pprint(length_dict)
    return vocab_dict

if __name__ == '__main__':
    input_description_file = "D:/PyCharm2018/PyCharmProject/Vis2LanDemo/Demo03/flickr30ktoken/flickr30k.token"
    output_vocab_file = "D:/PyCharm2018/PyCharmProject/Vis2LanDemo/Demo03/flickr30ktoken/vocab.txt"

    vocab_dict = count_vocab(input_description_file)
    # Sort the vocabulary by frequency, most frequent first.
    sorted_vocab_dict = sorted(vocab_dict.items(), key=lambda item: item[1], reverse=True)

    # BUG FIX: mode 'rw' is not a valid open() mode and raises ValueError at
    # runtime; this file is only written, so use 'w'. Write as UTF-8
    # explicitly so the output does not depend on the platform default
    # encoding (the commented-out windows-1252 hint suggests that was an
    # issue before).
    with open(output_vocab_file, 'w', encoding='utf-8') as f:
        # The <UNK> sentinel gets an artificially huge count so it always
        # survives frequency filtering and sorts first.
        f.write('<UNK>\t1000000\n')
        for word, freq in sorted_vocab_dict:
            f.write('%s\t%d\n' % (word, freq))



