import collections

# Stop words to exclude from the frequency count.
# Stored as a frozenset: membership (`word in stop_words`) is tested once
# per word in the counting loop, and set lookup is O(1) vs O(n) for a list.
# NOTE: 'don’t' uses a curly apostrophe (U+2019), matching the source text.
stop_words = frozenset(['\n', 'or', 'are', 'they', 'i', 'some', 'by', '—',
                        'even', 'the', 'to', 'a', 'and', 'of', 'in', 'on', 'for',
                        'that', 'with', 'is', 'as', 'could', 'its', 'this', 'other',
                        'an', 'have', 'more', 'at', 'don’t', 'can', 'only', 'most'])

# Read the text file NEWS.txt and count word occurrences.

# Parameter: tracks the longest line seen, in words.
# NOTE(review): the original comment described this as the maximum
# vocabulary size, but the value is never used to cap anything — it is
# only updated below. Kept for backward compatibility of the name.
maxlen = 1000

# Frequency table of non-stop words.
word_freqs = collections.Counter()
# Open read-only ('r', not 'r+'): the file is never written to.
with open('./data/NEWS.txt', 'r', encoding='UTF-8') as f:
    for line in f:
        # Lowercase and tokenize. str.split() with no separator splits
        # on any whitespace run and drops empty tokens — unlike
        # split(' '), which left the trailing '\n' attached to the last
        # word of every line and emitted '' for consecutive spaces,
        # corrupting the counts.
        words = line.lower().split()
        # Remember the longest line seen (see NOTE above).
        if len(words) > maxlen:
            maxlen = len(words)
        for word in words:
            # Count everything except stop words. Punctuation attached
            # to a word (e.g. "word,") is still counted as-is.
            if word not in stop_words:
                word_freqs[word] += 1

print(word_freqs.most_common(20))