import csv
import os
import re
import sys

import jieba
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer

# Input CSV of weibo posts (column index 9 holds the post text).
# NOTE: the path previously contained a single backslash before 数 (an
# invalid escape sequence, a SyntaxWarning on modern Python); the doubled
# backslash below produces the exact same string value.
FILEPATH = "D:\\00-postgraduate\\postgraduate2\\时间序列预测\\数据集1.1\\11498446.csv"
# Stop-word list: UTF-8 text, one word per line.
STOP_WORDS_PATH = "D:\\00-postgraduate\\postgraduate2\\MicroHotPotCompetition\\cn_stopwords.txt"
# Unused module-level accumulator; kept so any external reference still works.
WordsBag = []

#加载停用词，txt格式，一行一个
# Load stop words from a UTF-8 text file, one word per line.
def loadStopWords(filePath) -> set:
    """Return the set of stop words read from *filePath*.

    The file is UTF-8, one word per line. Blank lines are skipped rather
    than terminating the read — the previous implementation stopped at the
    first empty line and silently lost every word after it. Iterating the
    file object also removes the dead ``linestr != None`` check
    (``readline().strip()`` can never return None) and the pointless
    try/except around ``readline``.
    """
    stopWords = set()
    with open(filePath, "r", encoding="utf-8") as f:
        for line in f:
            word = line.strip()
            if word:  # skip blank lines instead of stopping at the first one
                stopWords.add(word)
    return stopWords

#加载单个微博文件
# Load a single weibo CSV file.
def loadWeibo(filepath, limit=None) -> tuple:
    """Load a GB18030-encoded weibo CSV file.

    Uses the stdlib ``csv`` module so quoted fields that contain commas are
    parsed correctly — the previous ``str.split(",")`` approach silently
    mis-split such rows. Blank lines are skipped rather than terminating the
    read, and rows without a text column (fewer than 10 fields) are skipped
    instead of raising IndexError.

    Args:
        filepath: path to the CSV file.
        limit: optional maximum number of rows to load; ``None`` loads all.

    Returns:
        ``(weibos_all, weibos_texts)`` — the full parsed rows, and the text
        column (index 9) of each row.
    """
    weibos_all = []
    weibos_texts = []
    # newline="" is the documented way to open files for the csv module.
    with open(filepath, "r", encoding="GB18030", newline="") as f:
        for row in csv.reader(f):
            if not row:
                continue  # skip blank lines instead of stopping at the first one
            if len(row) <= 9:
                continue  # malformed row: no text column to extract
            weibos_all.append(row)
            weibos_texts.append(row[9])
            # Original semantics preserved: exactly `limit` rows are kept.
            if limit is not None and len(weibos_all) >= limit:
                break
    return weibos_all, weibos_texts
# Stop-word set and weibo row/text lists.
stopwords = loadStopWords(STOP_WORDS_PATH)
weibos, weibos_text = loadWeibo(FILEPATH, limit=100000)

# Keep only tokens made entirely of Chinese characters — no symbols, latin
# letters or digits. fullmatch (not match) is required: match() only checks
# the prefix, so a token like "中文abc" would previously slip through.
# Compiled once, outside the loop.
CN_WORD_RE = re.compile(r'[\u4e00-\u9fa5]+')

sentences_wordlist = []
for sentence in weibos_text:
    wordlist = [
        w
        for w in jieba.cut(sentence)
        # Also drop stop words: the set loaded above was previously built
        # but never applied (see the dead prototype code at the bottom of
        # this file, which shows the intended filtering).
        if CN_WORD_RE.fullmatch(w) and w not in stopwords
    ]
    sentences_wordlist.append(" ".join(wordlist))

# Build the bag-of-words model: terms must appear in at least 10 documents.
vectorizer = CountVectorizer(min_df=10, ngram_range=(1, 1))
features = vectorizer.fit_transform(sentences_wordlist)

# All terms of the bag-of-words model, ordered by feature column.
try:
    words = vectorizer.get_feature_names_out()  # sklearn >= 1.0
except AttributeError:
    words = vectorizer.get_feature_names()  # removed in sklearn 1.2

dict1 = vectorizer.vocabulary_  # term -> column index (NOT a frequency)

# BUG FIX: the old code sorted vocabulary_ by its values, but those values
# are column indices, so it printed the alphabetically-last terms — not the
# most frequent ones the message below promises. Summing the count matrix
# per column gives the real corpus-wide term frequencies.
term_counts = features.sum(axis=0).tolist()[0]
dict1_sorted_values = sorted(
    zip(words, term_counts), key=lambda kv: kv[1], reverse=True
)

print("词库大小：%d\n"%(len(dict1)))
print("前100个高频词为：\n")
print(dict1_sorted_values[0:100])

# NOTE(review): dead code — an early word-frequency prototype kept as a
# module-level string literal; it is never executed. The stop-word
# filtering it sketches is the apparent intent behind loading `stopwords`
# above. Consider deleting this block.
'''
#构建字典
words_dict = {}
for weibo in weibos:
    weiboText = weibo[9]
    new_words = list(jieba.cut(weiboText))
    for word in new_words:
        if word not in stopwords:
            if word not in words_dict:
                words_dict[word] = 1
            else:
                words_dict[word] = words_dict[word] + 1

print("hell0")
'''
