import re
from collections import Counter

import jieba
from peewee import *

title = []
# date = []
desc = []
words = []
no_stop_words = []
new_a = {}
db = MySQLDatabase('scauInfo', host='127.0.0.1', user='mysql用户名', passwd='mysql密码')
db.connect()
class BaseModel(Model):
    class Meta:
        database = db  # 每一个继承BaseModel类的子类都是连接db表

def main():
    getSentence()
    splitSentence()
    useStopWord()
    getTimes()

def getSentence():
    class Info(BaseModel):
        title = CharField()
        date = CharField()
        desc = CharField()
    datas = Info.select()
    for data in datas:
        title.append(data.title)
        # date.append(data.date)
        desc.append(data.desc)

def splitSentence():
    jieba.load_userdict('D:\xxx路径\specialWords.txt')
    for sentence1 in title:
        # 使用jieba进行分词,使用精确模式
        devision_words1 = jieba.cut(sentence1, cut_all=False)
        # 将分词后的结果转化为列表，然后添加到分词列表中
        words.extend(list(devision_words1))
    for sentence2 in desc:
        # 使用jieba进行分词,使用精确模式
        devision_words2 = jieba.cut(sentence2, cut_all=False)
        # 将分词后的结果转化为列表，然后添加到分词列表中
        words.extend(list(devision_words2))
    # print(words)

def useStopWord():
    stop_path = r"D:\xxx路径\stopWord.txt"  # 停用词表的位置
    stop_list = []
    for line in open(stop_path, 'r', encoding='utf-8').readlines():
        stop_list.append(line.strip())

    for word in words:  # 使用分词后的结果然后用空格进行分割,得到每个分词
        if word not in stop_list:  # 如果这个分词不在停用词表中并且不是换行或者制表符就将其加入到最后的字符串中,然后加一个空格
            word = re.sub(r'\d', "", word)  # 去除单词中的数字
            word = re.sub(r'\s', "", word)  # 去除单词中的空格
            word = re.sub(r'\W', "", word)  # 去除单词中的字母
            if word:
                if(len(word) > 1):
                    no_stop_words.append(word)

def getTimes():
    # 统计每一个单词的出现次数，使用字典的形式进行统计
    result = {}
    for word in no_stop_words:
        res = result.get(word, 0)
        if res == 0:
            result[word] = 1
        else:
            result[word] = result[word] + 1

    result = sorted(result.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    result = dict(result)
    for i, (k, v) in enumerate(result.items()):
        new_a[k] = v
        if i == 119:
            saveWords(list(new_a.keys()))
            # print(new_a.keys())
            break
    new_a.clear()

def saveWords(data):
    class highFreWords (BaseModel):
        word = CharField()
    highFreWords.create_table()
    i = 0
    length = len(data)
    while i < length:
        highFreWords.create(word=data[i])
        i += 1

# NOTE(review): dead code below — ``fre2``/``cum2`` are undefined in this file
# and the function headers lack parentheses; kept for reference with the
# comments translated to English.
# def getAllFrequency:
#     # Accumulate word frequencies, using the dict from the previous step
#     cum2 = {}
#     sum = 0
#     new_a.clear()
#     for i, (k, v) in enumerate(fre2.items()):
#         sum = sum + v
#         new_a[k] = sum
#     cum2 = new_a.copy()
#
# def getEveryFrequency:
#     # Print the first ten cumulative-frequency results from the dict:
#     for i, (k, v) in enumerate(cum2.items()):
#         new_a[k] = v
#         if i == 9:
#             print(new_a)
#             break


if __name__ == "__main__":  # 当程序执行时
    # 调用函数
    main()