import os
import random
from collections import Counter

import jieba
import matplotlib.pyplot as plt
from sklearn.naive_bayes import MultinomialNB
"""
:param
    text_folder 文件的存放路径
    test_size   测试数据集占比，默认20%
:return
    all_word_list 返回所有降频降序排列的训练集
    
"""
def TextProcessing(text_folder, test_size = 0.2):
    folder_list = os.listdir(text_folder)
    data_list = []          #数据集数据
    class_list = []
    for folder in folder_list:
        new_folder = os.path.join(text_folder, folder)
        print(new_folder)
        files = os.listdir(new_folder)
        j = 1
        for file in files:
            if j > 100:         #取样不超过100
                break
            with open(os.path.join(new_folder, file), 'r', encoding='utf-8') as f: #打开txt文件
                row = f.read()
            word_cut = list(jieba.cut(row, cut_all=False)) #cut精简模式
            data_list.append(word_cut)
            class_list.append(folder)
            j+=1
    data_class_list = zip(data_list, class_list)
    print(data_class_list) #将data和class_list压缩到一起
    list_class_list = list(data_class_list)
    random.shuffle(list_class_list) #乱序
    index = int(len(list_class_list) * test_size) + 1
    trainList = list_class_list[index:]
    testList = list_class_list[:index]
    train_data_list, train_class_list = zip(*trainList)
    test_data_list, test_class_list = zip(*testList)
    all_words_dict = {}
    for word_list in train_data_list:
        for word in word_list:
            if word in all_words_dict.keys():
                all_words_dict[word] += 1
            else:
                all_words_dict[word] = 1
    print(all_words_dict)
    #按照键的值倒序排列
    all_words_tuble_list = sorted(all_words_dict.items(), key = lambda f:f[1], reverse=True)
    all_words_list, all_words_nums = zip(*all_words_tuble_list)
    print(all_words_list)
    print(all_words_nums)
    all_words_list = list(all_words_list)
    return all_words_list,train_data_list, train_class_list, test_data_list, test_class_list
"""
  读取文件内容，并去重
  :parameter
  word_file 文件路径
  :returns
  word_set 读取的文件set集合
"""
def MakeWordSet(word_file):
    """Load a word-list file and return its words as a de-duplicated set.

    :param word_file: path to a UTF-8 text file with one word per line
    :return: set of the stripped, non-empty lines
    """
    with open(word_file, 'r', encoding='utf-8') as handle:
        # Blank lines strip to "" (falsy) and are dropped.
        return {line.strip() for line in handle if line.strip()}
"""
:param

"""
def TextFeatures(train_data_list, test_data_list, feature_words):
    """Vectorize documents as binary bag-of-words over *feature_words*.

    :param train_data_list: segmented training documents (lists of words)
    :param test_data_list: segmented test documents (lists of words)
    :param feature_words: vocabulary that fixes the feature-vector order
    :return: (train_feature_list, test_feature_list) — one 0/1 vector per document
    """
    def vectorize(document):
        # Set membership makes each feature lookup O(1).
        present = set(document)
        return [int(word in present) for word in feature_words]

    train_feature_list = list(map(vectorize, train_data_list))
    test_feature_list = list(map(vectorize, test_data_list))
    return train_feature_list, test_feature_list
"""
"""
def word_dict(all_word_list, deleteN, stopwords_set=None):
    """Select feature words from the frequency-sorted vocabulary.

    Skips the *deleteN* most frequent words (typically punctuation and
    function words), then keeps words that are not pure digits, not in
    *stopwords_set* and 2-4 characters long, capped at 1000 feature words.
    NOTE(review): the length bounds and the 1000-word cap follow the usual
    shape of this pipeline — confirm they match the intended experiment.

    :param all_word_list: vocabulary sorted by descending frequency
    :param deleteN: number of top-frequency words to drop
    :param stopwords_set: words to exclude (default: empty set)
    :return: list of selected feature words
    """
    if stopwords_set is None:  # avoid a mutable default argument
        stopwords_set = set()
    feature_words = []
    for word in all_word_list[deleteN:]:
        if len(feature_words) >= 1000:  # cap feature dimensionality
            break
        if not word.isdigit() and word not in stopwords_set and 1 < len(word) < 5:
            feature_words.append(word)
    return feature_words
#s = u'我想和女朋友一起去北京故宫博物院参观和闲逛。'

#cut = jieba.cut(s)
#print(list(cut))
# Demo corpus path.
text_folder = 'SogouC/Sample'

if __name__ == "__main__":
    # Guarded so importing this module no longer triggers the (slow,
    # I/O-heavy) preprocessing as a side effect.
    TextProcessing(text_folder)