#!/usr/bin/env python3
from collections import Counter
import os
import random

import jieba    # Chinese word-segmentation library
import matplotlib.pyplot as plt
from sklearn.naive_bayes import MultinomialNB

"""
函数说明：得到所有的训练集的text信息
parameter:
    folderPath - 训练集地址
return:
    textData - 训练集文本数据
    classList - 训练集分类的目录名称
"""
def TextProcessing(folderPath):
    folderList = os.listdir(folderPath)
    dataList = []   #训练集
    classList = []  #各类文章的标签
    for folder in folderList:   #遍历子所有文章的文件夹
        newFolderPath = os.path.join(folderPath,folder) #根据子文件夹，生成新的路径
        files = os.listdir(newFolderPath)   #得到新路径下面的所有txt文件
        j = 1
        for file in files:
            if j > 100: #每类txt样本数目最多100个
                break
            with open(os.path.join(newFolderPath,file),'r',encoding = 'utf-8') as f:    #打开txt文件夹并将其申明为f变量
                text = f.read() #读取txt里面的数据

            wordCut = jieba.cut(text,cut_all = False)   #以精简模式切割text文档数据
            wordList = list(wordCut)    #将其转换为list[]数据

            dataList.append(wordList)   #将其加入dataList中
            classList.append(folder)
            j += 1
    return dataList,classList

"""
函数说明：压缩并混乱数据然后返回测试数据和训练数据，测试数据采用0.2
parameter:
    dataList - 数据源
    classList - 数据来源的路径亦或者是分类
return:
    trainDataList - 训练数据集合
    trainClassList -训练种类集合
    testDataList - 测试数据集
    testClassList - 测试种类集合
"""
def dealAndShuffleData(dataList,classList):
    dataClassList = list(zip(dataList,classList))   #将zip压缩合并，并将数据与标签对应压缩
    random.shuffle(dataClassList)
    testSize = 0.2  #选用的测试数据为20%
    index = int(len(dataClassList) * testSize) + 1  #切分训练数据和测试数据用的索引值
    trainData = dataClassList[index:]   #训练集
    testData = dataClassList[:index]    #测试集
    trainDataList,trainClassList = zip(*trainData)  #训练集解压缩
    testDataList,testClassList = zip(*testData) #测试集解压缩
    return trainDataList,trainClassList,testDataList,testClassList

"""
函数说明：统计词频
parameter:
    dataList - 可以是训练数据也可以是测试数据
return:
    wordsDict - 按照键值对大小排序好的词频库
"""
def getWordsDict(dataList):
    wordsDict = {} #先建立一个set()集合
    for wordList in dataList:   #判断是数据中的所包含的词是否都存在于wordsDict,规则如下
        for word in wordList:
            if word in wordsDict.keys():
                wordsDict[word] += 1
            else:
                wordsDict[word] = 1
    wordsDict = sorted(wordsDict.items(),key = lambda item:item[1],reverse = True)  #按键值大小由大到小排序
    return wordsDict

"""
函数说明：解压缩词频库并返回来列表
parameter:
    wordsDict - 词频库
return:
    wordsList - 词列表
    wordsNum - 对应的数字
"""
def analysWordDict(wordsDict):
    wordsList,wordsNum = zip(*wordsDict)    #解压缩词频库
    wordsList = list(wordsList)   #转换为列表形式
    return wordsList,wordsNum

"""
函数说明：得到无关紧要的高频词汇
parameter:
    stopWords - 无关词汇的文档路径
return:
    stopWordsSet - 所有无关紧要的词汇集合
"""
def getStopWordsSet(stopWords):
    stopWordsSet = set()
    with open(stopWords,'r',encoding = 'utf-8') as f:
        for line in f.readlines():
            word = line.strip() #按行分割
            if len(word) > 0 :  #当存在word时候
                stopWordsSet.add(word)
    return stopWordsSet

"""
函数说明：删除数据中的前100个无关词汇和数字还有标点符号
parameter:
    wordsList - 按倒叙排列的数据
    stopWordsSet - 无关词汇信息
    deleteN - 删除词频最高的deleteN个词
return:
    featureWords - 特征词汇集合
"""
def getFeatureWords(wordsList,deleteN,stopWordsSet):
    featureWords = []   #特征集合初始化
    n = 1
    for i in range(deleteN,len(wordsList),1):
        if n > 1000:    #featureWords的维度为1000
            break
        if not wordsList[i].isdigit() and wordsList[i] not in stopWordsSet and 1 < len(wordsList[i]) < 5:
            featureWords.append(wordsList[i])
        n += 1
    return featureWords

"""
函数说明：将featureWords向量化
parameter:
    trainDataList - 训练集
    testDataList - 测试集
    featureWords - 特征集
return:
    trainFeatureList - 训练集向量化列表
    testFeatureList - 测试集向量化列表
"""
def feartureWordsVec(trainDataList,testDataList,featureWords):

    trainFeatureList = [textFeatures(text,featureWords) for text in trainDataList]
    testFeatureList = [textFeatures(text,featureWords) for text in testDataList]
    return trainFeatureList,testFeatureList

def textFeatures(text,featureWords):
    textWords = set(text)
    features = [1 if word in textWords else 0 for word in featureWords]
    return features


"""
函数说明：新闻分类器
parameter:
    trainFeatureList - 训练集向量化的特征文本
    testFeatureList - 测试集向量化的特征文本
    trainClassList - 训练集分类标签
    testClassList - 测试集分类标签
return:
    testAccuracy - 分类器精度
"""
def textClassifier(trainFeatureList,testFeatureList,trainClassList,testClassList):
    classifier = MultinomialNB().fit(trainFeatureList,trainClassList)   #计算得到分类器标准
    testAccuracy = classifier.score(testFeatureList,testClassList)  #将测试集带入程序测验分数
    return testAccuracy


if __name__ == '__main__':
    # 1. Load and segment the corpus.
    folder_path = './SogouC/Sample'   # training-set root directory
    dataList, classList = TextProcessing(folder_path)

    # 2. Shuffle and split 80/20 into training and test sets.
    trainDataList, trainClassList, testDataList, testClassList = dealAndShuffleData(dataList, classList)

    # 3. Word-frequency statistics over the training set only.
    wordsDict = getWordsDict(trainDataList)
    wordsList, wordsNum = analysWordDict(wordsDict)

    # 4. Load the stop-word list.
    stopWords = './stopwords_cn.txt'
    stopWordsSet = getStopWordsSet(stopWords)

    testAccuracyList = []
    # NOTE: a sweep of deleteN over range(0, 1000, 20), plotting test
    # accuracy with matplotlib, showed the best score near deleteN=480;
    # 470 is the value used below.

    # 5. Build the feature vocabulary, vectorise, train and evaluate.
    featureWords = getFeatureWords(wordsList, 470, stopWordsSet)
    trainFeatureList, testFeatureList = feartureWordsVec(trainDataList, testDataList, featureWords)
    testAccuracy = textClassifier(trainFeatureList, testFeatureList, trainClassList, testClassList)
    print(testAccuracy)
