import os
import pandas as pd
import joblib
import jieba
from numpy import *
import numpy as np
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.datasets.base import Bunch
from sklearn.naive_bayes import MultinomialNB  # 多项式贝叶斯算法

def readFlie(path):    # read one sample record; by default one file holds one sample
    """Return the full text content of the file at ``path``.

    Decode errors are ignored so malformed bytes do not abort processing.
    (Name keeps the original "readFlie" spelling — callers depend on it.)
    """
    # `with` closes the file automatically; the old explicit close() was redundant.
    with open(path, 'r', errors='ignore') as file:
        return file.read()

def readList(path):    # read one csv and return the list of samples from its "title" column
    """Load the csv at ``path`` and return its "title" column as an array."""
    frame = pd.read_csv(path, encoding='utf-8')
    titles = frame["title"]
    return np.array(titles)

def saveFile(path, result):
    """Write the string ``result`` to ``path``, ignoring encode errors."""
    # `with` closes the file automatically; the old explicit close() was redundant.
    with open(path, 'w', errors='ignore') as file:
        file.write(result)

def segText(inputPath, resultPath):
    """Segment every sample under ``inputPath`` with jieba and write one
    space-joined ``<n>.txt`` per sample into the mirrored ``resultPath`` tree.

    Layout assumed: inputPath/<category>/<file>.csv -> resultPath/<category>/<n>.txt
    """
    fatherLists = os.listdir(inputPath)  # category directories under the root
    for eachDir in fatherLists:  # iterate over each category directory
        eachPath = inputPath + eachDir + "/"  # source directory for this category
        each_resultPath = resultPath + eachDir + "/"  # where segmentation results go
        if not os.path.exists(each_resultPath):
            os.makedirs(each_resultPath)
        childLists = os.listdir(eachPath)  # csv files inside this category
        # BUG FIX: the counter used to restart at 1 for every csv file, so a
        # second csv in the same category overwrote the first one's
        # 1.txt, 2.txt, ...  Number samples across all csv files instead.
        name = 1
        for eachFile in childLists:  # iterate over the csv files of the dataset
            eachPathFile = eachPath + eachFile
            content = readList(eachPathFile)
            for item in content:
                result = (str(item)).replace("\r\n", "").strip()
                cutResult = jieba.cut(result)  # jieba word segmentation
                saveFile(each_resultPath + str(name) + '.txt', " ".join(cutResult))
                name += 1

def bunchSave(inputFile, outputFile):   # collect segmentation results into a pickled word-vector Bunch
    """Collect the segmented samples under ``inputFile`` into a Bunch and
    pickle it to ``outputFile``.

    Each sub-directory of ``inputFile`` is one category; each .txt file
    inside it is one segmented sample.
    """
    catelist = os.listdir(inputFile)  # one directory per category
    bunch = Bunch(target_name=[], label=[], filenames=[], contents=[])
    bunch.target_name.extend(catelist)  # record every category name
    for eachDir in catelist:   # walk each category
        eachPath = inputFile + eachDir + "/"
        fileList = os.listdir(eachPath)
        for eachFile in fileList:
            fullName = eachPath + eachFile  # path of one sample file
            bunch.label.append(eachDir)     # the directory name is the label
            bunch.filenames.append(fullName)  # remember the sample's path
            bunch.contents.append(readFlie(fullName).strip())  # segmented text
    # BUG FIX: the dump used to sit inside the category loop, rewriting the
    # pickle once per category; serialize the finished Bunch exactly once.
    with open(outputFile, 'wb') as file_obj:  # pickle requires binary mode
        pickle.dump(bunch, file_obj)

def readBunch(path):
    """Deserialize and return the pickled Bunch stored at ``path``."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)

def writeBunch(path, resultFile):
    """Pickle the object ``resultFile`` to the file at ``path``."""
    with open(path, 'wb') as handle:
        pickle.dump(resultFile, handle)

def getStopWord(inputFile):
    """Return the stop-word list: one word per line of ``inputFile``."""
    return readFlie(inputFile).splitlines()

def getTFIDFMat(inputPath, stopWordList, outputPath):
    """Build the TF-IDF space for the training Bunch at ``inputPath`` and
    persist it (sparse matrix + vocabulary) to ``outputPath``."""
    train = readBunch(inputPath)
    space = Bunch(target_name=train.target_name, label=train.label,
                  filenames=train.filenames, tdm=[], vocabulary={})
    # Weighting: sublinear tf, drop terms in >50% of documents, remove stop words.
    vectorizer = TfidfVectorizer(stop_words=stopWordList, sublinear_tf=True, max_df=0.5)
    # Convert the segmented contents into a sparse term-weight matrix.
    space.tdm = vectorizer.fit_transform(train.contents)
    space.vocabulary = vectorizer.vocabulary_  # keep the fitted vocabulary
    writeBunch(outputPath, space)  # persist the vector space

def getTestSpace(testSetPath, trainSpacePath, stopWordList, testSpacePath):
    """Vectorize the test Bunch against the training vocabulary and persist
    the resulting TF-IDF space to ``testSpacePath``."""
    testBunch = readBunch(testSetPath)
    trainSpace = readBunch(trainSpacePath)  # only its vocabulary is used here
    # Build the test-set TF-IDF container.
    testSpace = Bunch(target_name=testBunch.target_name, label=testBunch.label,
                      filenames=testBunch.filenames, tdm=[], vocabulary={})
    # The training vocabulary fixes the feature columns of the test matrix.
    # NOTE(review): fit_transform re-estimates IDF from the *test* corpus;
    # reusing the fitted training vectorizer (transform only) would be the
    # statistically correct choice — confirm before changing behavior.
    vectorizer = TfidfVectorizer(stop_words=stopWordList, sublinear_tf=True,
                                 max_df=0.5, vocabulary=trainSpace.vocabulary)
    testSpace.tdm = vectorizer.fit_transform(testBunch.contents)
    testSpace.vocabulary = trainSpace.vocabulary
    writeBunch(testSpacePath, testSpace)  # persist


def bayesAlgorithm(trainPath, testPath):  # naive Bayes classification
    """Train a multinomial Naive Bayes model on the training TF-IDF space,
    save it to ./model/clf.pkl, then print error count, total count and
    accuracy on the test space."""
    trainSet = readBunch(trainPath)   # training word-space Bunch
    testSet = readBunch(testPath)     # test word-space Bunch
    # alpha is the Laplace/Lidstone smoothing parameter.
    clf = MultinomialNB(alpha=0.001).fit(trainSet.tdm, trainSet.label)
    # BUG FIX: joblib.dump fails when ./model does not exist — create it first.
    os.makedirs('./model', exist_ok=True)
    joblib.dump(clf, './model/clf.pkl')  # persist the trained model
    predicted = clf.predict(testSet.tdm)
    total = len(predicted)
    errors = 0  # number of misclassified samples
    for flabel, fileName, expct_cate in zip(testSet.label, testSet.filenames, predicted):
        if flabel != expct_cate:
            errors += 1
            # print(fileName, ":实际类别：", flabel, "-->预测类别：", expct_cate)
    print("预测错误数目为:", str(errors))
    print("测试总数目为:" ,str(total))
    error_rate = float(errors) / float(total)  # fixed typo: was "erroe_rate"
    print("准确率:", str(1 - error_rate))


if __name__ == '__main__':
    # Pipeline: segment -> pickle Bunch -> TF-IDF -> train & evaluate NB model.
    datapath = "./train/"  # raw training-set directory
    stopWord_path = "./stop/stopword.txt"  # stop-word file
    test_path = "./test/"  # raw test-set directory

    split_datapath = "./split/split_data/"  # segmented training data output
    test_split_path = "./split/test_split/"  # segmented test data output

    train_dat_path = "./dat_list/train_set.dat"  # pickled training word-vector Bunch
    test_split_dat_path = "./dat_list/test_set.dat"  # pickled test word-vector Bunch
    testspace_dat_path = "./dat_list/testspace.dat"  # test-set TF-IDF space (dat)
    tfidfspace_dat_path = "./dat_list/tfidfspace.dat"  # training TF-IDF space (dat)

    # --- training pipeline ---
    segText(datapath,  # read raw data
            split_datapath)  # write segmentation results
    bunchSave(split_datapath,  # read segmentation results
              train_dat_path)  # write the pickled word-vector Bunch
    stopWordList = getStopWord(stopWord_path)  # load the stop-word list
    getTFIDFMat(train_dat_path,  # read the pickled word-vector Bunch
                stopWordList,  # stop words
                tfidfspace_dat_path)   # write the training TF-IDF space

    # --- test pipeline ---
    segText(test_path,
            test_split_path)  # segment the test set
    bunchSave(test_split_path,
              test_split_dat_path)  # pickle the test word-vector Bunch
    getTestSpace(test_split_dat_path,
                 tfidfspace_dat_path,
                 stopWordList,
                 testspace_dat_path)  # build the test feature space using the training vocabulary
    bayesAlgorithm(tfidfspace_dat_path,
                   testspace_dat_path)  # train, save and evaluate the classifier





