# -*- coding: utf-8 -*-  
import os
import pickle
import re
import shutil

import jieba
import openpyxl
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

# 由搜狗语料库 生成数据
from sklearn.utils import Bunch

# --- module-level configuration -------------------------------------------
# FIX: was '.\sougou_all' — '\s' is an invalid escape sequence (SyntaxWarning
# on modern Python); forward slash matches the path style used everywhere else.
folder_path = './sougou_all'
jieba.set_dictionary("./dict.txt")  # use the local dictionary for segmentation
jieba.initialize()  # load the dictionary eagerly instead of on the first cut()
class_list = []
nClass = 0
# NOTE(review): original comment said "at most 2500 samples per class,
# 80% train / 20% test" but the value is 500 — confirm which is intended.
N = 500
train_set = []  # training set
test_set = []  # test set
all_words = {}
content_S = []  # holds jieba-segmented contents
# Chinese category name -> pinyin directory name
mydic = {"财经": 'caijing', "房产": 'fangchan', "教育": 'jiaoyu', "科技": 'keji', "军事": 'junshi', "汽车": 'qiche', "体育": 'tiyu',
         "游戏": 'youxi', "娱乐": 'yule', "其他": 'qita'}
# pinyin directory name -> Chinese category name (used when writing results back)
reversedic = {"caijing": '财经', "fangchan": '房产', "jiaoyu": '教育', "keji": '科技', "junshi": '军事', "qiche": '汽车',
              "tiyu": '体育',
              "youxi": '游戏', "yule": '娱乐', "qita": '其他'}


def drop_stopwords(contents, stopwords):
    """Remove stop words from tokenized documents.

    Args:
        contents: iterable of token lists, one list per document.
        stopwords: iterable of stop words to drop.

    Returns:
        (contents_clean, all_words_cl): the cleaned documents in input order,
        and a flat list of every surviving token, stringified.
    """
    # Hoist into a set once: O(1) membership instead of an O(n) list scan per word.
    stopword_set = set(stopwords)
    contents_clean = []
    all_words_cl = []
    for line in contents:
        line_clean = [word for word in line if word not in stopword_set]
        contents_clean.append(line_clean)  # cleaned document
        all_words_cl.extend(str(word) for word in line_clean)  # all surviving words
    return contents_clean, all_words_cl


def readFile(path):
    """Read a UTF-8 text file and return its full contents as a string."""
    # Some corpus files contain malformed byte sequences; errors='ignore' skips them.
    with open(path, 'r', errors='ignore', encoding='utf-8') as file:
        return file.read()


def saveFile(path, result):
    """Write *result* to *path* as UTF-8 text, ignoring unencodable characters."""
    with open(path, 'w', errors='ignore', encoding='utf-8') as out:
        out.write(result)


def specificClassSegText(classname, inputPath, resultPath):
    """Run jieba segmentation over every text file of a single class.

    E.g. classname='youxi', inputPath='./sougou_all/', resultPath='./sougou_cut/'
    reads each file under ./sougou_all/youxi/ and writes its space-joined
    segmentation to ./sougou_cut/youxi/ under the same file name.
    """
    src_dir = inputPath + classname + "/"    # e.g. ./sougou_all/youxi/
    dst_dir = resultPath + classname + "/"   # e.g. ./sougou_cut/youxi/
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    for fname in os.listdir(src_dir):        # e.g. 1.txt
        # Strip blank lines / surrounding whitespace before segmenting.
        raw = (str(readFile(src_dir + fname))).replace("\r\n", "").strip()
        tokens = jieba.cut(raw)              # default-mode segmentation
        saveFile(dst_dir + fname, " ".join(tokens))


def segText(inputPath, resultPath):
    """Segment every file under every class directory of *inputPath* with jieba,
    mirroring the directory layout into *resultPath*."""
    for category in os.listdir(inputPath):       # one sub-directory per class
        src_dir = inputPath + category + "/"
        dst_dir = resultPath + category + "/"
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        for fname in os.listdir(src_dir):        # each file of this class
            # Strip blank lines / surrounding whitespace before segmenting.
            raw = (str(readFile(src_dir + fname))).replace("\r\n", "").strip()
            # Default-mode segmentation, tokens joined by spaces.
            saveFile(dst_dir + fname, " ".join(jieba.cut(raw)))


def bunchSave(inputFile, outputFile):
    """Collect the segmented corpus under *inputFile* into a Bunch and pickle it.

    The Bunch holds: target_name (the category directory names), label (the
    category of each file), filenames (full file paths) and contents (the
    stripped text of each file).
    """
    categories = os.listdir(inputFile)
    bunch = Bunch(target_name=[], label=[], filenames=[], contents=[])
    bunch.target_name.extend(categories)
    for category in categories:              # each jieba-segmented class dir
        cat_dir = inputFile + category + "/"
        for fname in os.listdir(cat_dir):    # each file of this class
            full_path = cat_dir + fname
            bunch.label.append(category)     # class label of this file
            bunch.filenames.append(full_path)
            bunch.contents.append(readFile(full_path).strip())
    # Pickling requires a binary-mode file handle.
    with open(outputFile, 'wb') as file_obj:
        pickle.dump(bunch, file_obj)


def readBunch(path):
    """Unpickle and return the object stored at *path*."""
    with open(path, 'rb') as fh:
        return pickle.load(fh)


def writeBunch(path, bunchFile):
    """Pickle *bunchFile* to *path* (binary mode)."""
    with open(path, 'wb') as fh:
        pickle.dump(bunchFile, fh)


def getStopWord(inputFile):
    """Return the stop-word file's lines as a list of strings."""
    return readFile(inputFile).splitlines()


def getTFIDFMat(inputPath, stopWordList, outputPath):
    """Build the training TF-IDF space from a pickled Bunch and persist it.

    Reads the Bunch at *inputPath*, fits a TfidfVectorizer on its contents,
    and writes a new Bunch (with the tdm matrix and the fitted vocabulary)
    to *outputPath*.
    """
    bunch = readBunch(inputPath)
    tfidfspace = Bunch(target_name=bunch.target_name, label=bunch.label,
                       filenames=bunch.filenames, tdm=[], vocabulary={})
    # sublinear_tf: use 1 + log(tf); max_df=0.5 drops terms in >50% of docs.
    vectorizer = TfidfVectorizer(stop_words=stopWordList, sublinear_tf=True, max_df=0.5)
    # FIX: removed the unused `transformer = TfidfTransformer()` local —
    # TfidfVectorizer already computes TF-IDF weights on its own.
    tfidfspace.tdm = vectorizer.fit_transform(bunch.contents)
    tfidfspace.vocabulary = vectorizer.vocabulary_  # kept for the test phase
    writeBunch(outputPath, tfidfspace)


def getTestSpace(testSetPath, trainSpacePath, stopWordList, testSpacePath):
    """Build the test-set TF-IDF space using the training vocabulary and persist it.

    Reads the test Bunch at *testSetPath*, vectorizes its contents with the
    vocabulary stored in the training space at *trainSpacePath* (so test
    columns align with the classifier's features), and writes the resulting
    Bunch to *testSpacePath*.
    """
    bunch = readBunch(testSetPath)
    testSpace = Bunch(target_name=bunch.target_name, label=bunch.label,
                      filenames=bunch.filenames, tdm=[], vocabulary={})
    # Load the training word bag to reuse its vocabulary.
    trainbunch = readBunch(trainSpacePath)
    vectorizer = TfidfVectorizer(stop_words=stopWordList, sublinear_tf=True, max_df=0.5,
                                 vocabulary=trainbunch.vocabulary)
    # FIX: removed the unused `transformer = TfidfTransformer()` local.
    testSpace.tdm = vectorizer.fit_transform(bunch.contents)
    testSpace.vocabulary = trainbunch.vocabulary
    # Persist the test space.
    writeBunch(testSpacePath, testSpace)


def getTestSpaceBySingleSentence(sentence, trainSpacePath, stopWordList):
    """Classify ad-hoc text against the stored training space and print the label.

    Args:
        sentence: iterable of document strings (e.g. a one-element list).
        trainSpacePath: path of the pickled training TF-IDF Bunch.
        stopWordList: stop words for the vectorizer.
    """
    # Load the training word bag.
    trainbunch = readBunch(trainSpacePath)
    # Vectorize with the training vocabulary so columns match the classifier.
    vectorizer = TfidfVectorizer(stop_words=stopWordList, sublinear_tf=True, max_df=0.5,
                                 vocabulary=trainbunch.vocabulary)
    # BUG FIX: the original set an attribute on a sparse matrix
    # (`b.tdm = ...`) and read a non-existent `sentence.contents`, which
    # raises at runtime; fit_transform on the iterable itself is correct.
    tdm = vectorizer.fit_transform(sentence)
    clf = MultinomialNB(alpha=0.001).fit(trainbunch.tdm, trainbunch.label)
    predicted = clf.predict(tdm)
    print("预测类别： ", predicted[0])



def bayesAlgorithmToGUI(trainPath, testPath, tc, XLS_Source):
    """Train a Multinomial NB classifier, predict the test set, and write the
    predicted Chinese category names into column 2 of the source workbook.

    *tc* is a GUI text control used for progress messages.
    """
    trainSet = readBunch(trainPath)
    testSet = readBunch(testPath)
    tc.AppendText("我开始预测了啊\n")
    # Smaller alpha means weaker smoothing (the original notes higher accuracy).
    clf = MultinomialNB(alpha=0.001).fit(trainSet.tdm, trainSet.label)
    predicted = clf.predict(testSet.tdm)
    workbook = openpyxl.load_workbook(XLS_Source)
    sheet = workbook["类别"]
    for filename, predict in zip(testSet.filenames, predicted):
        # The file name carries the workbook row number (e.g. .../7.txt -> row 7).
        row = int(re.findall(r"\d+?\d*", filename)[0])
        # Map the pinyin label back to its Chinese category name.
        sheet.cell(row=row, column=2).value = reversedic.get(predict)
    workbook.save(XLS_Source)
    tc.AppendText("预测完毕\n")


# Convert the workbook's rows into individual txt files so they can be analysed.
def XLSToTxt(inputPath, outputpath):  # e.g. outputpath='./test/'
    """Export columns 3 and 4 of the first worksheet, one txt file per data row.

    Row *i* becomes <outputpath>all/<i>.txt containing "col3,col4".
    """
    wb = openpyxl.load_workbook(inputPath)
    # wb.get_sheet_by_name(name) is deprecated; index the workbook with wb[name].
    names = wb.sheetnames
    sheet = wb[names[0]]
    maxRow = sheet.max_row
    if not os.path.exists(outputpath + "all"):
        os.makedirs(outputpath + "all/")
    # openpyxl rows are 1-based and row 1 is the header, so data starts at row 2.
    for irows in range(2, maxRow + 1):
        # NOTE(review): assumes both cells hold strings — a None or numeric cell
        # would raise TypeError here; confirm against the workbook format.
        contents = sheet.cell(row=irows, column=3).value + "," + sheet.cell(row=irows, column=4).value
        # FIX: str(irows) directly; the original's irows.__index__() was a no-op.
        saveFile(outputpath + "all" + "/" + str(irows) + ".txt", contents)


def del_file(path):
    """Recursively delete the directory *path* and everything under it.

    Note: despite the name (and the original comment, which only claimed to
    empty the folder), this removes the directory itself as well — the
    original recursion ended with os.rmdir(path). shutil.rmtree is the
    stdlib equivalent of that hand-rolled walk.
    """
    shutil.rmtree(path)


def _remove_intermediate_files():
    """Delete the temp dirs/files produced by a (previous) analysis run."""
    for d in ("./test/", "./test_segResult/"):
        if os.path.exists(d):
            del_file(d)
    for f in ("./test_set.dat", "./testspace.dat"):
        if os.path.exists(f):
            os.remove(f)


def StartAnalysis(XLS_Source, tc2):
    """Run the full pipeline on *XLS_Source*: export rows to txt, segment with
    jieba, vectorize against the stored training space, classify, and write
    the predictions back into the workbook.

    *tc2* is a GUI text control used for progress messages.
    """
    # The identical cleanup stanza appeared twice in the original; it is now
    # a single helper called before and after the run.
    _remove_intermediate_files()  # start from a clean slate
    tc2.AppendText("正在分析文件...\n")
    XLSToTxt(XLS_Source, "./test/")
    segText("./test/", "./test_segResult/")
    bunchSave("./test_segResult/", "./test_set.dat")
    stopWordList = getStopWord("./stop_words.txt")
    getTestSpace("./test_set.dat", "./tfidf_space.dat", stopWordList, "./testspace.dat")
    bayesAlgorithmToGUI("./tfidf_space.dat", "./testspace.dat", tc2, XLS_Source)
    tc2.AppendText("请等我执行一些清扫工作... \n")
    _remove_intermediate_files()  # clean up behind ourselves
    tc2.AppendText("所有工作都已完成，可以关闭了！ \n")