import random
import re

import jieba
import jieba.posseg as pseg
import numpy as np
from sklearn.externals import joblib
from sklearn.metrics import zero_one_loss
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier
from mood_bayes import trainingNaiveBayes, predict


def get_mood(t):
    """
    Map a 1-based numeric label to its mood name.

    :param t: label value (1=positive, 2=negative, 3=objective)
    :return: the mood name, or "未找到情绪" for any out-of-range label
    """
    mood_types = ["积极", "消极", "客观"]
    # The original only rejected t > len(mood_types); t <= 0 fell through
    # and negative indexing silently returned a wrong mood (e.g. t=0 -> "客观").
    if not 1 <= t <= len(mood_types):
        return "未找到情绪"
    return mood_types[t - 1]


def build_key_word(path):
    """
    Build the feature vocabulary from word frequencies in the training file.

    :param path: path to the training data set
    :return: list of the most frequent words (top 30% by count)
    """
    counts = {}
    # Hoisted: the original rebuilt set(stop) for every single token.
    stop_set = set(stop)
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            for w in pseg.cut(line.strip()):
                # 滤去地名 (drop place names)
                if w.flag == 'ns':
                    continue
                word = w.word
                result = shark_strs(word)
                if not result or result == ' ':  # empty after stripping
                    continue
                if result in stop_set:  # drop stop words
                    continue
                # NOTE(review): the stop-word test uses the stripped `result`
                # but the count key is the raw `word` — kept as-is; verify
                # this asymmetry is intentional.
                if len(word) > 1:  # single characters carry little meaning
                    counts[word] = counts.get(word, 0) + 1

    # Sort words by descending frequency and keep the top 30%.
    kw_list = sorted(counts, key=lambda k: counts[k], reverse=True)
    size = int(len(kw_list) * 0.3)
    mood = set(kw_list[:size])
    return list(mood)


# Compiled once at import time; with re.LOCALE under the default C locale,
# \w on bytes matches only ASCII [A-Za-z0-9_], so multi-byte UTF-8
# characters (e.g. Chinese) are untouched.
_ASCII_WORD_RE = re.compile(rb'\w', re.L)


def shark_strs(word):
    """
    Strip ASCII word characters ([A-Za-z0-9_]) from *word*, keeping
    non-ASCII text such as Chinese characters.

    :param word: input token (str)
    :return: the token with ASCII word characters removed
    """
    # Fixes: the original used a non-raw '\w' (invalid-escape warning in
    # Python 3.6+) and recompiled/encoded the pattern on every call.
    return _ASCII_WORD_RE.sub(b"", word.encode('utf-8')).decode('utf-8')


def loadDataSet(path):
    """
    Load tokenised sentences (weibo posts) and their labels.

    Each line is expected to begin with a 2-character numeric label
    followed by the post text.

    :param path: data file path
    :return: (list of per-sentence token lists, list of int labels)
    """
    line_cut = []
    label = []
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            temp = line.strip()
            try:
                tag = int(temp[:2])  # parse the label first...
            except ValueError:       # malformed/missing label -> skip line
                continue
            word_list = get_words(temp[2:].lstrip())
            # ...and append both only after everything succeeded. The
            # original appended the label BEFORE tokenising, so an
            # exception in get_words (swallowed by a broad except) could
            # leave `label` one entry longer than `line_cut`.
            label.append(tag)
            line_cut.append(word_list)
    return line_cut, label


def get_words(sentence):
    """
    Tokenise a sentence with jieba and drop empty/stop tokens.

    :param sentence: sentence to tokenise
    :return: de-duplicated list of kept tokens
    """
    cleaned = str(sentence).replace('\u200b', '')
    kept = []
    for token in jieba.cut(cleaned.strip()):
        stripped = shark_strs(token)
        # Skip tokens that are empty (or a lone space) once ASCII word
        # characters have been removed.
        if stripped and stripped != ' ':
            kept.append(token)
    # Remove stop words and a handful of invisible/whitespace characters.
    excluded = set(stop) | set('\u200b') | set(' ') | set('\u3000') | set('️')
    return list(set(kept) - excluded)


def setOfWordsToVecTor(vocabularyList, moodWords):
    """
    Vectorise one sentence as term counts over the feature vocabulary.

    :param vocabularyList: feature words
    :param moodWords: tokens of one sentence
    :return: numpy array of counts aligned with vocabularyList
    """
    # Build word -> first-position map once. The original ran a linear
    # `in` test plus a linear .index() for EVERY token: O(V) per token.
    index = {}
    for i, word in enumerate(vocabularyList):
        index.setdefault(word, i)  # keep first occurrence, like .index()
    vocabMarked = [0] * len(vocabularyList)
    for smsWord in moodWords:
        pos = index.get(smsWord)
        if pos is not None:
            vocabMarked[pos] += 1
    return np.array(vocabMarked)


def setOfWordsListToVecTor(vocabularyList, train_mood_array):
    """
    Vectorise every sentence against the feature vocabulary.

    :param vocabularyList: feature words
    :param train_mood_array: token lists, one per sentence
    :return: list of count vectors, one per sentence
    """
    return [setOfWordsToVecTor(vocabularyList, tokens)
            for tokens in train_mood_array]


def bayes_performance_test(train_set, label):
    """
    Compare sklearn's MultinomialNB against the hand-written NB.

    Holds out 500 randomly chosen samples (removed from the training
    copy, so there is no overlap), trains on the rest, then reports the
    error rate and fit scores for both implementations.

    :param train_set: list of count vectors
    :param label: matching list of int labels
    """
    train_set = train_set.copy()
    label = label.copy()
    test_word_array = []
    test_word_arrayLabel = []
    # Randomly move `testCount` samples into the test set, without
    # repetition (each picked sample is deleted from the training copy).
    testCount = 500
    for _ in range(testCount):
        if not train_set:  # fewer samples than testCount available
            break
        # randrange always yields a valid index. The original used
        # int(random.uniform(0, len(...))), which can (rarely) return
        # len(...) itself and relied on a broad except to survive the
        # resulting IndexError.
        randomIndex = random.randrange(len(train_set))
        test_word_arrayLabel.append(label[randomIndex])
        test_word_array.append(train_set[randomIndex])
        del train_set[randomIndex]
        del label[randomIndex]

    multi = MultinomialNB()
    multi = multi.fit(train_set, label)
    # joblib.dump(multi, '../../../model/gnb.model')
    # muljob = joblib.load('../../../model/gnb.model')
    result = multi.predict(test_word_array)
    errors = sum(1 for predicted, expected
                 in zip(result, test_word_arrayLabel)
                 if predicted != expected)
    print("error rate: ", errors / float(testCount))

    print("nb训练集集拟合分数：", multi.score(train_set, label))
    print("nb测试集拟合分数：", multi.score(test_word_array, test_word_arrayLabel))
    predict_test(multi)
    # Hand-written naive bayes, for comparison with sklearn's.
    PosWords, NegWords, NeutralWords, prior_Pos, prior_Neg, prior_Neutral = \
        trainingNaiveBayes(train_set, label)
    predict(test_word_array, test_word_arrayLabel, testCount, PosWords, NegWords, NeutralWords, prior_Pos,
            prior_Neg,
            prior_Neutral)


def adboosting_test(train_set, label):
    """
    Ensemble-learning test: AdaBoost with a linear SVM base learner.

    Holds out 500 randomly chosen samples as a test set (removed from
    the training copy so there is no overlap), fits the booster and
    prints train/test scores plus the staged test error per estimator.

    :param train_set: list of count vectors
    :param label: matching list of int labels
    """
    train_set = train_set.copy()
    label = label.copy()
    test_word_array = []
    test_word_arrayLabel = []
    # Randomly move `testCount` samples into the test set, without
    # repetition (each picked sample is deleted from the training copy).
    testCount = 500
    for _ in range(testCount):
        if not train_set:  # fewer samples than testCount available
            break
        # randrange always yields a valid index; the original's
        # int(random.uniform(0, len(...))) could (rarely) produce
        # len(...) and relied on a broad except to swallow the
        # resulting IndexError.
        randomIndex = random.randrange(len(train_set))
        test_word_arrayLabel.append(label[randomIndex])
        test_word_array.append(train_set[randomIndex])
        del train_set[randomIndex]
        del label[randomIndex]

    # Maximum number of base learners.
    n_estimators = 400
    # Learning rate (shrinkage applied to each learner's contribution).
    learning_rate = 1.0
    # SAMME (discrete) is required here: LinearSVC has no predict_proba,
    # so the real-valued SAMME.R variant cannot be used.
    ada_real = AdaBoostClassifier(
        base_estimator=svm.LinearSVC(),
        learning_rate=learning_rate,
        n_estimators=n_estimators,
        algorithm="SAMME")
    ada_real.fit(train_set, label)
    score = ada_real.score(train_set, label)
    print("adboosting 训练集拟合分数：", score)
    print("adboosting 测试集分数：", ada_real.score(test_word_array, test_word_arrayLabel))

    # Test-set error rate after each boosting stage.
    ada_real_err = np.zeros((n_estimators,))
    for i, y_pred in enumerate(ada_real.staged_predict(test_word_array)):
        ada_real_err[i] = zero_one_loss(y_pred, test_word_arrayLabel)
    print(ada_real_err)

    predict_test(ada_real)


def predict_test(mullib):
    """
    Interactive single-sentence prediction loop (never returns).

    :param mullib: a fitted classifier exposing .predict
    """
    while True:
        sentence = input("请输入评论：")
        tokens = get_words(sentence)
        vector = setOfWordsToVecTor(vocabList, tokens)
        prediction = mullib.predict([vector])
        print(get_mood(prediction[0]))


if __name__ == '__main__':
    # Initialisation: load the data and build the training feature
    # vectors train_mood_array plus the matching labels.

    jieba.load_userdict("../../../data/nlp/word.txt")  # user dictionary
    # Stop words. Fixed: the original called open() without ever closing
    # the file handle; a context manager guarantees it is released.
    with open('../../../data/nlp/ad/stop.txt', 'r', encoding='utf-8') as stop_fp:
        stop = [line.strip() for line in stop_fp]
    # Feature vocabulary.
    vocabList = build_key_word("../../../data/nlp/train.txt")
    print("特征词：", vocabList)
    # Tokenised sentences and their labels.
    line_cut, label = loadDataSet("../../../data/nlp/train.txt")
    # Project each token list onto the vocabulary -> count vectors.
    train_mood_array = setOfWordsListToVecTor(vocabList, line_cut)

    # Accuracy checks.
    # Naive bayes:
    # bayes_performance_test(train_mood_array, label)
    # Ensemble learning:
    adboosting_test(train_mood_array, label)

    # Naive bayes: persist a trained model.
    # model_lib_path = "nb.model"
    # muljob = joblib.load(model_lib_path)
    # multi_nb = MultinomialNB()
    # multi_nb = multi_nb.fit(train_mood_array, label)
    # joblib.dump(multi_nb, 'nb.model')
