import numpy as np
import re
import random
import pandas as pd
from sklearn.naive_bayes import GaussianNB, MultinomialNB, ComplementNB, BernoulliNB
from chp4_stop_words import get_stop_words


"""
文本分类：https://www.cnblogs.com/jiangxinyang/p/10241243.html
word2vec
textcnn
charcnn
bi-lstm
bi-lstm + attention
RCNN
Adversarial LSTM
transformer
ELMo
BERT
"""


class ClassifyNavieBayes(object):
    """Hand-rolled binary (0/1) naive Bayes text classifier.

    Documents are represented as set-of-words 0/1 vectors over the training
    vocabulary. ``sklearn_predict`` fits four scikit-learn NB variants on the
    same vectors for comparison.
    """

    def __init__(self, train_data, train_label):
        """
        :param train_data: list of tokenized documents (each a list of words)
        :param train_label: list of 0/1 labels aligned with train_data
        """
        self.train_data = train_data
        self.train_label = train_label

    def create_vocab_list(self):
        """
        Return the deduplicated vocabulary of the training corpus.

        The vocabulary is built once and cached on the instance: previously the
        whole corpus was re-flattened on every call, which made vectorizing n
        documents accidentally O(n * corpus). Caching also guarantees every
        vector uses one consistent word order.
        :return: list of unique words
        """
        vocab = getattr(self, '_vocab_cache', None)
        if vocab is None:
            vocab = list({word for doc in self.train_data for word in doc})
            self._vocab_cache = vocab
        return vocab

    def words_2_vec(self, data):
        """
        Vectorize ``data`` against the vocabulary: 1 where the vocabulary word
        occurs in ``data``, else 0.
        :param data: tokenized document (list of words)
        :return: list of 0/1 ints, one per vocabulary word
        """
        present = set(data)  # O(1) membership instead of scanning the list per word
        return [1 if word in present else 0 for word in self.create_vocab_list()]

    @staticmethod
    def train_naive_bayes(data, label):
        """
        Train the naive Bayes model.

        Per-class word counts start at one (Laplace-style smoothing, so no word
        ends up with zero probability); the log is taken of the normalized
        counts (+1 keeps the logs positive).
        :param data: vectorized documents (list of 0/1 vectors)
        :param label: list of 0/1 labels
        :return: (class-0 word log-weights, P(label=0),
                  class-1 word log-weights, P(label=1))
        """
        data_length = len(data)
        word_length = len(data[0])

        # Accumulate per-class word counts, seeded with ones for smoothing.
        p_words = dict()
        for i in range(data_length):
            p_words[label[i]] = p_words.get(label[i], np.ones(word_length)) + np.array(data[i])

        p0_words = np.ones(len(p_words))
        p1_words = np.ones(len(p_words))
        for k, v in p_words.items():
            if k == 0:
                p0_words = np.log(v / v.sum() + 1)
            else:
                p1_words = np.log(v / v.sum() + 1)
        return p0_words, label.count(0) / len(label), p1_words, label.count(1) / len(label)

    def train(self):
        """
        Vectorize the training corpus and fit the model; stores the per-class
        word weights and priors on the instance.
        :return: None
        """
        # Vectorize the raw training documents.
        data_vec = [self.words_2_vec(data=info) for info in self.train_data]

        # Bayes training.
        self.p0_words, self.p0_label, self.p1_words, self.p1_label = \
            self.train_naive_bayes(data=data_vec, label=self.train_label)

    def predict_naive_bayes(self, vec_words):
        """
        Score a vectorized document against both classes (log-space Bayes rule)
        and return the winning label.
        :param vec_words: 0/1 vector for the document
        :return: 0 or 1
        """
        p0 = sum(vec_words * self.p0_words) + np.log(self.p0_label)
        p1 = sum(vec_words * self.p1_words) + np.log(self.p1_label)

        return 0 if p0 > p1 else 1

    def predict(self, data):
        """
        Predict the label of a tokenized document.
        :param data: list of words
        :return: 0 or 1
        """
        vec_data = self.words_2_vec(data=data)
        return self.predict_naive_bayes(vec_words=vec_data)

    def sklearn_predict(self, data):
        """
        Fit four scikit-learn naive Bayes variants on the training set and
        predict ``data`` with each.
        :param data: list of words
        :return: tuple of predictions (Gaussian, Multinomial, Complement, Bernoulli)
        """
        data_vec = [self.words_2_vec(data=info) for info in self.train_data]
        test_vec = self.words_2_vec(data=data)
        # sklearn expects a 2-D sample matrix; wrap a single vector.
        test_vec = test_vec if len(np.shape(test_vec)) > 1 else [test_vec]

        models = [GaussianNB(), MultinomialNB(), ComplementNB(), BernoulliNB()]
        return tuple(model.fit(data_vec, self.train_label).predict(test_vec)
                     for model in models)


def get_data():
    """
    Toy data set: six short postings plus 0/1 labels.
    :return: (tokenized postings, labels)
    """
    sentences = [
        'my dog has flea problems help please',
        'maybe not take hime to dog park stupid',
        'my dalmation is so cute I love hime',
        'stop posting stupid worthless garbage',
        'mr licks ate my steak how to stop hime',
        'quit buying worthless dog food stupid',
    ]
    tokenized = [sentence.split(' ') for sentence in sentences]
    labels = [0, 1, 0, 1, 0, 1]  # 0: normal speech; 1: abusive speech
    return tokenized, labels


def trans_message2arr(message):
    """
    Tokenize: split on runs of non-word characters, lowercase, and keep only
    tokens longer than two characters.
    :param message: raw text
    :return: list of lowercase tokens
    """
    tokens = re.split(r'\W+', message)
    return [token.lower() for token in tokens if len(token) > 2]


def read_file(file):
    """
    Read a text file and return its tokenized content.
    :param file: path to the file
    :return: list of lowercase tokens (see trans_message2arr)
    """
    with open(file, 'r') as f:
        return trans_message2arr(message=f.read())


def main_words():
    """Demo: classify two short token lists with both implementations."""
    # Fetch the toy data set
    data, labels = get_data()

    samples = (['love', 'my', 'dalmation'], ['stupid', 'garbage'])

    classify_nb = ClassifyNavieBayes(train_data=data, train_label=labels)
    classify_nb.train()

    # Hand-rolled and sklearn predictions for each sample
    results = [(sample, classify_nb.predict(data=sample), classify_nb.sklearn_predict(data=sample))
               for sample in samples]

    (test1, pre1, sk_pre1), (test2, pre2, sk_pre2) = results
    print('{}: code: {},sklearn: {}; {}: code: {},sklearn: {}'.format(test1, pre1, sk_pre1, test2, pre2, sk_pre2))


def main_email():
    """Spam-detection demo on the chp4 ham/spam email files."""
    # Running-count indices reserved for the test split.
    test_index = random.sample(range(1, 44), 10)

    train_data, train_class = [], []
    test_data, test_class = [], []

    for i in range(1, 23):
        for label in ['ham', 'spam']:
            label_flag = 0 if label == 'ham' else 1
            words = read_file('./data/chp4/{}/{}.txt'.format(label, i))

            # Position of this file in the overall sequence decides its split.
            if len(train_class) + len(test_class) in test_index:
                test_data.append(words)
                test_class.append(label_flag)
            else:
                train_data.append(words)
                train_class.append(label_flag)

    print('训练集数量： {}， 测试集数量： {}'.format(len(train_data), len(test_data)))
    classify_nb = ClassifyNavieBayes(train_data=train_data, train_label=train_class)
    classify_nb.train()

    # Only count spam mails predicted as ham, per model:
    # [hand-rolled, GaussianNB, MultinomialNB, ComplementNB, BernoulliNB]
    errors = [0, 0, 0, 0, 0]
    for num, test in enumerate(test_data):
        predictions = (classify_nb.predict(data=test),) + classify_nb.sklearn_predict(data=test)
        if test_class[num] == 1:
            for idx, pred in enumerate(predictions):
                if pred == 0:
                    errors[idx] += 1

    rates = [count / len(test_class) for count in errors]
    print('错误率: code: {}, sklearn: {} {} {} {}'.format(*rates))


def main_mis():
    """
    New York vs. San Francisco personal (dating) ads: classify which city an
    ad came from and inspect each region's word preferences.
    :return:
    """
    def train_test_split(data, ratio=0.8):
        """
        Split into train and test sets at the given ratio via random sampling.
        :param data: [words, classes] pair of parallel lists
        :param ratio: fraction of samples used for training
        :return: train_words, train_class, test_words, test_class
        """
        words, classes = data
        train_size = int(len(words) * ratio)
        train_index = random.sample(range(len(words)), train_size)
        train_words = []
        train_class = []
        test_words = []
        test_class = []
        for num, word in enumerate(words):
            if num in train_index:
                train_words.append(word)
                train_class.append(classes[num])
            else:
                test_words.append(word)
                test_class.append(classes[num])
        return train_words, train_class, test_words, test_class

    def calc_most_freq(arr, topnum=30):
        """
        Return the ``topnum`` most frequent words across all documents.
        :param arr: list of tokenized documents
        :param topnum: number of words to keep
        :return: list of words, most frequent first
        """
        arr = [word for data in arr for word in data]
        words_count = {word: arr.count(word) for word in set(arr)}
        words_sorted = sorted(words_count.items(), key=lambda d: d[1], reverse=True)
        freq_words = words_sorted[: topnum]
        return [word[0] for word in freq_words]

    def load_data():
        """
        Load the New York and San Francisco personal ads from CSV.
        :return: (sfbay docs, newyork docs) as lists of word lists
        """
        en_words, _ = get_stop_words()

        file = './data/chp4/city_mis_ad.csv'
        df = pd.read_csv(file)
        sfbay = []
        newyork = []
        for index, row in df.iterrows():
            sfbay_words = trans_message2arr(message=row['sfbay'])
            newyork_words = trans_message2arr(message=row['newyork'])
            # Drop English stop words
            sfbay_words = [word for word in sfbay_words if word not in en_words]
            newyork_words = [word for word in newyork_words if word not in en_words]

            sfbay.append(sfbay_words)
            newyork.append(newyork_words)
        return sfbay, newyork

    sf, ny = load_data()
    freq_words = calc_most_freq(arr=sf + ny)
    print('freq_words: {}'.format(freq_words))

    # Keep only high-frequency words per ad; label New York = 1, SF Bay = 0.
    # Ads that retain no high-frequency words are dropped.
    all_words = []
    all_class = []
    for num in range(min(len(sf), len(ny))):
        ny_arr = []
        for word in ny[num]:
            if word in freq_words:
                ny_arr.append(word)
        if ny_arr:
            all_words.append(ny_arr)
            all_class.append(1)

        sf_arr = []
        for word in sf[num]:
            if word in freq_words:
                sf_arr.append(word)
        if sf_arr:
            all_words.append(sf_arr)
            all_class.append(0)

    print('all_words: {}'.format(len(all_words)))
    train_words, train_class, test_words, test_class = train_test_split(data=[all_words, all_class])
    print('训练集数量： {}， 测试集数量： {}'.format(len(train_words), len(test_words)))
    classify_nb = ClassifyNavieBayes(train_data=train_words, train_label=train_class)
    classify_nb.train()

    vec_words = classify_nb.create_vocab_list()
    p0_words = classify_nb.p0_words
    p1_words = classify_nb.p1_words

    # Regional word preference: sort each class's word weights descending.
    # NOTE(review): zipping vocab with weights assumes create_vocab_list()
    # returns the same word order used during training — confirm.
    p0_dict = dict(zip(vec_words, p0_words))
    p1_dict = dict(zip(vec_words, p1_words))
    p0_words_sorted = sorted(p0_dict.items(), key=lambda d: d[1], reverse=True)
    p1_words_sorted = sorted(p1_dict.items(), key=lambda d: d[1], reverse=True)
    print([word[0] for word in p0_words_sorted])
    print([word[0] for word in p1_words_sorted])

    # Only count class-1 (New York) ads predicted as class 0, per model.
    error_count = 0
    gnb_error_count = 0
    mnb_error_count = 0
    cnb_error_count = 0
    bnb_error_count = 0
    for num, test in enumerate(test_words):
        pre = classify_nb.predict(data=test)
        sk_gnb, sk_mnb, sk_cnb, sk_bnb = classify_nb.sklearn_predict(data=test)
        if test_class[num] == 1:
            if pre == 0:
                error_count += 1
            if sk_gnb == 0:
                gnb_error_count += 1
            if sk_mnb == 0:
                mnb_error_count += 1
            if sk_cnb == 0:
                cnb_error_count += 1
            if sk_bnb == 0:
                bnb_error_count += 1

    print('错误率: code: {}, sklearn: {} {} {} {}'.format(error_count / len(test_class),
                                                       gnb_error_count / len(test_class),
                                                       mnb_error_count / len(test_class),
                                                       cnb_error_count / len(test_class),
                                                       bnb_error_count / len(test_class)))


if __name__ == '__main__':
    # Abusive-language demo
    # main_words()
    # Spam email detection
    # main_email()
    # New York vs. San Francisco personal ads: regional preference from ads
    main_mis()
