from pyspark.ml.classification import NaiveBayes, NaiveBayesModel
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
from pyspark.sql import Row
import re
import os
import jieba
import jieba.posseg
import pickle


# Directory of labelled question files; each filename embeds its numeric class label.
data_dir = './newquestion/'
# Vocabulary file; load() keeps the token after the last ':' on each line
# (presumably "rank:word" or "count:word" — confirm against the file).
vocabularys = './newdict/vocabulary.txt'


class NaiveBayesModelMe:
    """Naive Bayes question classifier built on PySpark and jieba.

    Workflow:
        load() -- segment the labelled question files, turn every line into a
                  binary bag-of-words vector and pickle the training data.
        fit()  -- train a multinomial NaiveBayes model on the pickled data
                  and save it to ``self.model_path``.
        test() -- vectorize one sentence and return its predicted class index.
    """

    def __init__(self):
        # High-frequency word list; populated by load().
        self.vocabularys = []
        # Where fit() persists the trained Spark model.
        self.model_path = "./nb_model"
        # Create (or reuse) a Spark session for DataFrame construction.
        self.spark = SparkSession \
            .builder \
            .appName('my_first_app_name') \
            .getOrCreate()

    def load(self, data_dir, vocabularys):
        """Build and pickle the training set from the question files.

        Args:
            data_dir: directory of question files (one class per file); the
                digits in each filename are used as the class label.
            vocabularys: path to the vocabulary file; the word is the token
                after the last ':' on each line.

        Side effects:
            Writes ./newdict/data.pkl and ./newdict/vocabulary.pkl, and sets
            self.vocabularys, self.x and self.train_data.
        """
        # Read the high-frequency vocabulary: keep the part after the final ':'.
        with open(vocabularys, encoding='utf-8') as f:
            vocabulary = [line.split(':')[-1] for line in f.read().splitlines()]
        self.vocabularys = vocabulary

        # Collect the raw lines of every non-hidden data file; self.x keeps the
        # filenames in the same order as texts.
        # Fix: self.x was previously reset on every os.walk() iteration while
        # texts kept accumulating, which would desynchronize the two lists if
        # data_dir contained subdirectories.
        texts = []
        self.x = []
        for parent, dirnames, filenames in os.walk(data_dir):
            for filename in filenames:
                if filename[0] != '.':
                    with open(os.path.join(data_dir, filename), encoding='UTF-8') as f:
                        texts.append(f.read().splitlines())
                    self.x.append(filename)

        # Segment every line of every file with jieba, keyed by filename.
        document = {}
        for i, text in enumerate(texts):
            document[self.x[i]] = [' '.join(jieba.cut(line)) for line in text]

        # word -> first index, mirroring list.index() semantics for duplicate
        # words; O(1) lookups instead of an O(len(vocabulary)) scan per word.
        word_index = {}
        for idx, word in enumerate(vocabulary):
            word_index.setdefault(word, idx)

        # Turn each segmented line into a binary presence vector over the
        # vocabulary.
        document_new = {}
        for filename in self.x:
            vectors = []
            for line in document[filename]:
                vector = [0] * len(vocabulary)
                for word in line.split(" "):
                    if word in word_index:
                        vector[word_index[word]] = 1
                vectors.append(vector)
            document_new[filename] = vectors

        # The digits embedded in each filename are the class label.
        self.train_data = []
        for filename in self.x:
            num = re.findall(r'\d+', filename)  # raw string: '\d' is a regex escape
            self.train_data.append((int(num[0]), document_new[filename]))

        # Persist training data and vocabulary; context managers guarantee the
        # files are flushed and closed (they were previously left open).
        with open('./newdict/data.pkl', 'wb') as output:
            pickle.dump(self.train_data, output)
        with open('./newdict/vocabulary.pkl', 'wb') as output:
            pickle.dump(self.vocabularys, output)

    def fit(self):
        """Train a multinomial Naive Bayes model on the pickled training data
        and save it to ``self.model_path``."""
        with open('./newdict/data.pkl', 'rb') as pkl_file:
            train_data = pickle.load(pkl_file)

        # One Row per line-vector, labelled with its file's class number.
        # Fix: iterate over the actual number of classes instead of the
        # hard-coded 13, so the method generalizes to any dataset size.
        df = self.spark.createDataFrame(
            [Row(label=train_data[j][0], weight=0.1,
                 features=Vectors.dense(train_data[j][1][i]))
             for j in range(len(train_data))
             for i in range(len(train_data[j][1]))])

        nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight")
        print("训练正在开始-------------->")
        model = nb.fit(df)
        model.save(self.model_path)

    def test(self, sentence, vocabularys):
        """Classify one sentence.

        Args:
            sentence: raw (unsegmented) question string.
            vocabularys: vocabulary word list matching the trained model.

        Returns:
            int: the predicted class index.
        """
        # Segment, then map to the same binary bag-of-words representation
        # used at training time.
        sentence = ' '.join(jieba.cut(sentence))
        vector = [0] * len(vocabularys)
        for word in sentence.split(" "):
            if word in vocabularys:
                vector[vocabularys.index(word)] = 1

        # NOTE(review): the model is reloaded from disk on every call —
        # acceptable for one-off use, consider caching for batch prediction.
        model = NaiveBayesModel.load(self.model_path)
        test0 = self.spark.createDataFrame([Row(features=Vectors.dense(vector))])
        result = model.transform(test0).head()
        print('The model index is: {}'.format(int(result.prediction)))
        return int(result.prediction)


if __name__ == "__main__":
    sentence = '新世界的导演'

    model = NaiveBayesModelMe()
    # One-time preparation — uncomment to rebuild the data and retrain:
    # model.load(data_dir=data_dir, vocabularys=vocabularys)
    # model.fit()

    # Fix: use a context manager so the pickle file is closed (it was
    # previously left open).
    with open("./newdict/vocabulary.pkl", "rb") as pkl_file:
        vocabulary = pickle.load(pkl_file)
    model.test(sentence, vocabulary)
