from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
from datetime import datetime

import pandas as pd

import csv
import jieba
import pickle


def __apply_jiebe(ser: pd.Series) -> pd.Series:
    """Segment the row's 'content' text with jieba, dropping stop words.

    Intended for ``DataFrame.apply(axis=1)``: rewrites ``ser['content']``
    in place as a single space-joined token string (the whitespace-token
    format ``TfidfVectorizer`` expects) and returns the row.

    Args:
        ser: a DataFrame row carrying a 'content' string field.

    Returns:
        The same row with 'content' replaced by the filtered tokens.
    """
    # ' '.join over a generator is O(n); the original built the string with
    # repeated concatenation, which is quadratic in the number of tokens.
    # Relies on the module-level `stop_words` set loaded at import time.
    ser['content'] = ' '.join(
        tok for tok in jieba.cut(ser['content']) if tok not in stop_words
    )
    return ser


def __load_stop_words() -> set:
    """Load the stop-word list from ./stop_words.txt (one word per line).

    Returns:
        set: the stop words with their trailing newlines removed.
    """
    # 'with' guarantees the handle is closed (the original never closed it).
    with open(file='./stop_words.txt', mode='r', encoding='utf-8') as f:
        # rstrip('\n') instead of the old lin[:-1]: the slice chopped the
        # final character of the last line whenever the file did not end
        # with a newline.
        return {line.rstrip('\n') for line in f}


def __segment(text: str) -> str:
    """Cut *text* with jieba, drop stop words, join tokens with spaces."""
    return ' '.join(tok for tok in jieba.cut(text) if tok not in stop_words)


def load_train_data(file1: str, file2: str) -> 'tuple[pd.Series, pd.Series]':
    """Read two labelled CSV sample files and build training series.

    Each CSV row is expected to be ``[text, label]`` with an integer label.
    Texts are jieba-segmented, stop-word filtered, and space-joined so they
    can feed ``TfidfVectorizer`` directly.

    Args:
        file1: path to the first CSV sample file (danmaku samples).
        file2: path to the second CSV sample file (comment samples).

    Returns:
        ``(content, labels)`` — segmented texts and their int labels, in
        file order (all rows of file1 followed by all rows of file2).
    """
    content = []
    in_out = []
    # One loop over both paths replaces the two copy-pasted per-file loops
    # of the original, and 'with' closes the handles the original leaked.
    for path in (file1, file2):
        with open(file=path, mode='r', encoding='utf-8', newline='') as f:
            for row in csv.reader(f):
                content.append(__segment(row[0]))
                in_out.append(int(row[1]))

    return pd.Series(content), pd.Series(in_out)


stop_words = __load_stop_words()

if __name__ == '__main__':
    # NOTE(review): a one-off preprocessing step (jieba-segmenting the raw
    # dm/comments pickles via __apply_jiebe into dm_split/comments_split)
    # previously lived here as commented-out code; it was run once to
    # produce the *_split pickles loaded below and has been removed.
    begin = datetime.now()

    # Load the pre-segmented danmaku / comment DataFrames. 'with' closes
    # each handle; the original open(...) calls were never closed.
    with open(file='../PickledFile/dm_split.pickled', mode='rb') as f:
        dms = pickle.load(file=f)  # type: pd.DataFrame
    with open(file='../PickledFile/comments_split.pickled', mode='rb') as f:
        cts = pickle.load(file=f)  # type: pd.DataFrame
    train_data, train_target = load_train_data(file1='./调侃鼓励严肃/弹幕sample.csv', file2='./调侃鼓励严肃/评论sample.csv')
    print('数据加载与分词结束………………………………………………………………………………………………………………………………………………………………………………………………………………')

    # TF-IDF features feeding a multinomial naive Bayes classifier.
    model = make_pipeline(TfidfVectorizer(), MultinomialNB())
    model.fit(train_data, train_target)
    print('模型训练结束……………………………………………………………………………………………………………………………………………………………………………………………………………………………')

    print('开始预测……………………………………………………………………………………………………………………………………………………………………………………………………………………………………')
    dms['res'] = model.predict(dms['content'])
    cts['res'] = model.predict(cts['content'])

    # The segmented text is no longer needed once predictions are attached.
    dms = dms.drop(labels=['content'], axis=1)
    cts = cts.drop(labels=['content'], axis=1)

    end = datetime.now()
    print(end - begin)

    # Output paths kept byte-identical to the original — including the odd
    # 'dm_predicted(3)' vs 'comments_predicted (3)' spacing — so any
    # downstream consumer still finds the files.
    with open(file='../PickledFile/dm_predicted(3).pickled', mode='wb') as f:
        pickle.dump(obj=dms, file=f)
    with open(file='../PickledFile/comments_predicted (3).pickled', mode='wb') as f:
        pickle.dump(obj=cts, file=f)
