import pandas as pd
import jieba
from importlib import reload
import sys
import pickle
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
import tkinter

# NOTE(review): `reload(sys)` is a Python 2 idiom (it was paired with the
# long-removed `sys.setdefaultencoding`); under Python 3 it has no useful
# effect — confirm it (and the L3 reload import) can be removed.
reload(sys)
# Base directory holding the dataset CSV and stopword file.
data = "data/"

def drop_stopwords(contents, stopwords):
    """
    Remove stopwords from tokenized documents.

    :param contents: iterable of token lists (one list per document).
    :param stopwords: iterable of stopword tokens.
    :return: tuple ``(contents_clean, all_words)`` where ``contents_clean``
        mirrors ``contents`` with stopwords dropped, and ``all_words`` is the
        flat list of every kept token, stringified.
    """
    # Set membership is O(1); the original `word in stopwords` list scan was
    # O(len(stopwords)) per token, which dominates on large stopword files.
    stopword_set = set(stopwords)
    contents_clean = []
    all_words = []
    for line in contents:
        line_clean = [word for word in line if word not in stopword_set]
        contents_clean.append(line_clean)
        all_words.extend(str(word) for word in line_clean)
    return contents_clean, all_words


def _join_tokens(rows):
    """Join each token list into one space-separated string, skipping bad rows."""
    joined = []
    for row in rows:
        try:
            joined.append(' '.join(row))
        except TypeError:
            # Was a bare `except:`; only a non-string token / non-iterable
            # row can make the join fail, so catch that specifically.
            print("有问题")
    return joined


def model():
    """
    Train the rumor-detection text classifier.

    Pipeline: read the labelled comment CSV, segment each review with jieba,
    strip stopwords, vectorize with Tf-Idf (top 4000 features, case kept) and
    fit a Multinomial Naive Bayes classifier. The fitted vectorizer and
    classifier are pickled to 'vectorizer.txt' / 'classifier.txt'.

    :return: tuple ``(classifier, vectorizer)`` fitted on the training split.
    """
    df_comment = pd.read_csv(data + 'CED_dataset.csv', names=['label', 'review'], sep='_', header=0, encoding='utf-8')
    print(df_comment.shape)
    content = df_comment.review.astype(str).values.tolist()
    content_S = []
    for line in content:
        current_segment = jieba.lcut(line)
        # Skip degenerate segmentations (single token or a bare newline);
        # a placeholder keeps row counts aligned with the labels.
        if len(current_segment) > 1 and current_segment != '\r\n':
            content_S.append(current_segment)
        else:
            content_S.append(" ")
    df_content = pd.DataFrame({'content_S': content_S})
    # Load the stopword list and strip stopwords from every review.
    stopwords = pd.read_csv("data/stopwords.txt", index_col=False, sep="\t", quoting=3, names=['stopword'], encoding='utf-8')
    contents = df_content.content_S.values.tolist()
    stopwords = stopwords.stopword.values.tolist()
    contents_clean, all_words = drop_stopwords(contents, stopwords)
    df_train = pd.DataFrame({'contents_clean': contents_clean, 'label': df_comment['label']})
    x_train, x_test, y_train, y_test = train_test_split(df_train['contents_clean'].values, df_train['label'].values, random_state=1)
    words = _join_tokens(x_train)
    vectorizer = TfidfVectorizer(analyzer='word', max_features=4000, lowercase=False)
    vectorizer.fit(words)
    # Persist the fitted vectorizer so predict() can reuse it later.
    # (`with` guarantees the handle is closed even if pickling raises.)
    with open('vectorizer.txt', 'wb') as f:
        pickle.dump(vectorizer, f)
    classifier = MultinomialNB()
    classifier.fit(vectorizer.transform(words), y_train)
    test_words = _join_tokens(x_test)
    # Debug output: class probabilities for test sample at row 1.
    print(classifier.predict_proba(vectorizer.transform(test_words)[1]))
    print(classifier.score(vectorizer.transform(test_words), y_test))
    with open('classifier.txt', 'wb') as f:
        pickle.dump(classifier, f)
    return classifier, vectorizer


def predict(classifier, vectorizer, str):
    """
    Classify a single comment as rumor / non-rumor.

    :param classifier: fitted MultinomialNB returned by model().
    :param vectorizer: fitted TfidfVectorizer returned by model().
    :param str: raw comment text. (The name shadows the builtin ``str``;
        it is kept for backward compatibility with existing callers.)
    :return: '谣言' if the rumor-class probability wins, else '非谣言'.
    """
    sp_word = jieba.lcut(str)
    stopwords = pd.read_csv("data/stopwords.txt", index_col=False, sep="\t", quoting=3, names=['stopword'],
                            encoding='utf-8')
    # Set lookup replaces the original O(words * stopwords) nested delete
    # loop; a word is dropped iff it equals some stopword, exactly as before.
    stop_set = set(stopwords['stopword'])
    sp_word = [w for w in sp_word if w not in stop_set]
    test = [' '.join(sp_word)]
    result = classifier.predict_proba(vectorizer.transform(test)[0])[0]
    # Column 1 is presumably the rumor class — TODO confirm against the
    # label encoding used when the classifier was trained.
    if result[0] < result[1]:
        return '谣言'
    else:
        return '非谣言'