import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.frequent_patterns import apriori
from mlxtend.preprocessing import TransactionEncoder
import json
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
# Load the data.
# NOTE(review): dtype='<U4' truncates every token to 4 characters — presumably
# fine for short Chinese words, but confirm no longer tokens exist in the data.
data_train = pd.read_csv('train_procedure.csv', names=['class', 'words'])
data_test = pd.read_csv('test.csv', names=['class', 'words'])
data_train['words'] = data_train['words'].apply(lambda x: np.array(x.split(sep=' '), dtype='<U4'))
data_test['words'] = data_test['words'].apply(lambda x: np.array(x.split(sep=' '), dtype='<U4'))

# Load the Chinese stop-word list.
# Fix: readlines() keeps the trailing '\n' on every line, so the original
# membership test `word in stop_words` could never match a plain token —
# stop-word filtering silently did nothing. Strip each line and store the
# words in a set for O(1) lookups. Read-only mode is sufficient.
with open('中文停用词表.txt', mode='r', encoding='utf-8') as fp:
    stop_words = {line.strip() for line in fp}

# Build the vocabulary (feature dimensions): count word frequencies over the
# training set, skipping stop words.
word_freq_dict = dict()
for index, row in data_train.iterrows():
    for word in row['words']:
        if word in stop_words:
            continue
        word_freq_dict[word] = word_freq_dict.get(word, 0) + 1

# Keep only words that occur more than once in the training set.
filtered = [k for k, v in word_freq_dict.items() if v > 1]
print('dimensions generated.')

# Build the bag-of-words count matrix.

def onehot(data, dimension):
    """Build a bag-of-words count matrix for `data` over the vocabulary `dimension`.

    Parameters
    ----------
    data : pd.DataFrame
        Must have a 'words' column holding an array/sequence of tokens per row.
    dimension : sequence of str
        Vocabulary; one output column per word, in this order.

    Returns
    -------
    np.ndarray of shape (len(data), len(dimension)) with per-document word
    counts (despite the name, entries can exceed 1).
    """
    # Fix: the original tested membership against the global `filtered` instead
    # of the `dimension` parameter, so the parameter was ignored. Use the
    # parameter, and a set for O(1) membership instead of an O(n) list scan.
    dim_set = set(dimension)
    word_bag = pd.DataFrame(np.zeros((data.shape[0], len(dimension))), columns=dimension)
    for index, row in data.iterrows():
        for word in row['words']:
            if word in dim_set:
                word_bag.loc[index, word] += 1

    return np.array(word_bag)

# Count matrices for the training and test sets over the kept vocabulary.
train_onehot, test_onehot = onehot(data_train, filtered), onehot(data_test, filtered)
print('onehot generated')

# Compute the inverse document frequency of every word in the corpus.
def IDF(corpus):
    """Return the IDF vector, one value per column (word) of `corpus`.

    `corpus` is a (documents x vocabulary) count matrix.
    IDF(t) = log10(N / df(t)), where df(t) is the number of documents
    containing term t at least once.
    """
    n = corpus.shape[0]
    # Fix: the original used corpus.sum(axis=0), i.e. total term counts,
    # where the IDF definition calls for *document* frequency (number of
    # documents containing the term).
    df = np.count_nonzero(corpus, axis=0)
    # Guard df == 0 (term absent from the corpus) to avoid division by zero.
    return np.log10(n / np.maximum(df, 1))

# TF-IDF representations of the training and test sets; the IDF weights are
# learned from the training counts and applied to both matrices.
idf = IDF(train_onehot)
train_tfidf, test_tfidf = train_onehot * idf, test_onehot * idf
print('tfidf generated')

# Fit the two classifiers (logistic regression and multinomial naive Bayes)
# on the TF-IDF features and predict the test set.

LR = LogisticRegression(solver='liblinear')
MNB = MultinomialNB()
for model in (LR, MNB):
    model.fit(train_tfidf, data_train['class'])

predict_logistic = LR.predict(test_tfidf)
predict_MultinomialNB = MNB.predict(test_tfidf)

# Accuracy helper.

def acc(predict, label):
    """Accuracy of `predict` against `label`.

    Numeric predictions are first mapped onto the string labels
    (0 -> 'negative', anything else -> 'positive') before comparison.
    NOTE(review): assumes labels in `label` are exactly these two strings —
    confirm against the test CSV.
    """
    mapped = np.where(predict == 0, 'negative', 'positive')
    return (mapped == label).mean()

# Report test-set accuracy of both classifiers.
acc_LR = acc(predict_logistic, data_test['class'])
acc_MNB = acc(predict_MultinomialNB, data_test['class'])
for label_text, value in (('logistic regression准确率为：', acc_LR),
                          ('Multinomial regression准确率为：', acc_MNB)):
    print(label_text, value)




# 讨论1：tfidf是否必要？

## 使用logistic regression进行分类

LR1 = LogisticRegression(solver='liblinear')
LR1.fit(train_onehot, data_train['class'])
predict_logistic1 = LR1.predict(test_onehot)

## 使用Multinomial NB进行分类

MNB1 = MultinomialNB()
MNB1.fit(train_onehot, data_train['class'])
predict_MultinomialNB1 = MNB1.predict(test_onehot)

## 计算准确率acc

acc_LR1 = acc(predict_logistic1, data_test['class'])
acc_MNB1 = acc(predict_MultinomialNB1, data_test['class'])
print('logistic regression准确率(onehot)为：', acc_LR1)
print('Multinomial regression准确率(onehot)为：', acc_MNB1)

