import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.frequent_patterns import apriori
from mlxtend.preprocessing import TransactionEncoder
import json
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
# Load the data: each CSV row is "<class>,<space-joined words>".

data_train = pd.read_csv('train_procedure.csv', names=['class', 'words'])
data_test = pd.read_csv('test.csv', names=['class', 'words'])
# Split each sentence into an array of tokens (dtype <U4: strings up to 4 chars).
data_train['words'] = data_train['words'].apply(lambda x: np.array(x.split(sep=' '), dtype='<U4'))
data_test['words'] = data_test['words'].apply(lambda x: np.array(x.split(sep=' '), dtype='<U4'))

# Load Chinese stop words.
# mode='r' (was 'r+'): the file is only read, no need to open it writable.
# A set gives O(1) membership tests in the counting loop below.
with open('中文停用词表.txt', mode='r', encoding='utf-8') as fp:
    stop_words = {line.rstrip() for line in fp}

# Load Chinese negation words (used later to flip sentence polarity).
with open('中文否定词.txt', mode='r', encoding='utf-8') as fp:
    negation_words = {line.rstrip() for line in fp}

# Count per-word class frequencies over the training set:
# word -> [count in class 1 (positive), count in class 0 (negative)].
# Stop words and negation words are excluded from the vocabulary.
word_freq_dict = dict()
for index, row in data_train.iterrows():
    for word in row['words']:
        if word in stop_words or word in negation_words:
            continue
        # Only classes 0/1 contribute, so every entry has sum(v) >= 1
        # (keeps the polarity ratio below division-safe).
        if row['class'] == 1:
            word_freq_dict.setdefault(word, [0, 0])[0] += 1
        elif row['class'] == 0:
            word_freq_dict.setdefault(word, [0, 0])[1] += 1

# `filtered` keeps only words with strong sentiment polarity: the class
# imbalance must be at least 15% of the word's total occurrences.

filtered = [k for k, v in word_freq_dict.items() if abs(v[0]-v[1])/sum(v) >= 0.15]

# Build a signed one-hot (bag-of-words) vector per sentence; negation words flip the sign of the next sentiment word.

def onehot(data, dimension, negation_words):
    """Build a signed bag-of-words matrix for the sentences in `data`.

    Parameters
    ----------
    data : DataFrame with a 'words' column holding iterables of tokens.
    dimension : sequence of vocabulary words; one output column per word.
    negation_words : iterable of negation tokens.

    Returns
    -------
    ndarray of shape (len(data), len(dimension)) with per-sentence word
    counts; a vocabulary word preceded by an unconsumed negation token
    contributes -1 instead of +1 (the first such word consumes the flag).

    Fixes vs. the original: tests membership against the `dimension`
    parameter instead of the module-level `filtered`; drops the unused
    `word_freq_dict` lookup; toggles the negation flag with `not` instead
    of bitwise `~` on a bool; uses sets for O(1) membership tests.
    """
    vocab = set(dimension)
    negations = set(negation_words)
    word_bag = pd.DataFrame(np.zeros((data.shape[0], len(dimension))),
                            columns=dimension)
    for index, row in data.iterrows():
        negated = False
        for word in row['words']:
            if word in negations:
                negated = not negated  # consecutive negations cancel out
                continue
            if word in vocab:
                if negated:
                    word_bag.loc[index, word] -= 1
                    negated = False  # one negation applies to one word only
                else:
                    word_bag.loc[index, word] += 1
    return np.array(word_bag)

# Vectorize both splits over the polarity vocabulary `filtered`.
train_onehot = onehot(data_train, filtered, negation_words)
test_onehot = onehot(data_test, filtered, negation_words)

# Build the TF-IDF vector per sentence, keeping the negation-driven sign flips from the one-hot step.

def IDF(corpus):
    """Return a per-column inverse-document-frequency weight vector.

    `corpus` is an (n_docs, n_words) signed count matrix. The weight for
    column j is log10(n_docs / sum_i |corpus[i, j]|); note this uses the
    total absolute count, not the number of documents containing the word.
    All-zero columns are treated as having count 1 to avoid dividing by 0.
    """
    n_docs = corpus.shape[0]
    col_totals = np.array(np.abs(corpus).sum(axis=0))
    safe_totals = np.where(col_totals == 0, 1, col_totals)
    return np.log10(n_docs / safe_totals)

# IDF weights are fit on the training matrix only, then applied to both
# splits (element-wise column scaling via broadcasting).
idf = IDF(train_onehot)

train_tfidf = train_onehot*idf
test_tfidf = test_onehot*idf
print('tfidf generated')

# Classify with logistic regression on the TF-IDF features.

LR = LogisticRegression(solver='liblinear')
LR.fit(train_tfidf, data_train['class'])
predict_logistic = LR.predict(test_tfidf)

# Classify with Multinomial NB (kept for comparison; currently disabled).
# NOTE(review): MultinomialNB requires non-negative features, but the
# negation handling in onehot() can produce negative TF-IDF values —
# confirm before re-enabling.

# MNB = MultinomialNB()
# MNB.fit(train_tfidf, data_train['class'])
# predict_MultinomialNB = MNB.predict(test_tfidf)

# Compute classification accuracy (acc).

def acc(predict, label):
    """Return the fraction of predictions that match the string labels.

    `predict` holds numeric classes (0 -> 'negative', anything else ->
    'positive'); `label` holds the corresponding 'negative'/'positive'
    ground-truth strings.
    """
    as_text = np.where(predict == 0, 'negative', 'positive')
    hits = (as_text == label).sum()
    return hits / len(as_text)

# Report test-set accuracy for the enabled classifier(s).
acc_LR = acc(predict_logistic, data_test['class'])
# acc_MNB = acc(predict_MultinomialNB, data_test['class'])
print('logistic regression准确率为：', acc_LR)
# print('Multinomial regression准确率为：', acc_MNB)


# Interactive demo: classify user sentences until the user types 'quit'.
# NOTE(review): jieba is a third-party tokenizer imported mid-file so it is
# only required once the interactive loop is reached.
import jieba
sentence = input('input your sentence here:\n')
while sentence!='quit':
    # Segment with jieba and wrap as a one-row frame matching onehot()'s
    # expected input shape ('class' is unused for prediction).
    sentence = list(jieba.cut(sentence))
    sentence = pd.DataFrame({'class':None, 'words':[sentence,]})
    sentence_onehot = onehot(sentence, filtered, negation_words)
    sentence_tfidf = sentence_onehot*idf
    la = LR.predict(sentence_tfidf)
    print('positive' if la[0]==1 else 'negative')
    sentence = input('input your sentence here:\n')