'''
Chinese sentiment analysis
'''
import csv

from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from gensim.models.word2vec import Word2Vec
import numpy as np
import pandas as pd
import jieba
import joblib #把数据转化为二进制
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import sys

'''
Data preprocessing: load the data,
                    preprocess it,
                    split into training and test sets
'''
def load_file_and_processing():
    """Load the weibo moods corpus, segment it, and split train/test.

    Reads 'simplifyweibo_4_moods.csv' (columns: label, review), takes the
    first 1000 positive reviews (label == 0) and the first 1000 negative
    reviews (label > 0), segments each review with jieba, and splits the
    combined set 80/20 into train and test.  Label arrays are persisted to
    'y_train.npy' / 'y_test.npy' for get_data(); the segmented sentences
    are returned.

    Returns:
        (x_train, x_test): object arrays of token lists.
    """
    data = pd.read_csv('simplifyweibo_4_moods.csv', header=0)

    # label == 0 marks positive reviews; every other label counts as
    # negative.  .copy() avoids pandas' SettingWithCopyWarning when the
    # segmented column is assigned below.
    pos = data.loc[data.label == 0, ['review']].iloc[:1000].copy()
    neg = data.loc[data.label > 0, ['review']].iloc[:1000].copy()

    # Segment every review into a word list.
    pos['c_w'] = [jieba.lcut(sent) for sent in pos['review']]
    neg['c_w'] = [jieba.lcut(sent) for sent in neg['review']]

    # Stack positive then negative reviews, and build the matching label
    # vector (1 = positive, 0 = negative) in the same order.
    pos_and_neg = np.concatenate((pos['c_w'], neg['c_w']))
    table = np.concatenate((np.ones(len(pos)), np.zeros(len(neg))))

    # 80/20 train/test split; labels are saved so the vectorization step
    # can run in a separate process.
    x_train, x_test, y_train, y_test = train_test_split(pos_and_neg, table, test_size=0.2)
    np.save('y_train.npy', y_train)
    np.save('y_test.npy', y_test)
    return x_train, x_test



'''
Average all the word vectors of a sentence to produce one sentence vector
'''
def build_sentence_vector(text, size, model):
    """Average the word2vec vectors of the in-vocabulary words in *text*.

    Out-of-vocabulary words (KeyError from the model) are skipped.  When no
    word is found in the vocabulary, the zero vector is returned.

    Args:
        text: iterable of tokens.
        size: dimensionality of the word vectors.
        model: trained Word2Vec model (vectors read from ``model.wv``).

    Returns:
        numpy array of shape (1, size): the mean word vector.
    """
    total = np.zeros((1, size))
    hits = 0
    for token in text:
        try:
            total += model.wv[token].reshape((1, size))
        except KeyError:
            continue
        hits += 1
    return total / hits if hits else total


'''
Average all the word vectors of a sentence to produce one sentence vector
(variant used at prediction time)
'''
def build_sentence(text, size, model):
    """Average word vectors over *text* to build one sentence vector.

    Since gensim 4 a Word2Vec model can no longer be indexed directly
    (``model[word]`` raises instead of KeyError); vectors live on
    ``model.wv``.  The vector table is resolved once so both full Word2Vec
    models and bare word->vector mappings (KeyedVectors, dict) work.

    Args:
        text: iterable of tokens (e.g. output of jieba.cut).
        size: dimensionality of the word vectors.
        model: Word2Vec model, KeyedVectors, or mapping word -> vector.

    Returns:
        numpy array of shape (1, size): mean of the in-vocabulary word
        vectors, or the zero vector when no token is in the vocabulary.
    """
    vectors = getattr(model, 'wv', model)  # Word2Vec -> its KeyedVectors
    vec = np.zeros(size).reshape((1, size))
    count = 0
    for word in text:
        try:
            vec += vectors[word].reshape((1, size))
            count += 1
        except KeyError:
            continue
    if count != 0:
        vec /= count
    return vec


'''
Compute the word vectors
'''
def get_train_vecs(x_train, x_test):
    """Train a Word2Vec model on the training reviews and cache sentence vectors.

    A 300-dimensional Word2Vec model is fitted on x_train only; x_test is
    *vectorized* with the frozen model (the original also re-trained on the
    test set, leaking test data into the embedding and passing a
    total_examples taken from the training corpus).  The averaged sentence
    vectors are written to 'train_vecs.npy' / 'test_vecs.npy' and the model
    to 'w2v_model.pkl'.

    Args:
        x_train: iterable of token lists for the training reviews.
        x_test: iterable of token lists for the test reviews.
    """
    n_dim = 300
    # min_count=10: words seen fewer than 10 times are dropped (default 5).
    model = Word2Vec(vector_size=n_dim, min_count=10)
    model.build_vocab(x_train)

    # Train the embedding on the training reviews only.
    model.train(x_train, total_examples=model.corpus_count, epochs=50)

    train_vecs = np.concatenate([build_sentence_vector(z, n_dim, model) for z in x_train])
    np.save('train_vecs.npy', train_vecs)
    print('train_vecs size:')
    print(train_vecs.shape)

    # Persist the embedding for get_predict_vecs().
    model.save('w2v_model.pkl')
    print('w2v_model.pkl')

    # Build test sentence vectors with the already-trained model.
    test_vecs = np.concatenate([build_sentence_vector(z, n_dim, model) for z in x_test])
    np.save('test_vecs.npy', test_vecs)
    print('test_vecs size:')
    print(test_vecs.shape)


def get_data():
    """Reload the cached sentence vectors and labels from disk.

    Returns:
        (train_vecs, y_train, test_vecs, y_test) as numpy arrays, read from
        the .npy files written by get_train_vecs() and
        load_file_and_processing().
    """
    filenames = ('train_vecs.npy', 'y_train.npy', 'test_vecs.npy', 'y_test.npy')
    return tuple(np.load(name) for name in filenames)


'''
Train the classifier
'''

def svm_train(train_vecs, y_train, test_vecs, y_test):
    """Fit an RBF-kernel SVM on the sentence vectors and plot its ROC curve.

    The classifier is persisted to 'svm_model.pkl'.  (The original dumped
    it to 'w2v_model.pkl', clobbering the Word2Vec model that
    get_train_vecs had saved under the same name, which broke the later
    prediction step.)

    Args:
        train_vecs, y_train: training sentence vectors and labels.
        test_vecs, y_test: held-out vectors and labels for evaluation.
    """
    clf = SVC(kernel='rbf', verbose=True, probability=True)
    clf.fit(train_vecs, y_train)
    # Save under a distinct filename so 'w2v_model.pkl' keeps the embedding.
    joblib.dump(clf, 'svm_model.pkl', compress=3)
    print('模型准确率为：', clf.score(test_vecs, y_test))

    # ROC curve over the positive-class probabilities.
    pred_probas = clf.predict_proba(test_vecs)[:, 1]
    fpr, tpr, _ = roc_curve(y_test, pred_probas)
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, label='area = %.2f' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')  # chance-level diagonal
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.legend(loc='lower right')
    plt.show()


'''
Build the sentence vector for a sentence to classify
'''
def get_predict_vecs(words):
    """Build the averaged sentence vector for one segmented sentence.

    Reloads the Word2Vec model saved by get_train_vecs().  gensim models
    are written with model.save(), so they must be reloaded with
    Word2Vec.load() — the original used joblib.load(), which is not the
    matching deserializer.  The vector is built via build_sentence_vector,
    which reads vectors through model.wv (the gensim >= 4 API).

    Args:
        words: iterable of tokens (e.g. jieba.cut output).

    Returns:
        numpy array of shape (1, 300).
    """
    n_dim = 300
    model = Word2Vec.load('w2v_model.pkl')
    return build_sentence_vector(words, n_dim, model)


'''
Run sentiment analysis on a single sentence
'''
def svm_predict(string):
    """Classify one sentence as positive (1) or negative (0) sentiment.

    The sentence is segmented with jieba, converted to an averaged word2vec
    sentence vector, and scored by the persisted SVM classifier.

    Args:
        string: raw sentence to analyse.
    """
    words = jieba.cut(string)  # generator of tokens (jieba.lcut returns a list)
    words_vecs = get_predict_vecs(words)
    # Load the classifier from its own file.  The original read
    # 'w2v_model.pkl', which at this point held whichever of the Word2Vec
    # model or the SVM had been written last.
    clf = joblib.load(filename='svm_model.pkl')

    result = clf.predict(words_vecs)

    print('1为积极情绪，0为消极情绪')
    print('句子的结果是:', result[0])

    # Original spelled this `int(result[0] == 1)` — same truth value, but
    # the comparison-then-cast read as a mistake; compare directly.
    if int(result[0]) == 1:
        print(string, '是：积极情绪')
    else:
        print(string, '是：消极情绪')

if __name__ == '__main__':
    # Full pipeline: preprocess -> train embedding -> train SVM -> predict.
    train_sentences, test_sentences = load_file_and_processing()
    get_train_vecs(train_sentences, test_sentences)
    train_vecs, y_train, test_vecs, y_test = get_data()
    svm_train(train_vecs, y_train, test_vecs, y_test)
    sample = "MP3音质太差"
    svm_predict(sample)




