import pandas as pd
import jieba
from sklearn.feature_extraction.text import CountVectorizer
from snownlp import SnowNLP
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split

# Load the labeled review dataset; expects columns including 'star' (rating)
# and 'comment' (review text) — TODO confirm schema against data/data1.csv.
data = pd.read_csv('data/data1.csv')
# NOTE(review): bare expression — its result is discarded when run as a script.
# This is a notebook remnant; wrap in print(...) if the output is wanted.
data['star'].unique()


def make_label(star):
    """Binarize a star rating: 1 (positive) when star > 3, else 0 (negative)."""
    return int(star > 3)


def snow_result(comment):
    """Classify a comment's sentiment with SnowNLP.

    Args:
        comment: Review text (Chinese) to score.

    Returns:
        1 if SnowNLP's positive-sentiment probability is >= 0.6, else 0.
        The 0.6 cutoff biases slightly toward the negative class compared
        with the default 0.5 decision boundary.
    """
    # Fixed parameter-name typo: 'comemnt' -> 'comment'. All visible call
    # sites pass positionally (Series.apply), so this is backward-compatible.
    s = SnowNLP(comment)
    return 1 if s.sentiments >= 0.6 else 0


def chinese_word_cut(mytext):
    """Segment Chinese text with jieba and join the tokens with single spaces."""
    tokens = jieba.cut(mytext)
    return " ".join(tokens)


# Derive a binary ground-truth label from the star rating (1 = positive).
data['sentiment'] = data.star.apply(make_label)
# NOTE(review): bare expression — head() output is discarded when run as a
# script (notebook remnant); wrap in print(...) to inspect the frame.
data.head()


# text1 = '这个东西不错'
# text2 = '这个东西很垃圾'
# s1 = SnowNLP(text1)
# s2 = SnowNLP(text2)
# print(s1.sentiments, s2.sentiments)
# data['snlp_result'] = data.comment.apply(snow_result)
# data.head(5)
# counts = 0
# for i in range(len(data)):
#     if data.iloc[i, 2] == data.iloc[i, 3]:
#         counts += 1
# print(counts / len(data))