import pandas as pd
import jieba

# 1. Load the data: Douban movie reviews.
# NOTE(review): read_csv uses the default comma separator on a .txt file —
# assumes the file is comma-delimited; confirm against the actual data.
COLUMN_NAMES = [
    "share_id",
    "pjr",
    "rating",
    "comment_time",
    "comment_location",
    "vote_count",
    "comment_content",
]
comments = pd.read_csv(
    "../data/豆瓣电影评价.txt",
    names=COLUMN_NAMES,
    encoding="UTF-8",
)

# Target labels: the rating of each review.
y = comments["rating"]
# Raw review text.
comment_content = comments["comment_content"]
# Stop words (punctuation and common Chinese function words) dropped
# during segmentation.
tl = ["，", "。", "“", "”", "/", "的", "、", "了", "是"]


def sagment(comment, stop_words=None):
    """Segment a Chinese comment into a space-joined token string.

    Parameters
    ----------
    comment : str
        Raw review text.
    stop_words : iterable of str, optional
        Tokens to drop from the output. Defaults to the module-level
        ``tl`` stop-word list, preserving the original behavior.

    Returns
    -------
    str
        Whitespace-joined tokens, in the format CountVectorizer expects.
    """
    if stop_words is None:
        stop_words = tl
    # A set gives O(1) membership tests instead of scanning a list per token.
    stop_set = set(stop_words)
    words = [word for word in jieba.lcut(comment) if word not in stop_set]
    return " ".join(words)


# 2. Segment every review into space-separated tokens.
comment_content = comment_content.apply(sagment)

# 3. Convert the segmented text into a bag-of-words count matrix.
from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer()
# fit_transform learns the vocabulary and vectorizes in a single pass;
# it is equivalent to fit() followed by transform() on the same corpus.
x = vectorizer.fit_transform(comment_content)

# 4. Split the data into training and test sets (80/20).
from sklearn.model_selection import train_test_split

# random_state pins the shuffle so the accuracy printed below is
# reproducible across runs; drop it if a fresh random split is desired.
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=42
)

# 5. Choose an algorithm.
# Multinomial Naive Bayes: a probabilistic classifier well suited to
# word-count features such as the bag-of-words matrix built above.
from sklearn.naive_bayes import MultinomialNB

nb = MultinomialNB()
# fit() trains the estimator in place and returns the estimator itself.
nb.fit(x_train, y_train)
model = nb

# Model evaluation: mean accuracy on the held-out test set.
print(model.score(x_test, y_test))

# Persist the trained model to disk.
import pickle

# NOTE(review): pickle files must never be loaded from untrusted sources;
# acceptable here because we only reload the file we just wrote ourselves.
with open('文本分类模型.pickle', 'wb') as f:
    pickle.dump(nb, f)  # serialize the fitted classifier to a local file

# Reload the model from disk.
with open('文本分类模型.pickle', 'rb') as f:
    model1 = pickle.load(f)  # deserialize the classifier saved above

# Use the reloaded model to predict labels for the test set.
print(model1.predict(x_test))
