import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

df = pd.read_csv("../datas/bayes_xinxi.txt")  # load the labelled corpus
# Tokenize on runs of ASCII letters or CJK ideographs (U+4E00-U+9FA5).
# FIX: the original pattern "[a-zA-Z|\u4e00-\u9fa5]+" also matched a literal
# '|' — inside a character class '|' has no alternation meaning.
tfCoder = CountVectorizer(token_pattern="[a-zA-Z\u4e00-\u9fa5]+")  # term-frequency model
X = df["words"]
Y = df["Y"]
X = tfCoder.fit_transform(X)  # learn the vocabulary and build the document-term matrix
# FIX: get_feature_names() was removed in scikit-learn 1.2; use get_feature_names_out().
print(tfCoder.get_feature_names_out())
print(X.toarray())

X_ = ["Chinese Chinese Chinese Tokyo Japan"]  # test sample (not training data)
X_ = tfCoder.transform(X_).A  # vectorize with the fitted vocabulary, as a dense array

from sklearn.naive_bayes import MultinomialNB

model = MultinomialNB()  # multinomial NB suits word-count features
model.fit(X, Y)
print(model.predict(X_))
print('-----------')
# NOTE(review): this indexes the label Series by the predicted class value —
# only meaningful if Y's index coincides with the class labels; verify.
print(Y[model.predict(X_)])
print(Y)


# names_feature = tfCoder.get_feature_names()  # get the vocabulary (feature names)
# print(names_feature)
# Y = list(set(Y))  # distinct class labels
# Y.sort()  # e.g. 0, 1
# class_prior = []  # stores the prior probability of each class
# feature_prob = []  # stores the conditional probabilities per class
# lam = 1  # Laplace smoothing constant
# for y in Y:  # iterate over the classes
#     df2 = df[df["Y"] == y]  # rows belonging to class y
#     prior = df2.shape[0] / df.shape[0]  # prior probability P(y)
#     # store the prior
#     class_prior.append(prior)
#
#     a = tfCoder.transform(df2["words"]).A  # word counts for class y
#     a = np.sum(a, axis=0)
#     prob = (a + lam) / (a.sum() + lam * len(names_feature))  # smoothed P(word|y)
#     # store the conditional probabilities
#     feature_prob.append(prob)  # two passes: all P(word|y=0), then all P(word|y=1)
# feature_prob = np.array(feature_prob)
# class_prior = np.array(class_prior)
# print(feature_prob)
# print("-----------testing-----------")
# X = ["Chinese Chinese Chinese Tokyo Japan"]  # test sample
# X = tfCoder.transform(X).A  # vectorize the test sample as an array
# print(X)
# # past = # compute the (unnormalized) posterior probability
# print(feature_prob ** X)
# past = np.prod(feature_prob ** X, axis=1) * class_prior
# print(past)
# # # output the class with the highest posterior
# print(Y[np.argmax(past)])
