import os
import jieba
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
import joblib
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
# Data loading
def loadfile(filepath, leibie):
    """Load every document in a category directory and label it.

    Args:
        filepath: directory holding one UTF-8 text file per document.
        leibie: category name assigned to every document in the directory.

    Returns:
        (content, label): parallel lists — each document read, segmented with
        jieba, and re-joined into one string; and the category repeated once
        per document.
    """
    content = []
    label = []
    for filename in os.listdir(filepath):
        # BUGFIX: the original `with open(...) as f` was missing the trailing
        # colon, which is a SyntaxError — the script could not run at all.
        with open(filepath + "/" + filename, encoding='utf-8') as f:
            # jieba.cut yields tokens lazily; joining with "" reproduces the
            # original segmented-text string.
            content.append("".join(jieba.cut(f.read())))
            label.append(leibie)
    return content, label
# Remaining THUCNews categories: "家居","房产","教育","时尚","时政","游戏","科技","财经"
leibie = ["体育", "娱乐", "家居", "房产"]

content = []
label = []

# Gather the documents and labels of every selected category into two
# flat, parallel lists.
for category in leibie:
    category_dir = 'D:/python/Workspace/FNLP/thucnews' + "/" + category + "/"
    docs, tags = loadfile(category_dir, category)
    content += docs
    label += tags
# Shuffle and split the dataset
def data_split(data, label, depth=-1, tag=1):
    """Shuffle the dataset and split it 80/20 into train and test sets.

    Args:
        data: list of samples.
        label: list of labels, parallel to *data*.
        depth: one-hot depth; -1 (the default) disables one-hot encoding.
        tag: marker flag — when 1 (and depth != -1) the feature lists are
            converted to numpy arrays and the integer labels to one-hot rows.

    Returns:
        X_train, X_test, y_train, y_test
    """
    total = len(label)
    indices = np.arange(total)
    np.random.shuffle(indices)
    train_num = int(total * 0.8)  # 80% of the shuffled samples train

    X_train = [data[i] for i in indices[:train_num]]
    y_train = [label[i] for i in indices[:train_num]]
    X_test = [data[i] for i in indices[train_num:]]
    y_test = [label[i] for i in indices[train_num:]]

    if tag == 1 and depth != -1:
        X_train = np.asarray(X_train)
        X_test = np.asarray(X_test)
        # BUGFIX: the original branch called tf.one_hot although TensorFlow
        # was never imported, and referenced the undefined name 'deepth'
        # (the parameter is 'depth') — a guaranteed NameError when taken.
        # np.eye produces the same one-hot encoding for integer labels.
        y_train = np.eye(depth)[np.asarray(y_train, dtype=int)]
        y_test = np.eye(depth)[np.asarray(y_test, dtype=int)]
    return X_train, X_test, y_train, y_test
# Split the corpus 80/20 and vectorize it with TF-IDF.
X_train, X_test, y_train, y_test = data_split(content, label)

with open('./ChineseStopWords.txt', encoding='utf-8') as file:
    stopwords = file.read().split("\n")

tfidf = TfidfVectorizer(stop_words=stopwords, max_df=0.5)
traindata = tfidf.fit_transform(X_train)
testdata = tfidf.transform(X_test)

# Multinomial Naive Bayes classifier
nb_model = MultinomialNB(alpha=0.001)
nb_model.fit(traindata, y_train)

predict_test = nb_model.predict(testdata)
print(predict_test)
print("多项式朴素贝叶斯文本的分类准确率为：", metrics.accuracy_score(predict_test, y_test))

# Optionally persist the trained model:
# dirs='./textModel'
# if not os.path.exists(dirs):
#     os.makedirs(dirs)
# joblib.dump(nb_model,dirs+'/NB.pkl')