import jieba
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
def loaddata(path, classlabel):
    """Read every file in directory *path*, segment it with jieba, and
    return the corpus together with its labels.

    Each file is read as raw bytes, word-segmented, and joined into a
    single space-separated string (the format CountVectorizer expects).

    :param path: directory containing one document per file
    :param classlabel: label assigned to every document found in *path*
    :return: tuple (textdata, classall) — list of segmented document
             strings and a list of *classlabel* of the same length
    """
    textdata = []
    classall = []
    for thisfile in os.listdir(path):
        # 'with' guarantees the file handle is closed (the original
        # open(...).read() leaked it); os.path.join is portable.
        with open(os.path.join(path, thisfile), 'rb') as f:
            data = f.read().strip()
        # ' '.join is linear; the original text += token loop was quadratic.
        textdata.append(' '.join(jieba.cut(data)))
        classall.append(classlabel)
    return textdata, classall
# Build the training corpus from two labelled directories:
# label 0 = medical articles, label 1 = finance articles.
text1, class1 = loaddata('testdata/医疗', 0)
text2, class2 = loaddata('testdata/财经', 1)
train_text = text1 + text2
classall = class1 + class2

# Bag-of-words term counts for the training documents.
count_vect = CountVectorizer()
train_x_counts = count_vect.fit_transform(train_text)

# TF-IDF weighting; fit_transform replaces the original separate
# fit(...) + transform(...) calls (identical result, one pass).
tf_ts = TfidfTransformer(use_idf=True)
train_x_tf = tf_ts.fit_transform(train_x_counts)
print(train_x_tf)

# Train the multinomial naive-Bayes classifier.
clf = MultinomialNB().fit(train_x_tf, classall)

# Classify pre-segmented test sentences (tokens already separated by
# spaces, matching the training-corpus format produced by loaddata).
test_data = ['阿里巴巴 融资 5 亿元', '我 一直 在 健身', '今年 盈利了 5亿元']
test_x_counts = count_vect.transform(test_data)
test_x_tfidf = tf_ts.transform(test_x_counts)
predicted = clf.predict(test_x_tfidf)
print(predicted)



