# This is a sample Python script.

# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.



# See PyCharm help at https://www.jetbrains.com/help/py
import pandas as pd
import jieba
import jieba.posseg as pssg
from sklearn import svm

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import OneHotEncoder
from sklearn import preprocessing
from sklearn import tree
from sklearn.multiclass import  OneVsRestClassifier
import numpy as np


# Load the raw train/test splits. read_csv already returns a DataFrame,
# so the previous pd.DataFrame(...) wrapper was redundant.
train_data = pd.read_csv('C:/Users/行者夏/Desktop/dataset/train.csv')
test_data = pd.read_csv('C:/Users/行者夏/Desktop/dataset/test.csv')

# info2 = news text, info4 = class label; info1/info3 are unused here.
train_data.columns = ['info1', 'info2', 'info3', 'info4']
test_data.columns = ['info1', 'info2', 'info3']

# Data cleaning: drop rows whose news text ('info2') is missing.
train_sample = train_data['info2'].dropna()
test_sample = test_data['info2'].dropna()
print("清洗前的训练集新闻数量为：", train_data.shape[0])
print("清洗后的训练集新闻数量为：", train_sample.shape[0])
# BUG FIX: labels must come from the SAME rows that survived the dropna
# above. Previously train_label = train_data[['info4']] kept every row, so
# whenever 'info2' had missing values the features and labels were of
# different lengths / misaligned at fit time.
train_label = train_data.loc[train_sample.index, 'info4']

# NOTE(review): two abandoned experiments lived here as commented-out code
# and were removed — (1) loading a stopword list from stopwords.txt, and
# (2) POS-tagged segmentation via jieba.posseg (reported to perform worse
# than plain jieba.cut). Recover them from version control if needed.

# Segment with jieba.cut and rebuild each document as a space-separated
# token string — the whitespace-delimited format TfidfVectorizer tokenizes.
def _segment(texts):
    """Return *texts* (a pandas Series of strings) with each entry replaced
    by its space-joined jieba tokens.

    " ".join(...) replaces the original quadratic string concatenation; the
    only textual difference is the dropped trailing space, which does not
    change TF-IDF tokenization.
    """
    return texts.map(lambda text: " ".join(jieba.cut(text)))


train_sample = _segment(train_sample)
test_sample = _segment(test_sample)


# TF-IDF vectorization: fit the vocabulary/IDF weights on the training
# texts only, then reuse the fitted vectorizer on the test texts so both
# matrices share the same feature space.
vectorizer = TfidfVectorizer()
vectorizer.fit(train_sample)
train_sample = vectorizer.transform(train_sample)
test_sample = vectorizer.transform(test_sample)


# Multinomial Naive Bayes classifier; alpha=0.1 was chosen by manual
# tuning (reported to give the highest accuracy on this dataset).
clf = MultinomialNB(alpha=0.1)
# np.ravel flattens the label frame/column into a 1-D array, avoiding
# sklearn's DataConversionWarning about a column-vector y.
clf.fit(train_sample, np.ravel(train_label))
test_result = clf.predict(test_sample)
# fmt="%d" assumes integer class labels — TODO confirm against train.csv.
np.savetxt('C:/Users/行者夏/Desktop/dataset/result_bayes.txt', test_result, fmt="%d")
#np.savetxt('./result_bayes.txt', test_result, fmt="%d")











