﻿# -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 18:10:32 2020

@author: LYL
"""
import sys

from datetime import datetime

import re

from tqdm import tqdm

import jieba

import time

import pickle  # 持久化

from sklearn.naive_bayes import MultinomialNB  # 多项式贝叶斯算法

from sklearn.feature_extraction.text import TfidfTransformer  # TF-IDF向量转换类

from sklearn.feature_extraction.text import TfidfVectorizer  # TF_IDF向量生成类

from sklearn.datasets.base import Bunch

# ---- Path constants (Windows drive paths; corpus is Chinese "column articles") ----

# Relative directory holding the article corpus.
path="专栏文章/"

# Directory where classification results are written (unused in this chunk).
resultPath="H:/专栏文章/result/"

# Directory for persisted models (unused in this chunk).
# NOTE(review): "moelPath" looks like a typo for "modelPath"; kept as-is
# because code outside this chunk may reference the name.
moelPath="H:/专栏文章/model/"

# Pickled Bunch of the tokenized training set (unused in this chunk).
train_set="H:/专栏文章/bunch/train_set.dat"

# Pickled Bunch holding the training TF-IDF matrix (tdm), labels and
# vocabulary — loaded by predict() via readBunch().
tf_set="H:/专栏文章/bunch/tf_set.dat"

# Alternate TF-IDF space file (unused in this chunk).
tf_set1="H:/分类/tfidfspace.dat"

# Stop-word list, one word per line — loaded by getStopWord().
stopWordsFile='H:/专栏文章/stopword.txt'

# Sample document for manual testing (unused in this chunk).
testFile='H:/原始数据/兴趣/4211157.txt'

# Pickled Bunch of the test set (unused in this chunk).
test_set="H:/专栏文章/bunch/test_set.dat"

# Output file for prediction results (unused in this chunk).
resultFile='H:/原始数据/4211157.txt'

# Pickled TF-IDF space of the test set (unused in this chunk).
testSpace="H:/专栏文章/bunch/testspace.dat"


#fatherLists = os.listdir(path)  # main directory (commented-out legacy code)



def predict(ss,title):
    """Classify one document with the pre-trained Naive Bayes model.

    Parameters
    ----------
    ss : str
        Raw document text (may contain punctuation / whitespace).
    title : str
        Identifier stored in the test bunch's ``filenames`` list.

    Returns
    -------
    The predicted class label (also printed to stdout).
    """
    # Keep only word characters and CJK ideographs; raw string avoids the
    # invalid-escape SyntaxWarning of the original non-raw '\w'.
    cleaned = re.sub(r'[^\w\u4e00-\u9fff]+', '', ss)
    cleaned = cleaned.replace("\r\n", "").strip()  # drop stray breaks / edge spaces
    words = " ".join(jieba.cut(cleaned))  # space-joined tokens for the vectorizer

    bunch = Bunch(target_name=[], label=[], filenames=[], contents=[])
    bunch.filenames.append(title)
    bunch.contents.append(words.strip())

    stopWords = getStopWord(stopWordsFile)  # stop-word list

    # Test-set TF-IDF container mirroring the training bunch layout.
    testSet = Bunch(target_name=bunch.target_name, label=bunch.label,
                    filenames=bunch.filenames, tdm=[], vocabulary={})

    # Load the training-set TF-IDF space (tdm, label, vocabulary).
    trainSet = readBunch(tf_set)

    # Reuse the training vocabulary so test features align with the model.
    # NOTE(review): fit_transform recomputes IDF on this single document,
    # same as the original code — confirm this matches training-time weighting.
    vectorizer = TfidfVectorizer(stop_words=stopWords, sublinear_tf=True, max_df=0.5,
                                 vocabulary=trainSet.vocabulary)
    testSet.tdm = vectorizer.fit_transform(bunch.contents)
    testSet.vocabulary = trainSet.vocabulary

    # alpha=0.001: smaller alpha means less Laplace smoothing.
    clf = MultinomialNB(alpha=0.001).fit(trainSet.tdm, trainSet.label)
    predicted = clf.predict(testSet.tdm)

    print(predicted[0])
    return predicted[0]  # fix: expose the label to callers instead of returning None
    


def getStopWord(inputFile):
    """Return the stop-word list: one word per line of *inputFile*.

    Fix: the original routed the file through readFile(), whose character
    filter strips newlines, so splitlines() collapsed the whole file into a
    single-element list and the stop words were effectively ignored.  Read
    the file directly to preserve the line structure.
    """
    with open(inputFile, 'r', encoding='utf-8', errors='ignore') as f:
        return f.read().splitlines()
def readBunch(path):
    """Deserialize and return the pickled object stored at *path*.

    NOTE(review): the parameter shadows the module-level ``path`` constant;
    name kept for caller compatibility.
    """
    # SECURITY: pickle.load executes arbitrary code — only use on trusted files.
    with open(path, 'rb') as fh:
        return pickle.load(fh)
def readFile(file):
    """Read *file* as UTF-8 (decode errors ignored) and return its content
    with every character that is not a word character or a CJK ideograph
    removed.

    Fixes: removed the unreachable ``f.close()`` that sat after the
    ``return`` inside the ``with`` block (the context manager already
    closes the file); regex is now a raw string to avoid the invalid
    escape-sequence warning for ``\\w``.
    """
    with open(file, 'r', errors='ignore', encoding='utf-8') as f:
        content = f.read()
    return re.sub(r'[^\w\u4e00-\u9fff]+', '', content)

if __name__ == '__main__':
    # Usage: python <script> <text-to-classify>
    # fix: fail with a usage message instead of a bare IndexError when
    # no command-line argument is supplied.
    if len(sys.argv) < 2:
        sys.exit("usage: python <script> <text-to-classify>")
    ss = sys.argv[1]
    predict(ss, ss)  # the raw text doubles as the document title
