from split_data import *
from make_train_test import *
from pre_process import *
from tfidf import *
from predict import *
def make_split():
    """Split the raw corpus file into per-category files.

    Run this once over the whole dataset; later pipeline stages
    consume the split output.
    """
    raw_path = 'toutiao_cat_data.txt'
    split_txt(raw_path)

def make_tt():
    """Build training and test sets from the split data.

    May be run repeatedly to draw different random samples.
    """
    split_dir = 'split_data'
    # Regenerates the training and test sets together.
    make_train_test(split_dir)

    # Typically the training set stays fixed; refresh each part separately.
    make_train(split_dir)
    make_test()

def makebunch():
    """Serialize the train and test text files into Bunch objects on disk."""
    train_bunch_path = 'bunch_obj/train_set.dat'  # where the train Bunch is written
    test_bunch_path = 'bunch_obj/test_set.dat'    # where the test Bunch is written
    train_txt = 'train.txt'
    test_txt = 'test.txt'
    # Bunch-ify the training set, then the test set.
    make_Bunch(train_bunch_path, train_txt)
    make_Bunch(test_bunch_path, test_txt)
    
def make_vectorspace():
    """Build TF-IDF vector spaces for the train and test Bunch objects.

    The test space is built against the training space so both share
    the same feature vocabulary/dimensions.
    """
    stopword_path = 'stop_words.txt'                       # stop-word list
    bunch_path_train = 'bunch_obj/train_set.dat'           # train Bunch location
    bunch_path_test = 'bunch_obj/test_set.dat'             # test Bunch location
    space_path_train = 'tfidf_bunch/train_tfidfspace.dat'  # train TF-IDF space output
    space_path_test = 'tfidf_bunch/test_tfidfspace.dat'    # test TF-IDF space output
    vector_space(stopword_path, bunch_path_train, space_path_train)
    # Reuse space_path_train instead of re-spelling the same literal
    # (the original duplicated it in a separate train_tfidf_path variable,
    # risking silent divergence if one copy was ever edited).
    vector_space(stopword_path, bunch_path_test, space_path_test, space_path_train)


def main():
    """Pipeline driver; uncomment earlier stages to (re)run them in order."""
    # make_split()         # 1. split the raw corpus by category
    # make_tt()            # 2. sample train/test sets (re-runnable)
    # makebunch()          # 3. preprocess and store as Bunch objects
    # make_vectorspace()   # 4. build the TF-IDF feature spaces
    result_show()
    
# Script entry point: only run the pipeline when executed directly.
if __name__ == "__main__":
    main()

