import pickle
import numpy as np
from sklearn import metrics

import os
import random
import shutil

# Persist raw bytes to disk.
def savefile(savepath, content):
    """Write *content* (bytes) to the file at *savepath*, overwriting any existing file."""
    with open(savepath, "wb") as out:
        out.write(content)

# Read a whole file back as bytes.
def readfile(path):
    """Return the entire contents of *path* as a bytes object."""
    with open(path, "rb") as handle:
        return handle.read()

def writebunchobj(path, bunchobj):
    """Serialize *bunchobj* to *path* using pickle (binary mode)."""
    with open(path, "wb") as sink:
        pickle.dump(bunchobj, sink)

# Load a pickled bunch object from disk.
def readbunchobj(path):
    """Deserialize and return the pickled object stored at *path*.

    NOTE(review): pickle.load can execute arbitrary code from the file —
    only use on trusted, locally produced files.
    """
    with open(path, "rb") as src:
        return pickle.load(src)

# Report error rate and weighted precision / recall / F1 for a prediction run.
def metrics_result(actual, predict, total):
    """Print classification-quality metrics and return the error rate.

    Parameters
    ----------
    actual : array-like of true labels
    predict : array-like of predicted labels
    total : int — denominator used for the error-rate percentage

    Returns
    -------
    float : error rate in percent (also printed). Returning the value is
    backward-compatible — the original returned None and callers that
    ignore the return are unaffected.
    """
    # Coerce to ndarrays so `!=` compares element-wise. With plain Python
    # lists, `predict != actual` is a single bool and np.sum would yield
    # 0 or 1 regardless of how many labels actually differ.
    actual = np.asarray(actual)
    predict = np.asarray(predict)
    error_count = int(np.sum(predict != actual))
    error_rate = error_count * 100.0 / float(total)
    print("模型错误率:", error_rate, "%")

    print('精度:{0:.3f}'.format(metrics.precision_score(actual, predict, average='weighted')))
    print('召回:{0:.3f}'.format(metrics.recall_score(actual, predict, average='weighted')))
    print('f1-score:{0:.3f}'.format(metrics.f1_score(actual, predict, average='weighted')))
    return error_rate


def print_dir_len(path):
    """Print `<category> <file count>` for every subdirectory of *path*.

    Fixes over the original:
    - uses os.path.join, so *path* no longer has to end with '/'
      (the old `path+mydir+'/'` concatenation produced a broken path
      otherwise);
    - skips non-directory entries instead of crashing os.listdir on a
      stray file sitting next to the category directories.
    """
    for category in os.listdir(path):  # one entry per corpus category
        class_path = os.path.join(path, category)
        if not os.path.isdir(class_path):
            continue  # ignore regular files mixed in with category dirs
        print(category, len(os.listdir(class_path)))


if __name__ == '__main__':

    # Report per-category document counts for the train and test corpora.
    # (Earlier exploratory runs over other datasets were removed; point the
    # tuple below at any corpus root to inspect it.)
    for label, corpus_dir in (("train_corpus: ", './train_corpus/'),
                              ("test_corpus: ", './test_corpus/')):
        print('\n——————分隔线——————\n')
        print(label)
        print_dir_len(corpus_dir)

# Assume a stopword list file (e.g. stopwords.txt) with one stopword per line.
def load_stopwords(stopwords_file_path):
    """Return the set of stopwords read from *stopwords_file_path*.

    Each line is whitespace-stripped; duplicate lines collapse into the set.
    """
    with open(stopwords_file_path, 'r', encoding='utf-8') as f:
        return {line.strip() for line in f}