from pyhanlp import SafeJClass

import zipfile
import os
from pyhanlp.static import download, remove_file, HANLP_DATA_PATH


def test_data_path():
    """
    Return the test-data directory ($HANLP_DATA_PATH/test), creating it
    on first use.  The root directory comes from the pyhanlp config.

    :return: path of the test data directory
    """
    data_path = os.path.join(HANLP_DATA_PATH, 'test')
    # makedirs(..., exist_ok=True) avoids the isdir/mkdir TOCTOU race and,
    # unlike os.mkdir, also creates any missing parent directories instead
    # of raising FileNotFoundError.
    os.makedirs(data_path, exist_ok=True)
    return data_path


## Ensure the corpus exists locally, downloading it automatically if absent.
def ensure_data(data_name, data_url):
    """
    Return the local path of *data_name* under the test-data directory,
    downloading it from *data_url* on first use.  A ``.zip`` URL is
    downloaded to a temporary archive, extracted in place, and the
    archive file is then removed.
    """
    base_dir = test_data_path()
    target = os.path.join(base_dir, data_name)
    if os.path.exists(target):
        # Already present (previous download / extraction) — nothing to do.
        return target

    is_zip = data_url.endswith('.zip')
    download_path = target + '.zip' if is_zip else target
    download(data_url, download_path)
    if is_zip:
        with zipfile.ZipFile(download_path, "r") as archive:
            archive.extractall(base_dir)
        remove_file(download_path)
    return target


# Download (on first run) the mini Sogou text-classification corpus; the
# resulting path is used below both as training data and as the base name
# of the serialized model file.
sogou_corpus_path = ensure_data('搜狗文本分类语料库迷你版',
                                'http://file.hankcs.com/corpus/sogou-text-classification-corpus-mini.zip')

## ===============================================
## Naive Bayes classification starts here


# HanLP Java classes exposed through pyhanlp's SafeJClass proxy.
NaiveBayesClassifier = SafeJClass('com.hankcs.hanlp.classification.classifiers.NaiveBayesClassifier')
IOUtil = SafeJClass('com.hankcs.hanlp.corpus.io.IOUtil')


def train_or_load_classifier():
    """
    Return a NaiveBayesClassifier for the Sogou corpus.

    A previously serialized model (``<corpus>.ser``) is loaded when
    available; otherwise a fresh model is trained on the corpus,
    persisted next to it, and wrapped in a classifier.
    """
    model_path = sogou_corpus_path + '.ser'
    if os.path.isfile(model_path):
        # Reuse the cached model instead of retraining.
        return NaiveBayesClassifier(IOUtil.readObjectFrom(model_path))

    trainer = NaiveBayesClassifier()  # untrained naive Bayes classifier
    trainer.train(sogou_corpus_path)
    trained_model = trainer.getModel()
    IOUtil.saveObjectTo(trained_model, model_path)  # cache for next run
    return NaiveBayesClassifier(trained_model)


def loadArticle(fileName):
    """
    Read a test article file and normalise each line.

    Removes the ``<content>``/``</content>`` markers, every ASCII and
    full-width (U+3000) space, and surrounding whitespace from each line.

    :param fileName: path of the UTF-8 text file to read
    :return: list with one cleaned string per input line (blank lines are
             kept as empty strings)
    """
    # One C-level pass deletes both space variants (faster than chained
    # .replace calls); tags are stripped first, exactly as before.
    space_table = str.maketrans('', '', ' \u3000')
    cleaned = []
    with open(fileName, encoding='utf-8') as file:
        # Iterate the file lazily instead of materialising it with readlines().
        for line in file:
            line = line.replace("<content>", "").replace("</content>", "")
            cleaned.append(line.translate(space_table).strip())
    return cleaned


def predict(classifier, text, results=None):
    """
    Classify *text*, print the predicted label, and record the text in a
    per-label results mapping.

    :param classifier: object exposing ``classify(text) -> label``
    :param text: document to classify
    :param results: optional mapping of label -> list of texts; defaults
                    to the module-level ``result_dict`` (populated in
                    ``__main__``) for backward compatibility
    """
    if results is None:
        results = result_dict  # legacy global from the __main__ block
    label = classifier.classify(text)  # classify once, reuse the result
    print("text", text, "label", label)
    # setdefault avoids a KeyError if the label was not pre-registered.
    results.setdefault(label, []).append(text)
    # For the full probability distribution over labels, use the predict API:
    # print("《%16s》\t属于分类\t【%s】" % (text, classifier.predict(text)))


if __name__ == '__main__':
    classifier = train_or_load_classifier()
    test_article = loadArticle("mytest.txt")
    # Buckets for the five corpus categories
    # (sports / health / military / education / cars).
    result_dict = {'体育': [], '健康': [], '军事': [], '教育': [], '汽车': []}
    # Iterate the articles directly instead of indexing via range(len(...)).
    for article in test_article:
        predict(classifier, article)

    # Save each category's texts to its own txt file.
    output_dir = 'bayes_output'
    # exist_ok=True avoids the exists()/makedirs() race.
    os.makedirs(output_dir, exist_ok=True)

    for class_name, texts in result_dict.items():
        output_file = os.path.join(output_dir, '{}.txt'.format(class_name))
        with open(output_file, 'w', encoding='utf-8') as f:
            # writelines batches the output in a single call.
            f.writelines(text + '\n' for text in texts)
