# -*- coding: utf-8 -*-
import csv
import inspect
import logging
import multiprocessing
import os
import re
import string
from copy import deepcopy
from functools import wraps
from traceback import format_exc

from zhon import hanzi
import func_timeout
import jionlp as jio
import langid
import pandas as pd
import pdfplumber
import psutil
import jsonlines
from ftlangdetect import detect as ft_detect
from func_timeout import func_set_timeout
from harvesttext import HarvestText
from jionlp import clean_text
from jionlp import remove_id_card
from jionlp import remove_ip_address
from jionlp import remove_qq
from jionlp import tra2sim
from langdetect import detect
from logzero import logger
from logzero import loglevel
from loguru import logger
from nltk.tokenize import sent_tokenize
from orderedset import OrderedSet

# from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
# punkt_param = PunktParameters()
# abbreviation = ['i.e']
# punkt_param.abbrev_types = set(abbreviation)
# tokenizer = PunktSentenceTokenizer(punkt_param)


# Quiet jionlp's own logging (WARN and above only, no log directory on disk).
jio.logging = jio.set_logger(level="WARN", log_dir_name=None)

import pkg_resources
from symspellpy.symspellpy import SymSpell

# Worker-process count: reserve 8 cores for the rest of the system, cap at 96.
CORES = min(psutil.cpu_count() - 8, 96)

# Task table consumed by the driver: each entry names a Sample.* handler
# ('func'), a corpus sub-path ('path'), and kwargs forwarded to
# pandas DataFrame.sample ('sample').  Commented-out entries are corpora
# that were processed in earlier runs and are kept for reference.
tasks = [
    {
        'func': 'wanjuan_Exam',
        'path': 'Exam-cn',
        'sample': {
            # 'n': 10,
            'frac': 0.1,
        },
    },
    # # Chinese datasets
    # {
    #     'func': 'wanjuan',
    #     'path': 'chinese-corpus/WuDaoCorpus2.0_base_200G',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # Chinese-English parallel corpora
    # {
    #     'func': 'en_zh_corpus',
    #     'path': '',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.1,
    #     },
    # },
    # {
    #     'func': 'en_zh_corpus',
    #     'path': 'en_zh-corpus/cwmt',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.1,
    #     },
    # },
    # {
    #     'func': 'en_zh_corpus',
    #     'path': 'en_zh-corpus/news-commentry',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.1,
    #     },
    # },
    # {
    #     'func': 'en_zh_corpus',
    #     'path': 'en_zh-corpus/others',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.1,
    #     },
    # },
    # {
    #     'func': 'en_zh_corpus',
    #     'path': 'en_zh-corpus/ParaCrawl_v9',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.1,
    #     },
    # },
    # {
    #     'func': 'en_zh_corpus',
    #     'path': 'en_zh-corpus/ted',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.1,
    #     },
    # },
    # {
    #     'func': 'en_zh_corpus',
    #     'path': 'en_zh-corpus/UNCorpus_en_zh',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.1,
    #     },
    # },
    # {
    #     'func': 'en_zh_corpus',
    #     'path': 'en_zh-corpus/OPUS',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.1,
    #     },
    # },
    # {
    #     'func': 'en_zh_corpus',
    #     'path': 'en_zh-corpus/lanjun_en_zh',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.1,
    #     },
    # },
    # # English datasets
    # {
    #     'func': 'redpajama',
    #     'path': '',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # {
    #     'func': 'redpajama',
    #     'path': 'english-corpus/c4',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # {
    #     'func': 'redpajama',
    #     'path': 'english-corpus/common_crawl/2023-06',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # {
    #     'func': 'redpajama',
    #     'path': 'english-corpus/github',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # {
    #     'func': 'redpajama',
    #     'path': 'english-corpus/stackexchange',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # {
    #     'func': 'redpajama',
    #     'path': 'english-corpus/wikipedia',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # {
    #     'func': 'redpajama',
    #     'path': 'english-corpus/book',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO blue-team (lanjun) data, use as-is, jsonlines
    # {
    #     'func': 'qingbao',
    #     'path': '02-lanjun/corpus-data/pre-data/bl-0602-jsonl',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO Chinese books, duplicated files, clean line by line
    # {
    #     'func': 'qingbao_zh_common',
    #     'path': '',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO Chinese books, duplicated files, clean line by line
    # {
    #     'func': 'qingbao_zh_common',
    #     'path': '01-qingbao/wiki/release_v3.0/cn/books',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO Chinese, disclaimers, clean line by line
    # {
    #     'func': 'qingbao_zh_common',
    #     'path': '01-qingbao/wiki/release_v3.0/cn/others/classified_protection',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # TODO Chinese, basic cleaning, ads
    # {
    #     'func': 'qingbao_zh_common',
    #     'path': '',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO Chinese, biographical introductions
    # {
    #     'func': 'qingbao_zh_common',
    #     'path': '01-qingbao/wiki/release_v3.0/cn/others/wiki',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO Chinese, lots of junk content (drop files under 0.5k and files with too few Chinese chars)
    # {
    #     'func': 'qingbao_zh_common',
    #     'path': '',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO Chinese, company whitepapers, don't clean too aggressively
    # {
    #     'func': 'qingbao_zh_sangfor',
    #     'path': '01-qingbao/wiki/release_v3.0/cn/others/sangfor',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO English, basic cleaning - has run-together words - has mixed languages
    # {
    #     'func': 'qingbao_en_common',
    #     'path': '',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO English, basic cleaning - has run-together words - no mixed languages
    # {
    #     'func': 'qingbao_en_common',
    #     'path': '01-qingbao/wiki/release_v3.0/en/papers',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO English, basic cleaning, txt (drop files under 1k) - has run-together words - no mixed languages
    # {
    #     'func': 'qingbao_en_common',
    #     'path': '01-qingbao/wiki/release_v3.0/en/websites',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    #
    # # TODO English, basic cleaning - no run-together words - no mixed languages
    # {
    #     'func': 'qingbao_en_common',
    #     'path': '01-qingbao/wiki/new_data_0627/d3_paperAbstract',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO English, basic cleaning - no run-together words - no mixed languages - fix run-together words if garbled
    # {
    #     'func': 'qingbao_en_common',
    #     'path': '01-qingbao/wiki/new_data_0627/train_sentences',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO English, basic cleaning, jsonlines - no run-together words - no mixed languages
    # {
    #     'func': 'qingbao_en_jsonl',
    #     'path': '01-qingbao/wiki/release_v3.0/en/others/wiki/en_wiki_sec',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    #
    # # TODO new intelligence data
    # {
    #     'func': 'qingbao_en_jsonl',
    #     'path': '01-qingbao/wiki/en/books',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # {
    #     'func': 'qingbao_en_jsonl',
    #     'path': '01-qingbao/wiki/en/others',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # {
    #     'func': 'qingbao_en_jsonl',
    #     'path': '01-qingbao/wiki/en/papers',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # {
    #     'func': 'qingbao_en_jsonl',
    #     'path': '01-qingbao/wiki/en/websites',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO intelligence English - new security corpus
    # {
    #     'func': 'qingbao_en_jsonl',
    #     'path': '01-qingbao/new_data/en',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # # TODO intelligence Chinese - new security corpus
    # {
    #     'func': 'qingbao_zh_jsonl',
    #     'path': '01-qingbao/new_data/zh',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
    # TODO intelligence Chinese - new security corpus
    # {
    #     'func': 'qingbao_zh_txt',
    #     'path': '',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 1,
    #     },
    # },
    # # TODO MSS PDF
    # {
    #     'func': 'mss_pdf_zh',
    #     'path': '07-mss/zh',
    #     'sample': {
    #         # 'n': 10,
    #         'frac': 0.005,
    #     },
    # },
]


def exception_decorator(exception_tuple=(OSError,), exp_return=None):
    """Build a decorator that traps the given exception types.

    :param exception_tuple: exception classes to catch
    :param exp_return: fallback value returned after an exception is caught
    :return: the configured decorator
    """

    def decorator(func):
        """Wrap ``func`` so that listed exceptions are logged, not raised.

        :param func: the callable being decorated
        :return: the wrapping callable
        """

        @wraps(func)
        def catch_exception(*args, **kwargs):
            """Invoke the wrapped callable, logging and absorbing failures.

            :param args: positional arguments forwarded to ``func``
            :param kwargs: keyword arguments forwarded to ``func``
            :return: ``func``'s result, or a copy of ``exp_return`` on error
            """
            try:
                result = func(*args, **kwargs)
            except exception_tuple as exp:
                # Full traceback goes to the log for post-mortem debugging.
                logger.error("error:{}, function:{}, args:{}, kwargs:{}, traceback:{}".format(exp, func.__name__, args,
                                                                                              kwargs, format_exc()))
                # Deep-copy the fallback so callers cannot mutate the shared default.
                return deepcopy(exp_return)
            return result

        return catch_exception

    return decorator


class CleanData:
    """Static text-cleaning utilities.

    Groups boilerplate removal (jionlp + HarvestText), ratio-based quality
    rules, fasttext/langdetect language checks, and sensitive-word filtering.
    All methods are wrapped (where marked) in ``exception_decorator`` so a
    failure yields the decorator's fallback value instead of raising.
    """

    @classmethod
    def jionlp_clean(cls, content, stop_words=("转发微博",)):
        """Baseline cleaning pass.

        jionlp stage removes: HTML tags, full-width chars (-> half-width),
        exception chars, redundant chars, parenthesised content, URLs,
        emails, phone numbers and their prefixes, then IP addresses,
        ID-card numbers, QQ numbers, and converts traditional Chinese to
        simplified.  HarvestText stage additionally strips weibo
        @-mentions/topics, emoji, the given stop terms, and duplicate spaces.

        :param content: raw text (coerced to str)
        :param stop_words: terms HarvestText removes outright
        :return: cleaned text
        """
        clean_content = clean_text(
            str(content),
            remove_html_tag=True,
            convert_full2half=True,
            remove_exception_char=True,
            remove_redundant_char=True,
            remove_parentheses=True,
            remove_url=True,
            remove_email=True,
            remove_phone_number=True,
            delete_prefix=True,
        )
        clean_content = remove_ip_address(clean_content)
        clean_content = remove_id_card(clean_content)
        clean_content = remove_qq(clean_content)
        # Traditional -> simplified Chinese.
        clean_content = tra2sim(clean_content)

        ht = HarvestText()
        clean_content = ht.clean_text(
            str(clean_content),
            remove_url=True,
            email=True,
            weibo_at=True,
            stop_terms=stop_words,
            emoji=True,
            weibo_topic=True,
            deduplicate_space=True,
        )
        return clean_content

    @classmethod
    def jionlp_clean_zh(cls, content, ht, stop_words=("转发微博",)):
        """Chinese-text variant of :meth:`jionlp_clean`.

        Differences from ``jionlp_clean``: takes a caller-supplied
        ``HarvestText`` instance (avoids re-instantiating per line), keeps
        parenthesised content, and extends the redundant-char set.

        NOTE(review): ``stop_words`` is accepted but never forwarded to
        ``ht.clean_text`` — confirm whether ``stop_terms=stop_words`` was
        intended here.

        :param content: raw text (coerced to str); falsy input returns ''
        :param ht: a reusable HarvestText instance
        :param stop_words: currently unused (see note above)
        :return: cleaned text
        """
        if not content:
            return ''
        clean_content = clean_text(
            str(content),
            remove_html_tag=True,
            convert_full2half=True,
            remove_exception_char=True,
            remove_redundant_char=True,
            remove_url=True,
            remove_email=True,
            remove_phone_number=True,
            delete_prefix=True,
            redundant_chars=' -\t\n啊哈呀~\u3000\xa0•·・,',
        )
        clean_content = remove_ip_address(clean_content)
        clean_content = remove_id_card(clean_content)
        clean_content = remove_qq(clean_content)
        # Traditional -> simplified Chinese.
        clean_content = tra2sim(clean_content)

        clean_content = ht.clean_text(
            str(clean_content),
            remove_url=True,
            email=True,
            weibo_at=True,
            emoji=True,
            weibo_topic=True,
            deduplicate_space=True
        )

        return clean_content

    @classmethod
    def jionlp_clean_en(cls, content, ht, stop_words=("转发微博",)):
        """English-text variant of :meth:`jionlp_clean_zh`.

        Identical to ``jionlp_clean_zh`` except the traditional->simplified
        Chinese conversion is skipped.

        NOTE(review): ``stop_words`` is accepted but never forwarded to
        ``ht.clean_text`` — confirm whether that was intended.

        :param content: raw text (coerced to str); falsy input returns ''
        :param ht: a reusable HarvestText instance
        :param stop_words: currently unused (see note above)
        :return: cleaned text
        """
        if not content:
            return ''
        clean_content = clean_text(
            str(content),
            remove_html_tag=True,
            convert_full2half=True,
            remove_exception_char=True,
            remove_redundant_char=True,
            remove_url=True,
            remove_email=True,
            remove_phone_number=True,
            delete_prefix=True,
            redundant_chars=' -\t\n啊哈呀~\u3000\xa0•·・,',
        )
        clean_content = remove_ip_address(clean_content)
        clean_content = remove_id_card(clean_content)
        clean_content = remove_qq(clean_content)

        clean_content = ht.clean_text(
            str(clean_content),
            remove_url=True,
            email=True,
            weibo_at=True,
            emoji=True,
            weibo_topic=True,
            deduplicate_space=True,
        )

        return clean_content

    @classmethod
    def jionlp_clean_mss(cls, content, ht, stop_words=("转发微博",)):
        """Conservative cleaning pass for MSS PDF text.

        Deliberately gentler than the other variants: HTML tags, URLs and
        markup tags are KEPT (``remove_html_tag=False``, ``remove_url=False``,
        ``remove_tags=False``); IP/ID-card/QQ removal and the traditional ->
        simplified conversion are also skipped.

        :param content: raw text (coerced to str); falsy input returns ''
        :param ht: a reusable HarvestText instance
        :param stop_words: terms HarvestText removes outright
        :return: cleaned text
        """
        if not content:
            return ''
        clean_content = clean_text(
            str(content),
            remove_html_tag=False,
            convert_full2half=True,
            remove_exception_char=True,
            remove_redundant_char=True,
            remove_url=False,
            remove_email=True,
            remove_phone_number=True,
            delete_prefix=True,
        )

        clean_content = ht.clean_text(
            str(clean_content),
            remove_url=False,
            email=True,
            weibo_at=True,
            emoji=True,
            weibo_topic=True,
            deduplicate_space=True,
            remove_tags=False,
            stop_terms=stop_words,
        )

        return clean_content

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return='')
    def ruler(cls, content):
        """Drop text that is mostly non-Chinese or has too few Chinese chars.

        Returns '' when non-Chinese characters exceed 50% of the text, or
        when fewer than 6 Chinese characters remain; otherwise returns the
        input unchanged.

        :param content: candidate text; falsy input returns ''
        :return: ``content`` or ''
        """
        if not content:
            return ''
        # punctuation_ratio = len(re.findall(r'[^\w\s]+', content)) / len(content)
        # Ratio of NON-Chinese characters.
        # NOTE(review): despite the name, zh_len counts characters OUTSIDE
        # the CJK range [\u4e00-\u9fa5].
        zh_len = len(''.join(re.findall(r'[^\u4e00-\u9fa5]+', content)))
        content_len = len(content)
        punctuation_ratio = zh_len / content_len
        # Reject: non-Chinese chars over 50%, or Chinese char count <= 5.
        if punctuation_ratio > 0.5 or (content_len - zh_len) <= 5:
            # logger.error('{}, {}, {}, {}'.format(content, content_len - zh_len, content_len, punctuation_ratio))
            return ''
        return content

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return='')
    def ruler_en_punctuation(cls, content):
        """Drop English text that is mostly ASCII punctuation.

        NOTE(review): despite the comment naming an "English letter ratio",
        ``en_len`` counts every character that survives stripping
        ``string.punctuation`` (letters, digits, whitespace, CJK, ...).

        :param content: candidate text; falsy input returns ''
        :return: ``content`` or '' when non-punctuation chars are below 50%
        """
        if not content:
            return ''
        # en_len = len(''.join(re.findall(r'[a-zA-Z]+', content)))
        # Length after removing ASCII punctuation.
        en_len = len(content.translate(str.maketrans('', '', string.punctuation)))
        punctuation_ratio = en_len / len(content)
        # Reject when non-punctuation content is below 50%.
        if punctuation_ratio < 0.5:
            # logger.error('{}, {}'.format(content, punctuation_ratio))
            return ''
        return content

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return='')
    def ruler_zh_punctuation(cls, content):
        """Drop text that is mostly Chinese punctuation/whitespace.

        :param content: candidate text; falsy input returns ''
        :return: ``content`` or '' when non-punctuation chars are below 50%
        """
        if not content:
            return ''
        # Count of characters that are NOT Chinese punctuation or whitespace.
        zh_len = len(content) - len(re.findall("[{}\s]".format(hanzi.punctuation), content))
        ratio = zh_len / len(content)
        # Reject when substantive characters are below 50%.
        if ratio < 0.5:
            # logger.error('{}, {}'.format(content, ratio))
            return ''
        return content

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return='')
    def ruler_number_punctuation(cls, content):
        """Drop text dominated by digits/whitespace/dashes.

        Length-dependent thresholds: >=50% digit-ish for texts up to 200
        chars, >=30% for texts up to 10 chars, or >=75% at any length.

        :param content: candidate text; falsy input returns ''
        :return: ``content`` or ''
        """
        if not content:
            return ''
        # Count digits, whitespace and dashes.
        number_len = len(re.findall("[\d\s\-]", content))
        ratio = number_len / len(content)
        # Reject per the length-dependent thresholds above.
        if (ratio >= 0.5 and len(content) <= 200) or \
                (ratio >= 0.3 and len(content) <= 10) or ratio >= 0.75:
            # logger.error('{}, {}'.format(content, ratio))
            return ''
        return content

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return='')
    def ruler_no_zh_en_punctuation(cls, content):
        """Drop text where chars that are neither Chinese punctuation nor
        CJK/alphanumeric make up 50% or more.

        :param content: candidate text; falsy input returns ''
        :return: ``content`` or ''
        """
        if not content:
            return ''
        # Total minus Chinese punctuation minus CJK/ASCII alphanumerics.
        no_en_zh_len = len(content) - \
                       len(re.findall("[{}]".format(hanzi.punctuation), content)) - \
                       len(re.findall(r'[\u4e00-\u9fa5a-zA-Z0-9]', content))
        ratio = no_en_zh_len / len(content)
        # Reject when such "other" characters reach 50%.
        if ratio >= 0.5:
            # logger.error('{}, {}'.format(content, ratio))
            return ''
        return content

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return='')
    def en_lang_detect(cls, content, lang, score, filename):
        """Keep ``content`` only if fasttext identifies it as ``lang`` with
        confidence above ``score``.

        For files under an 'en/books' path a second opinion from langdetect
        is required: a disagreement combined with fasttext confidence <= 0.9
        also rejects the text.

        :param content: candidate text; falsy input is returned as-is
        :param lang: expected language code (e.g. 'en')
        :param score: minimum fasttext confidence (exclusive)
        :param filename: source path, used for the 'en/books' special case
        :return: ``content`` or ''
        """
        if not content:
            return content
        # fasttext detection; newlines replaced so it sees one line.
        result = ft_detect(text=str(content).replace('\n', '.'), low_memory=False)
        if result['lang'] != lang or (result['lang'] == lang and result['score'] <= score):
            return ''
        if 'en/books' in filename:
            r_lang = detect(str(content))
            if r_lang != lang and result['lang'] == lang and result['score'] <= 0.9:
                return ''
        return content

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return='')
    def en_zh_lang_detect(cls, content):
        """Keep ``content`` only if fasttext says it is English or Chinese
        with confidence above 0.65.

        English texts get a langdetect second opinion: disagreement plus
        fasttext confidence <= 0.9 rejects the text.

        :param content: candidate text; falsy input is returned as-is
        :return: ``content`` or ''
        """
        if not content:
            return content
        # fasttext detection; newlines replaced so it sees one line.
        result = ft_detect(text=str(content).replace('\n', '.'), low_memory=False)
        if result['lang'] not in ['en', 'zh']:
            return ''
        if (result['lang'] == 'zh' and result['score'] <= 0.65) or \
                (result['lang'] == 'en' and result['score'] <= 0.65):
            return ''
        if result['lang'] == 'en':
            r_lang = detect(str(content))
            if r_lang != 'en' and result['lang'] == 'en' and result['score'] <= 0.9:
                return ''
        return content

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return=False)
    def contain_sensitive(cls, utter, sensitive_words, max_length=110):
        '''
        Check whether a line contains any sensitive word.

        Builds every substring (n-gram) of ``utter`` up to ``max_length``
        characters and intersects that set with ``sensitive_words``; cost is
        O(len(utter) * max_length) substrings, so keep lines short.

        :param utter: the line of text to check
        :param sensitive_words: set of sensitive words
        :param max_length: longest sensitive word to consider
        :return: bool - True if a sensitive word is present, else False
        '''
        all_gram = {
            utter[i: j + 1]
            for i in range(len(utter))
            for j in range(i, min(len(utter), i + max_length))
        }

        overlap = sensitive_words & all_gram
        if len(overlap) > 0:
            return True
        return False

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return='')
    def run(cls, content):
        """Convenience pipeline: baseline clean then the Chinese-ratio rule.

        :param content: raw text
        :return: cleaned text, or '' when rejected or on error
        """
        clean_content = cls.jionlp_clean(content)
        clean_content = cls.ruler(clean_content)
        return clean_content

    @staticmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return=True)
    def get_sensitive_words(filenames):
        '''
        Load sensitive words from one or more word-list files.

        Each file contributes its non-empty stripped lines.

        NOTE(review): the decorator's ``exp_return=True`` means callers get
        the boolean True (not a set) if reading fails — confirm this is
        intended; downstream ``&`` with a set would then raise.

        :param filenames: iterable of word-list file paths
        :return: set of sensitive words
        '''
        sensitive_words = set()
        for filename in filenames:
            with open(filename, 'r', encoding='UTF-8', errors='ignore') as f:
                data = [item.strip() for item in f.readlines() if len(item.strip()) > 0]
                sensitive_words = sensitive_words | set(data)
        return sensitive_words

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return='')
    def en_split(cls, sym_spell, content):
        """Re-insert spaces into run-together English text via SymSpell
        word segmentation.

        :param sym_spell: a loaded SymSpell instance
        :param content: text to segment; falsy input is returned as-is
        :return: the corrected (segmented) string
        """
        if not content:
            return content
        result = sym_spell.word_segmentation(str(content))
        return result.corrected_string

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return=True)
    def is_single_english_word(cls, line):
        """True when ``line`` is exactly one whitespace-delimited token made
        only of ASCII letters and dots.

        :param line: candidate line
        :return: bool (decorator returns True on error)
        """
        # Token made purely of English letters (dots allowed).
        pattern = r'^[a-zA-Z.]+$'
        # Must be exactly one whitespace-delimited token.
        if len(line.split()) != 1:
            return False
        # And that token must match the letters-only pattern.
        if re.match(pattern, line):
            return True
        return False

    @classmethod
    @exception_decorator(exception_tuple=(Exception,), exp_return=True)
    def is_single_english_word_and_number(cls, line):
        """Intended to flag lines that are only bare words/numbers.

        NOTE(review): the pattern allows no whitespace, yet the guard
        requires MORE than two whitespace-delimited tokens — a multi-token
        line can never match a whitespace-free pattern, so this appears to
        always return False (except the decorator's True on error).  Confirm
        whether ``!= 1`` (as in ``is_single_english_word``) was intended.

        :param line: candidate line
        :return: bool (decorator returns True on error)
        """
        # Token of letters/digits and a few separator chars; no whitespace.
        pattern = r'^[a-zA-Z0-9.:\-…・]+$'
        # Lines of one or two tokens are rejected outright.
        if len(line.split()) <= 2:
            return False
        # Remaining (3+ token) lines are matched against the pattern.
        if re.match(pattern, line):
            return True
        return False


class Sample:

    @staticmethod
    @exception_decorator(exception_tuple=(Exception, func_timeout.FunctionTimedOut,))
    @func_set_timeout(600)
    def wudao(filename, sample, dest_file):
        """Sample the 'content' column of a WuDao JSON file into a headerless CSV.

        :param filename: source JSON file (single JSON document, not jsonl)
        :param sample: kwargs forwarded to DataFrame.sample
        :param dest_file: destination CSV path
        """
        frame = pd.read_json(filename, lines=False)
        usable = len(frame) > 0 and 'content' in frame.columns.tolist()
        if usable:
            picked = frame.sample(**sample, ignore_index=True).dropna()
            picked[['content']].to_csv(dest_file, index=False, header=False)
        logger.info('{} {} {} done'.format(filename, sample, dest_file))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception, func_timeout.FunctionTimedOut,))
    def wanjuan_Exam(filename, sample, dest_file):
        """Sample and clean the WanJuan Exam-cn jsonl exam-question dataset.

        Reads ``filename`` in 100k-row chunks, flattens each record's non-id,
        non-empty fields into one line of text, strips boilerplate
        (timestamps, headings, chapter/figure/table captions,
        copyright/disclaimer blocks, HTML entities), filters short,
        sensitive or low-quality lines, deduplicates per chunk, and appends
        the surviving lines to ``dest_file``.

        :param filename: source jsonl file; non-jsonl paths are skipped
        :param sample: kwargs forwarded to DataFrame.sample
        :param dest_file: destination text file (opened in append mode)
        """
        # Pre-compiled boilerplate patterns.
        time_pattern = re.compile(r'^[\[［][\d:\s]+[\]］]\s*.+\n', flags=re.MULTILINE)
        title_pattern = re.compile(r'^[=#]+[a-zA-Z0-9\-\s,.!:]+[=#\s]*\n', flags=re.MULTILINE)
        chapter_pattern = re.compile(r'^(IX|IV|V?I|[\d.]|第\s*[\d.]+\s*章)+\s+.+\n', flags=re.MULTILINE)
        image_table_pattern = re.compile(r'^[图表]\s*(IX|IV|V?I|[\d.A-Z])+.+\n', flags=re.IGNORECASE | re.MULTILINE)
        copyright_pattern = re.compile(r'^[\d\s]+版权所有', flags=re.MULTILINE)
        disclaimer_pattern = re.compile(r'免责声明：.*(相应责任。|相应责\n任。)[\s\n]*', flags=re.DOTALL)
        copyright_statement_pattern = re.compile(r'版权声明：.*', flags=re.DOTALL)
        numeric_statement_pattern = re.compile(r'\n\d+\s*声明.*', flags=re.DOTALL)
        copyright_notice_pattern = re.compile(r'Copyright Notice.*', flags=re.DOTALL)
        book_copyright_pattern = re.compile(r'本书版权.*', flags=re.DOTALL)
        html_elements_pattern = re.compile(r'(&amp;nbsp|&lt;|&gt;)')
        if not filename.endswith('jsonl'):
            return
        # File-level invariants, hoisted out of the chunk loop (previously
        # rebuilt — including re-reading the word lists — for every chunk).
        clean = CleanData()
        sensitive_words = clean.get_sensitive_words(
            [
                './sensitive_words/色情类.txt',
                './sensitive_words/中文广告.txt',
                './sensitive_words/中文黑名单.txt',
            ]
        )
        ht = HarvestText()
        fields = ["id", "q_type", "q_main", "option_a", "option_b", "option_c", "option_d", "option_e",
                  "std_ans", "answer", "answer_detail", "grade", "major", "keypoint"]
        reader = pd.read_json(filename, lines=True, chunksize=100000)
        for dfs in reader:
            dfs = dfs.dropna()
            # Skip chunks that are empty or do not have the expected schema.
            if len(dfs) == 0 or fields != dfs.columns.tolist():
                continue
            dfs = dfs.sample(**sample, ignore_index=True)
            dfs = dfs.dropna()
            contents = []
            for i in range(len(dfs)):
                try:
                    # Flatten each record's non-id, non-empty fields into one line.
                    values = ''.join(str({key: value for key, value in dfs.loc[i].items()
                                          if (key != "id" and value != "" and key)}))
                    values = values.replace("{", "").replace("}", "").replace("\'", "")
                    contents.append(values)
                except KeyError as e:
                    print(f"KeyError: {e} does not exist in the DataFrame.")
            final_lines = []
            for content in contents:
                clean_content = content + '\n'
                clean_content = time_pattern.sub('', clean_content)
                # Headings (numbers / roman numerals, ==titles==, ## markdown).
                clean_content = title_pattern.sub('', clean_content)
                clean_content = chapter_pattern.sub('', clean_content)
                # Figure/table captions (numbers and roman numerals).
                clean_content = image_table_pattern.sub('', clean_content)
                # Copyright / disclaimer boilerplate.
                clean_content = copyright_pattern.sub('', clean_content)
                clean_content = disclaimer_pattern.sub('', clean_content)
                clean_content = copyright_statement_pattern.sub('', clean_content)
                clean_content = numeric_statement_pattern.sub('', clean_content)
                clean_content = copyright_notice_pattern.sub('', clean_content)
                clean_content = book_copyright_pattern.sub('', clean_content)
                # Escaped HTML entities.
                clean_content = html_elements_pattern.sub('', clean_content)
                lines = []
                for line in clean_content.split('\n'):
                    # Drop lines shorter than 60 characters.
                    if len(line) < 60:
                        continue
                    # Drop lines containing sensitive words.
                    if clean.contain_sensitive(line, sensitive_words, max_length=100):
                        continue
                    # Baseline cleaning.
                    zh = clean.jionlp_clean_zh(line, ht)
                    # Too much punctuation.
                    zh = clean.ruler_zh_punctuation(zh)
                    # Too many digits.
                    zh = clean.ruler_number_punctuation(zh)
                    # Too many non-Chinese/non-English characters.
                    zh = clean.ruler_no_zh_en_punctuation(zh)
                    if len(zh.strip()) == 0:
                        continue
                    # Bare words / numbers only.
                    if clean.is_single_english_word_and_number(zh):
                        continue
                    lines.append(zh.strip())
                final_lines += list(OrderedSet(lines))
            # BUG FIX: the original `return` here aborted processing of all
            # remaining chunks as soon as one chunk produced no output.
            if not final_lines:
                continue
            with open(dest_file, 'a', encoding='UTF-8', errors='ignore') as w_fd:
                # BUG FIX: trailing newline so consecutive chunk appends do
                # not merge the last line of one chunk with the first of the next.
                w_fd.write('\n'.join(list(OrderedSet(final_lines))) + '\n')
            logger.info('{} {} {} done'.format(filename, sample, dest_file))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception,))
    def en_zh_corpus(filename, sample, dest_file):
        """Clean a tab-separated English/Chinese parallel corpus.

        Deduplicates on both sides of the pair, drops pairs whose English
        side fails language detection, cleans and rule-filters the Chinese
        side, drops pairs containing sensitive words, and writes surviving
        (en, zh) pairs back as headerless TSV.

        :param filename: source TSV file (column 0 = en, column 1 = zh)
        :param sample: sampling kwargs (currently unused; sampling disabled)
        :param dest_file: destination TSV path
        """
        df = pd.read_csv(filename, sep='\t', usecols=[0, 1], header=None, encoding_errors='ignore')
        df = df.set_axis(['en', 'zh'], axis=1)
        if len(df) == 0:
            return
        # df = df.sample(**sample, ignore_index=True)
        before_len = len(df)
        # Exact deduplication on each side of the pair.
        df = df.drop_duplicates(['zh'], ignore_index=True)
        df = df.drop_duplicates(['en'], ignore_index=True)
        dest_tuples = []
        clean = CleanData()
        sensitive_words = clean.get_sensitive_words(
            [
                './sensitive_words/色情类.txt',
                './sensitive_words/涉枪涉爆违法信息关键词.txt',
                './sensitive_words/广告.txt',
                './sensitive_words/黑名单词库.txt',
            ]
        )
        stop_words = clean.get_sensitive_words(
            ['./sensitive_words/广告.txt', ]
        )

        for index, row in df.iterrows():

            # Language check on the English side.
            # BUG FIX: en_zh_lang_detect() accepts only the text; the old call
            # passed extra positional args ('en', 0.65), raising TypeError on
            # every row, which the exception decorator silently converted to
            # '' — so every pair in the file was dropped.
            en = clean.en_zh_lang_detect(row['en'])
            if not en:
                continue

            # Text cleaning on the Chinese side.
            zh = clean.jionlp_clean(row['zh'], stop_words=stop_words)

            # Rule-based filtering (non-Chinese ratio / minimum Chinese chars).
            zh = clean.ruler(zh)
            if not zh:
                continue

            # Drop pairs containing sensitive words (profanity / porn lists).
            if clean.contain_sensitive(zh, sensitive_words, max_length=100):
                continue

            # TODO text correction (disabled for now):
            # from pycorrector.macbert.macbert_corrector import MacBertCorrector
            # bert_correct = MacBertCorrector()
            # correct_zh, detail = bert_correct.macbert_correct(zh)

            # TODO anti-spam (ads, weibo topics, zhihu-style boilerplate)

            # TODO low-quality filtering (sentence fluency)

            dest_tuples.append((row['en'], zh))

        dest_df = pd.DataFrame(dest_tuples, columns=['en', 'zh'])
        now_len = len(dest_df)
        dest_df.to_csv(dest_file, sep='\t', index=False, header=False)
        logger.info('{} {} {} done, before:{}, now:{}'.format(filename, sample, dest_file, before_len, now_len))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception, func_timeout.FunctionTimedOut,))
    @func_set_timeout(600)
    def redpajama(filename, sample, dest_file):
        """Sample the 'text' column of a RedPajama jsonl file into a headerless CSV.

        :param filename: source jsonl file
        :param sample: kwargs forwarded to DataFrame.sample
        :param dest_file: destination CSV path
        """
        frame = pd.read_json(filename, lines=True)
        usable = len(frame) > 0 and 'text' in frame.columns.tolist()
        if usable:
            picked = frame.sample(**sample, ignore_index=True).dropna()
            picked[['text']].to_csv(dest_file, index=False, header=False)
        logger.info('{} {} {} done'.format(filename, sample, dest_file))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception,))
    def qingbao_zh_sangfor(filename, sample, dest_file):
        """Clean a sangfor whitepaper text file line by line.

        Cleans each line (jionlp + chapter/figure/markdown-header removal +
        Chinese-ratio rule), strips everything from the references section
        onward, deduplicates, and writes the result to ``dest_file``.

        :param filename: source text file
        :param sample: unused here; logged for traceability
        :param dest_file: destination text file
        """
        with open(filename, 'r', encoding='UTF-8', errors='ignore') as fd:
            raw_lines = fd.readlines()
        # Per-line noise: chapter headings, figure/table captions, markdown headers.
        line_patterns = [
            r'\s*第\s*\d+\s*章.*?[\.;。；,，]',
            r'(.*图\s*\d+.*|.*表\s*\d+.*)',
            r'^#+.*',
        ]
        cleaner = CleanData()
        kept = []
        for raw in raw_lines:
            text = cleaner.jionlp_clean(raw)
            for pattern in line_patterns:
                text = re.sub(pattern, '', text)
            text = cleaner.ruler(text)
            if text:
                kept.append(text)
        merged = '\n'.join(kept)
        # Whole-document noise: the references line and everything on it.
        for pattern in [r'[^\n]*参考文献.*']:
            merged = re.sub(pattern, '', merged)
        survivors = [item.strip() for item in merged.split('\n') if len(item.strip()) != 0]
        with open(dest_file, 'w', encoding='UTF-8', errors='ignore') as w_fd:
            w_fd.write('\n'.join(list(OrderedSet(survivors))))
        logger.info(
            '{} {} {} done, before:{}, now:{}'.format(filename, sample, dest_file, len(raw_lines), len(survivors)))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception,))
    def qingbao_zh_common(filename, sample, dest_file):
        """Clean a generic Chinese intel .txt file: drop sensitive lines,
        boilerplate headings and captions, dedupe, and write to dest_file.

        :param filename: source path; files under 0.5 KB or not .txt are skipped.
        :param sample: sampling config (unused here; logged for traceability).
        :param dest_file: output path, one cleaned line per row.
        """
        # Files under 0.5 KB carry too little signal to keep.
        if os.path.getsize(filename) < 500:
            return
        # Only plain .txt inputs are handled here.
        if not filename.endswith('.txt'):
            return
        clean = CleanData()
        sensitive_words = clean.get_sensitive_words(
            [
                './sensitive_words/色情类.txt',
                './sensitive_words/广告.txt',
                './sensitive_words/黑名单词库.txt',
            ]
        )
        stop_words = clean.get_sensitive_words(
            ['./sensitive_words/广告.txt', ]
        )
        with open(filename, 'r', encoding='UTF-8', errors='ignore') as r_fd:
            # Drop everything from the references section, then split into lines.
            read_lines = re.sub(r'\n参考文献.*', '', r_fd.read(), flags=re.S).split('\n')
        # Chapter headers, figure/table captions and markdown headings.
        patterns = (
            r'\s*第\s*\d+\s*章.*?[\.;。；,，]',
            r'(.*图\s*\d+.*|.*表\s*\d+.*)',
            r'^#+.*'
        )
        kept = []
        for raw in read_lines:
            if clean.contain_sensitive(raw, sensitive_words, max_length=100):
                continue
            text = clean.jionlp_clean(raw, stop_words=stop_words)
            for pattern in patterns:
                text = re.sub(pattern, '', text)
            text = clean.ruler(text)
            if not text:
                continue
            kept.append(text.strip())
        with open(dest_file, 'w', encoding='UTF-8', errors='ignore') as w_fd:
            w_fd.write('\n'.join(list(OrderedSet(kept))))
        logger.info(
            '{} {} {} done, before:{}, now:{}'.format(filename, sample, dest_file, len(read_lines), len(kept)))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception,))
    def qingbao_zh_jsonl(filename, sample, dest_file):
        """Clean a Chinese intel jsonl file: strip citations/headings/captions,
        filter sensitive or low-content lines, dedupe, and write to dest_file.

        :param filename: source path; anything not ending in 'jsonl' is skipped.
        :param sample: sampling config (unused here; logged for traceability).
        :param dest_file: output path, one cleaned line per row.
        """
        if not filename.endswith('jsonl'):
            return
        df = pd.read_json(filename, lines=True)
        if len(df) == 0 or 'content' not in df.columns.tolist():
            return
        df = df.drop_duplicates(['content'], ignore_index=True)
        df = df.dropna()
        contents = df['content'].values.tolist()
        final_lines = []
        clean = CleanData()
        sensitive_words = clean.get_sensitive_words(
            [
                './sensitive_words/色情类.txt',
                './sensitive_words/中文广告.txt',
                './sensitive_words/中文黑名单.txt',
            ]
        )
        ht = HarvestText()

        for content in contents:
            # Bibliography citations such as "[1] ..." / "［1］..." at line start.
            clean_content = re.sub(r'^[\[［][\d:\s]+[\]］]\s*.+\n', '', content + '\n', flags=re.MULTILINE)
            # Headings marked with ==/## or numbered (arabic / roman numerals / 第N章).
            clean_content = re.sub(
                r'^[=#]+[a-zA-Z0-9\-\s,.!:]+[=#\s]*\n', '', clean_content, flags=re.MULTILINE)
            clean_content = re.sub(
                r'^(IX|IV|V?I|[\d.]|第\s*[\d.]+\s*章)+\s+.+\n', '',
                clean_content, flags=re.MULTILINE)
            # Figure/table captions (图/表 + arabic or roman numbering).
            clean_content = re.sub(
                r'^[图表]\s*(IX|IV|V?I|[\d.A-Z])+.+\n', '', clean_content, flags=re.IGNORECASE | re.MULTILINE)
            # Leftover HTML entities.
            clean_content = re.sub(r'(&amp;nbsp|&lt;|&gt;)', '', clean_content)
            lines = []
            for line in clean_content.split('\n'):
                # Drop lines containing sensitive words.
                if clean.contain_sensitive(line, sensitive_words, max_length=100):
                    continue
                # Base cleanup (jionlp + HarvestText).
                zh = clean.jionlp_clean_zh(line, ht)
                # Too much punctuation.
                zh = clean.ruler_zh_punctuation(zh)
                # Too many bare digits.
                zh = clean.ruler_number_punctuation(zh)
                # Too many non-Chinese/English characters.
                zh = clean.ruler_no_zh_en_punctuation(zh)
                if len(zh.strip()) == 0:
                    continue
                # A lone English word / number carries no signal.
                if clean.is_single_english_word_and_number(zh):
                    continue
                # BUG FIX: the original left an unconditional, unreachable
                # `continue` under the check above — its guarding
                # `if len(zh.strip()) <= 5:` line had been commented out — so
                # very short leftovers were never filtered. Restore the filter,
                # matching the behavior of qingbao_zh_txt.
                if len(zh.strip()) <= 5:
                    continue
                lines.append(zh.strip())
            final_lines += list(OrderedSet(lines))
        # Output is written as UTF-8 with errors ignored as a final safety net.
        if not final_lines:
            return
        with open(dest_file, 'w', encoding='UTF-8', errors='ignore') as w_fd:
            w_fd.write('\n'.join(list(OrderedSet(final_lines))))
        logger.info('{} {} {} done'.format(filename, sample, dest_file))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception,))
    def qingbao_zh_txt(filename, sample, dest_file):
        """Clean a Chinese intel .txt file: strip boilerplate, citations and
        headings, filter sensitive or low-content lines, dedupe, and write to
        dest_file.

        :param filename: source path; anything not ending in '.txt' is skipped.
        :param sample: sampling config (unused here; logged for traceability).
        :param dest_file: output path, one cleaned line per row.
        """
        if not filename.endswith('.txt'):
            return
        with open(filename, 'r', encoding='UTF-8', errors='ignore') as f:
            contents = f.readlines()

        if not contents:
            return

        final_lines = []
        clean = CleanData()
        sensitive_words = clean.get_sensitive_words(
            [
                './sensitive_words/色情类.txt',
                './sensitive_words/中文广告.txt',
                './sensitive_words/中文黑名单.txt',
            ]
        )

        ht = HarvestText()

        for content in contents:
            # Bibliography citations such as "[1] ..." / "［1］..." at line start.
            clean_content = re.sub(r'^[\[［][\d:\s]+[\]］]\s*.+\n', '', content + '\n', flags=re.MULTILINE)
            # Headings marked with ==/## or numbered (arabic / roman numerals / 第N章).
            clean_content = re.sub(
                r'^[=#]+[a-zA-Z0-9\-\s,.!:]+[=#\s]*\n', '', clean_content, flags=re.MULTILINE)
            clean_content = re.sub(
                r'^(IX|IV|V?I|[\d.]|第\s*[\d.]+\s*章)+\s+.+\n', '',
                clean_content, flags=re.MULTILINE)
            # Figure/table captions (图/表 + arabic or roman numbering).
            clean_content = re.sub(
                r'^[图表]\s*(IX|IV|V?I|[\d.A-Z])+.+\n', '', clean_content, flags=re.IGNORECASE | re.MULTILINE)
            # Copyright / disclaimer boilerplate, in several common Chinese forms.
            clean_content = re.sub(r'^[\d\s]+版权所有', '', clean_content, flags=re.MULTILINE)
            clean_content = re.sub(r'免责声明：.*(相应责任。|相应责\n任。)[\s\n]*', '', clean_content, flags=re.DOTALL)
            clean_content = re.sub(r'版权声明：.*', '', clean_content, flags=re.DOTALL)
            clean_content = re.sub(r'\n\d+\s*声明.*', '', clean_content, flags=re.DOTALL)
            clean_content = re.sub(r'Copyright Notice.*', '', clean_content, flags=re.DOTALL)
            clean_content = re.sub(r'本书版权.*', '', clean_content, flags=re.DOTALL)
            # Leftover HTML entities.
            clean_content = re.sub(r'(&amp;nbsp|&lt;|&gt;)', '', clean_content)
            lines = []
            for line in clean_content.split('\n'):
                # Drop lines shorter than 60 characters (likely fragments).
                if len(line) < 60:
                    continue
                # Drop lines containing sensitive words.
                if clean.contain_sensitive(line, sensitive_words, max_length=100):
                    continue
                # Base cleanup (jionlp + HarvestText).
                zh = clean.jionlp_clean_zh(line, ht)
                # Too much punctuation.
                zh = clean.ruler_zh_punctuation(zh)
                # Too many bare digits.
                zh = clean.ruler_number_punctuation(zh)
                # Too many non-Chinese/English characters.
                zh = clean.ruler_no_zh_en_punctuation(zh)
                if len(zh.strip()) == 0:
                    continue
                # A lone English word / number carries no signal.
                if clean.is_single_english_word_and_number(zh):
                    continue
                # if len(zh.strip()) > 5:
                #     # Language detection (disabled).
                #     zh = clean.en_zh_lang_detect(zh)
                # Very short leftovers are dropped too.
                if len(zh.strip()) <= 5:
                    continue
                lines.append(zh.strip())
            final_lines += list(OrderedSet(lines))
        # Output is written as UTF-8 with errors ignored as a final safety net.
        if not final_lines:
            return

        with open(dest_file, 'w', encoding='UTF-8', errors='ignore') as w_fd:
            w_fd.write('\n'.join(list(OrderedSet(final_lines))))

        logger.info(
            '{} {} {} done, before:{}, now:{}'.format(filename, sample, dest_file, len(contents), len(final_lines)))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception,))
    def qingbao_en_jsonl(filename, sample, dest_file):
        """Clean an English intel jsonl file: strip citations/headings/captions,
        sentence-tokenize, language-filter, dedupe, and write to dest_file.

        :param filename: source path; non-jsonl files and rdot.org dumps skipped.
        :param sample: sampling config (unused here; logged for traceability).
        :param dest_file: output path, one cleaned sentence per row.
        """
        # rdot.org.jsonl dumps are excluded entirely.
        if not filename.endswith('jsonl') or 'rdot.org' in filename:
            return
        df = pd.read_json(filename, lines=True)
        # df = df.sample(n=50, ignore_index=True)
        if len(df) == 0 or 'content' not in df.columns.tolist():
            return
        df = df.drop_duplicates(['content'], ignore_index=True)
        df = df.dropna()
        contents = df['content'].values.tolist()
        final_lines = []
        clean = CleanData()

        ht = HarvestText()

        for content in contents:
            # Bibliography citations such as "[1] ..." at line start.
            clean_content = re.sub(r'^\[\d+\]\s*.+\n', '', content + '\n', flags=re.MULTILINE)
            # Headings marked with ==/## or numbered (arabic / roman numerals).
            clean_content = re.sub(r'^[=#]+[a-zA-Z0-9\-\s,.!:]+[=#\s]*\n', '', clean_content, flags=re.MULTILINE)
            # NOTE(review): '&-/' inside the character class is a RANGE (& .. /),
            # which also matches ' ( ) * + , - . — presumably the literals
            # '&', '-', '/' were intended; confirm before changing.
            clean_content = re.sub(r'^(IX|IV|V?I|[\d.])+\s+[a-zA-Z0-9\s&-/]+\n', '', clean_content, flags=re.MULTILINE)
            # Figure/table captions (Stage/Table/Figure/Fig + numbering).
            clean_content = re.sub(
                r'^(Stage|Table|Figure|Fig)\s*(IX|IV|V?I|[\d.A-Z])+.+\n', '', clean_content,
                flags=re.IGNORECASE | re.MULTILINE)
            # Leftover HTML entities.
            clean_content = re.sub(r'(&amp;nbsp|&lt;|&gt;)', '', clean_content)
            lines = []
            for line in clean_content.split('\n'):
                # Base cleanup (jionlp + HarvestText).
                en = clean.jionlp_clean_en(line, ht)
                # Split run-together English into words (disabled).
                # en = clean.en_split(sym_spell, en)
                # Too many single letters (disabled).
                # if len(en.split()) > 0 and len(re.findall(r'\b[a-zA-Z]\b', en)) / len(en.split()) > 0.5:
                # en = clean.en_split(sym_spell, ''.join(en.split()))
                # Too much punctuation.
                en = clean.ruler_en_punctuation(en)
                if len(en.strip()) == 0:
                    continue
                lines.append(en.strip())
            # Sentence segmentation over the deduped, re-joined text.
            token_lines = []
            for line in sent_tokenize(' '.join(list(OrderedSet(lines))), language="english"):
                # A lone word carries no signal.
                if clean.is_single_english_word(line):
                    continue
                # Too few ASCII letters to be a real English sentence.
                if len(re.findall(r'[a-zA-Z]', line)) <= 5:
                    continue
                # Base cleanup again on the segmented sentence.
                en = clean.jionlp_clean_en(line, ht)
                # Too much punctuation.
                en = clean.ruler_en_punctuation(en)
                if len(en.strip()) == 0:
                    continue
                # Language detection at 0.65 confidence; presumably returns an
                # empty string for non-English input — confirm in CleanData.
                en = clean.en_lang_detect(en, 'en', 0.65, filename)
                if len(en.strip()) == 0:
                    continue
                token_lines.append(en.strip())
            final_lines += list(OrderedSet(token_lines))
            # Output is written as UTF-8 with errors ignored as a safety net.
        if not final_lines:
            return
        with open(dest_file, 'w', encoding='UTF-8', errors='ignore') as w_fd:
            w_fd.write('\n'.join(list(OrderedSet(final_lines))))
        logger.info('{} {} {} done'.format(filename, sample, dest_file))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception,))
    def mss_pdf_zh(filename, sample, dest_file):
        """Extract text from an MSS PDF, strip page numbers and legal
        boilerplate, and write cleaned deduplicated lines to dest_file.

        :param filename: source path; anything not ending in 'pdf' is skipped.
        :param sample: sampling config (unused here; logged for traceability).
        :param dest_file: output path, one cleaned line per row.
        """
        if not filename.endswith('pdf'):
            return
        with pdfplumber.open(filename) as pdf:
            page_texts = [page.extract_text() for page in pdf.pages]

        clean = CleanData()
        ht = HarvestText()

        # Deduplicate pages, then remove page numbers and legal boilerplate.
        text = '\n'.join(list(OrderedSet(page_texts))) + '\n'
        text = re.sub(r'^第[\d\s]+页\n', '', text, flags=re.MULTILINE)
        text = re.sub(r'^[\d\s]+版权所有', '', text, flags=re.MULTILINE)
        text = re.sub(r'免责声明：.*(相应责任。|相应责\n任。)[\s\n]*', '', text, flags=re.DOTALL)
        text = re.sub(r'版权声明：.*', '', text, flags=re.DOTALL)
        text = re.sub(r'\n\d+\s*声明.*', '', text, flags=re.DOTALL)

        # Company-name variants are treated as stop words during cleanup.
        text = clean.jionlp_clean_mss(text, ht,
                                      stop_words=("深信服科技股份有限公司",
                                                  "深信服", "股份", "有限公司",
                                                  "深信\n", "\n信服", "深 信 服"))

        final_lines = [piece.strip() for piece in text.split('\n') if len(piece.strip()) > 0]

        with open(dest_file, 'w', encoding='UTF-8', errors='ignore') as w_fd:
            w_fd.write('\n'.join(list(OrderedSet(final_lines))))

        logger.info('{} {} {} done'.format(filename, sample, dest_file))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception,))
    def mss_pdf_instruction(filename, sample, dest_file):
        """Extract one instruction/input/output record from an MSS vulnerability
        PDF and write it to dest_file as a jsonl row.

        :param filename: source path; anything not ending in 'pdf' is skipped.
        :param sample: sampling config (unused here; logged for traceability).
        :param dest_file: jsonl output path holding a single record.
        """
        if not filename.endswith('pdf'):
            return
        with pdfplumber.open(filename) as pdf:
            page_texts = [page.extract_text() for page in pdf.pages]

        # Strip the per-page copyright footer, then the fixed disclaimer block.
        body = re.sub(r'\d+\s*版权所有\s*深信服科技股份有限公司\s*\n', '', '\n'.join(page_texts),
                      flags=re.MULTILINE)
        body = re.sub(
            r'免责声明：\n本文档所提供的修复方案.*公司不承担相应责任。\n', '', body, flags=re.DOTALL)

        record = {}
        # Each loop keeps the LAST match, mirroring the original assign-in-loop.
        for vuln_name in re.findall(r'\s*漏洞名称\s*(.+)\s*\n', body):
            record['instruction'] = vuln_name.strip()

        for detail in re.findall(r'(漏洞详情信息\s*\n.+)\s*漏洞修复方案\n', body, flags=re.DOTALL):
            record['input'] = detail.strip()

        for advice in re.findall(r'(修复建议\s*\n.+)附件\s*\n', body, flags=re.DOTALL):
            record['output'] = advice.strip()

        with jsonlines.open(dest_file, 'w') as w_fd:
            w_fd.write(record)

        logger.info('{} {} {} done'.format(filename, sample, dest_file))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception,))
    def qingbao(filename, sample, dest_file):
        """Dump the 'content' column of an intel jsonl file to dest_file as-is.

        :param filename: jsonl source path, one record per line.
        :param sample: sampling config (unused here; logged for traceability).
        :param dest_file: csv output path, no header, one content per row.
        """
        frame = pd.read_json(filename, lines=True)
        if len(frame) == 0 or 'content' not in frame.columns.tolist():
            return
        # Sampling is intentionally disabled for this source.
        frame.dropna()[['content']].to_csv(dest_file, index=False, header=False)
        logger.info('{} {} {} done'.format(filename, sample, dest_file))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception, func_timeout.FunctionTimedOut,))
    @func_set_timeout(600)
    def lanjun_jsonl(filename, sample, dest_file):
        """Sample the 'content' column of a lanjun jsonl file into dest_file.

        :param filename: jsonl source path, one record per line.
        :param sample: kwargs forwarded to DataFrame.sample (e.g. n or frac).
        :param dest_file: csv output path, no header, one content per row.
        """
        frame = pd.read_json(filename, lines=True)
        if len(frame) == 0 or 'content' not in frame.columns.tolist():
            return
        picked = frame.sample(**sample, ignore_index=True).dropna()
        picked[['content']].to_csv(dest_file, index=False, header=False)
        logger.info('{} {} {} done'.format(filename, sample, dest_file))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception, func_timeout.FunctionTimedOut,))
    @func_set_timeout(600)
    def sec_txt(filename, sample, dest_file):
        """Sample the first column of a security text/csv file into dest_file.

        :param filename: source path; only the first column is read.
        :param sample: kwargs forwarded to DataFrame.sample (e.g. n or frac).
        :param dest_file: csv output path, no header, one content per row.
        """
        frame = pd.read_csv(filename, usecols=[0], header=None).set_axis(['content'], axis=1)
        if len(frame) > 0 and 'content' in frame.columns.tolist():
            picked = frame.sample(**sample, ignore_index=True).dropna()
            picked[['content']].to_csv(dest_file, index=False, header=False)
            logger.info('{} {} {} done'.format(filename, sample, dest_file))

    @staticmethod
    @exception_decorator(exception_tuple=(Exception, func_timeout.FunctionTimedOut,))
    def wanjuan_txt(filename, sample, dest_file):
        """Sample lines of a wanjuan .txt file (surrounding quotes stripped)
        into dest_file.

        :param filename: source path; anything not ending in '.txt' is skipped.
        :param sample: kwargs forwarded to DataFrame.sample (e.g. n or frac).
        :param dest_file: csv output path, no header, one line per row.
        """
        if not filename.endswith('.txt'):
            return
        with open(filename, 'r', encoding='UTF-8', errors='ignore') as f:
            stripped = [raw.strip('"\n') for raw in f.readlines()]
            frame = pd.DataFrame({'content': stripped})
            if len(frame) > 0 and 'content' in frame.columns.tolist():
                picked = frame.sample(**sample, ignore_index=True).dropna()
                picked[['content']].to_csv(dest_file, index=False, header=False)
                logger.info('{} {} {} done'.format(filename, sample, dest_file))

    @staticmethod
    def err_callback(err):
        """Log an exception raised by a worker in the multiprocessing pool."""
        logger.error(err)


def main():
    """Entry point: enumerate files under each configured task directory and
    clean them in parallel with a process pool.

    Note: root_dir / sample_dir are machine-specific Windows paths.
    """
    # Pre-training corpus root.
    root_dir = 'D:\\研究生\\深信服实习\\销售机器人\\清洗数据脚本\\data_clean'
    # Where the sampled/cleaned output is written.
    sample_dir = 'D:\\研究生\\深信服实习\\销售机器人\\清洗数据脚本\\data_clean\\out'

    # Spell-checker dictionary, loaded once; kept in each task tuple for
    # cleaning functions that may need it.
    sym_spell = SymSpell(max_dictionary_edit_distance=0, prefix_length=7)
    dictionary_path = pkg_resources.resource_filename("symspellpy", "frequency_dictionary_en_82_765.txt")
    sym_spell.load_dictionary(dictionary_path, term_index=0, count_index=1)

    real_tasks = []
    for task in tasks:
        pathname = os.path.join(root_dir, task['path'])
        if not os.path.isdir(pathname):
            continue
        # Destination directory is per-task; create it once, not per file.
        dest_dir = os.path.join(sample_dir, task['path'])
        os.makedirs(dest_dir, exist_ok=True)
        for filename in os.listdir(pathname):
            real_file = os.path.join(pathname, filename)
            dest_file = os.path.join(dest_dir, '{}.txt'.format(os.path.splitext(filename)[0]))
            real_tasks.append((task['func'], real_file, task['sample'], dest_file, sym_spell,))
    logger.info('ready run')
    pool = multiprocessing.Pool(processes=CORES)
    for func, real_file, sample, dest_file, sym_spell in real_tasks:
        # BUG FIX: the original dispatched empty func names through
        # getattr(Sample, ''), which raises AttributeError before the task can
        # run (so the branch passing sym_spell could never execute). Skip
        # unnamed tasks instead of crashing.
        if not func:
            continue
        pool.apply_async(
            getattr(Sample, func), args=(real_file, sample, dest_file,),
            error_callback=Sample.err_callback)
    pool.close()
    pool.join()

    logger.info('done')


# Run the cleaning pipeline only when executed as a script, not on import.
if __name__ == '__main__':
    main()
