#!/usr/bin/python3
# -*- coding: utf-8 -*-
# File  : tokenize_corpus.py
# Author: anyongjin
# Date  : 2020/9/7
'''
tokenize corpus text for general model distill
对语料分词生成ID，用于通用模型蒸馏
'''
import re
import os
import json
import logging
import pickle

# Root logger at INFO so the progress messages emitted below are visible.
logger = logging.getLogger()
logger.setLevel(logging.INFO)


def gen_news2016zh(file_path):
    '''
    Yield article contents from a news2016zh corpus file (one JSON object per line).

    Lines that fail to decode or parse are logged and skipped; blank lines are
    skipped rather than treated as end-of-file (``readline()`` returns an
    empty string only at true EOF).

    :param file_path: path to the JSON-lines corpus file
    :return: generator of article content strings
    '''
    log = logging.getLogger()
    # Context manager guarantees the handle is closed even when the consumer
    # abandons the generator early.
    with open(file_path, 'r', encoding='utf-8') as fdata:
        while True:
            try:
                line = fdata.readline()
            except Exception as e:
                log.warning(f'read news2016zh error:{e}')
                break
            if not line:
                # Empty string (no trailing newline) means EOF.
                break
            line = line.strip()
            if not line:
                # Blank line mid-file: skip it, do not stop reading.
                continue
            try:
                data = json.loads(line)
                if not data or 'content' not in data:
                    continue
                yield data['content']
            except Exception as e:
                log.error(f'error while fetch from news2016:{e}')
    log.warning('news2016 read complete')


def gen_wiki_or_news(file_path):
    '''
    Yield lines from a Chinese wiki dump (or line-organized news) text file.

    Skipped lines: blanks, section headers starting with '==', and short
    (<= 15 chars) title-like lines starting with '【'.

    :param file_path: path to the text corpus file
    :return: generator of stripped text lines
    '''
    log = logging.getLogger()
    name = os.path.basename(file_path)
    # Context manager guarantees the handle is closed even when the consumer
    # abandons the generator early.
    with open(file_path, 'r', encoding='utf-8') as fdata:
        while True:
            try:
                line = fdata.readline()
            except Exception as e:
                log.warning(f'read {name} error: {e}')
                break
            if not line:
                # Empty string (no trailing newline) means EOF.
                break
            line = line.strip()
            # Parentheses make the original precedence explicit: '==' headers
            # are always skipped; '【' lines only when short (<= 15 chars).
            if not line or line.startswith('==') or (line.startswith('【') and len(line) <= 15):
                continue
            yield line
    log.warning(f'read {name} complete')

# Characters mapped onto reserved BERT [unusedN] vocab slots so whitespace
# survives tokenization — presumably space/newline are significant for this
# corpus; verify against the Tokenizer implementation.
token_map = [
    (' ', '[unused1]'),
    ('\n', '[unused2]')
]


def get_char_count(content):
    '''Count Chinese chars, English words, English letters, digits and punctuation.

    Returns a 5-tuple ``(chinese, english_words, english_letters, digits,
    punctuation)``. A run of letters counts as one English "word" when it
    continues a previous letter run or when a lone leading letter is followed
    by a space. Plain spaces (half- and full-width) are not counted at all.
    '''
    chi_cnt = eng_cnt = letter_cnt = num_cnt = punct_num = 0
    if not content:
        return chi_cnt, eng_cnt, letter_cnt, num_cnt, punct_num
    # char-type codes: 0 = other/space, 1 = CJK, 2 = word-letter,
    # 3 = isolated letter, 4 = digit
    prev_type = 0
    length = len(content)
    for idx, ch in enumerate(content):
        cur_type = 0
        if '\u4e00' <= ch <= '\u9fa5':
            chi_cnt += 1
            cur_type = 1
        elif 'A' <= ch <= 'Z' or 'a' <= ch <= 'z':
            letter_cnt += 1
            # A letter continues a word if the previous char was a letter, or
            # starts one if it sits alone right before a space.
            leads_word = prev_type == 0 and idx + 1 < length and content[idx + 1] == ' '
            cur_type = 2 if prev_type in (2, 3) or leads_word else 3
            if cur_type == 2 and prev_type != 2:
                # Entering a word run: count one English word.
                eng_cnt += 1
        elif '0' <= ch <= '9':
            num_cnt += 1
            cur_type = 4
        elif ch not in (' ', '　'):
            punct_num += 1
        prev_type = cur_type
    return chi_cnt, eng_cnt, letter_cnt, num_cnt, punct_num


def clean_text(text, min_word_len=10):
    '''
    Filter and normalize one corpus line.

    Rejects text that is too short (CJK chars + English words <=
    ``min_word_len``) or too noisy (word-like chars below 70% of all counted
    chars), then collapses runs of 5+ spaces down to exactly four.

    :param text: raw text line
    :param min_word_len: minimum word-unit count required to keep the text
    :return: cleaned text, or None when the line is rejected
    '''
    chi_, eng_, let_, num_, punct = get_char_count(text)
    total = chi_ + eng_ + num_ + punct
    # total == 0 guard: with a non-positive min_word_len an empty/all-space
    # text would otherwise divide by zero in the ratio check below.
    if total == 0 or chi_ + eng_ <= min_word_len or (chi_ + eng_) / total < 0.7:
        return None
    text = re.sub(r'[ ]{5,}', '    ', text)
    return text


def build_token_data(in_list, bert_dir, out_path, seq_len=512, target_size=3):
    '''
    Read several corpus generators and write tokenized id sequences as a pickle stream.

    Sources are sampled at random in proportion to their weights; each drawn
    line is cleaned, tokenized, split into chunks of at most ``seq_len`` ids,
    and the chunks are written to ``out_path`` in pickled batches (one list
    per ``pickle.dump`` call).

    :param in_list: list of (generator, weight) pairs
    :param bert_dir: directory of the bert model/vocab used by the tokenizer
    :param out_path: output file path (stream of pickled lists of id lists)
    :param seq_len: maximum sequence length, default 512 (bert sequence length)
    :param target_size: number of lines to produce, in units of 1000 lines
    :return: None
    '''
    # Map each source onto a sub-interval of [0, 1] sized by its normalized
    # weight so a uniform random draw picks sources with those probabilities.
    data_iters = []
    total_weight = sum([item[1] for item in in_list])
    start = 0
    for item in in_list:
        weight_fac = item[1] / total_weight
        data_iters.append((item[0], start, start + weight_fac))
        start += weight_fac
    fout = open(out_path, 'wb')

    # load tokenizer
    from DistillBert.dstoken import Tokenizer
    tokenizer = Tokenizer(bert_dir, 'BertTokenizer', token_map)

    # handle files
    line_count = 0
    import random
    random.seed(55)  # fixed seed: corpus generation is reproducible
    batch_size = 1024
    cache_list = []
    while line_count < target_size * 1000:
        rand_val = random.random()
        # Find the source whose [start, end] interval contains rand_val.
        idx, data_iter = next(((i, it[0]) for i, it in enumerate(data_iters) if it[1] <= rand_val <= it[2]), (-1, None))
        if data_iter is None:
            # NOTE(review): intervals are not renormalized after a source is
            # exhausted and popped, so draws landing in a removed interval
            # simply retry — wasteful but correct while any source remains.
            if not data_iters:
                break
            continue
        try:
            # Keep drawing from this source until a line survives cleaning.
            while True:
                line = clean_text(next(data_iter))
                if line:
                    break
        except StopIteration:
            data_iters.pop(idx)
            continue
        if not line:
            continue
        word_ids = tokenizer.get_input_ids(line, padding=False, truncation=False).tolist()
        # Split long id sequences into seq_len-sized chunks, dropping any
        # trailing chunk shorter than 10% of seq_len.
        for i in range(0, len(word_ids), seq_len):
            batch_ids = word_ids[i: i + seq_len]
            if len(batch_ids) * 10 < seq_len:
                continue
            cache_list.append(batch_ids)
            line_count += 1
            if line_count > 0 and line_count % 100 == 0:
                logger.warning(f'processed {line_count} lines')
        # Flush whole batches to disk as soon as enough chunks accumulate.
        if len(cache_list) >= batch_size:
            pickle.dump(cache_list[: batch_size], fout)
            fout.flush()
            cache_list = cache_list[batch_size:]
    if cache_list:
        pickle.dump(cache_list, fout)
    fout.close()
    logger.warning('generate complete')


def statistic_line_len(data_dir, min_line_len=30, max_line_len=100, group_size: int = 5):
    '''
    Print a histogram of tokenized line lengths over all .csv files in a directory.

    Lengths <= ``min_line_len`` and > ``max_line_len`` each get a single
    bucket; lengths in between are grouped into ``(k, k + group_size]``
    buckets. Per-file and overall counters are printed.

    :param data_dir: directory containing .csv corpus files
    :param min_line_len: lower cut-off, single bucket below/at this length
    :param max_line_len: upper cut-off, single bucket above this length
    :param group_size: width of the intermediate buckets
    :return: None
    '''
    from DistillBert.data import build_generator_from_cache
    from collections import Counter
    from DistillBert.dstoken import Tokenizer
    import math
    token_map = [
        [' ', '[unused1]'],
        ['\n', '[unused2]']
    ]
    token_dir = 'D:/Data/transformers/albert_tiny_zh'
    tokenizer = Tokenizer(token_dir, 'BertTokenizer', token_map_list=token_map, padding='do_not_pad')
    names = os.listdir(data_dir)
    all_sta = Counter()

    def read_csv_lines(fdata):
        # Read a ~4KB batch of lines; EOFError tells the cache generator to stop.
        lines = fdata.readlines(4096)
        if not lines:
            raise EOFError('end')
        return lines
    for n in names:
        if not n.endswith('.csv'):
            continue
        path = os.path.join(data_dir, n)
        logger.info(f'processing {n}')
        len_grps = Counter()
        data_iter = build_generator_from_cache(path, 1024, epoch_num=1, read_fn=read_csv_lines)
        for batch_data in data_iter:
            for rd in batch_data:
                text = rd.decode('utf-8').strip()
                if not text:
                    continue
                tokens = tokenizer.get_input_ids(text)
                seq_len = len(tokens)
                if seq_len <= min_line_len:
                    len_grps[f'<={min_line_len}'] += 1
                elif seq_len > max_line_len:
                    len_grps[f'>{max_line_len}'] += 1
                else:
                    # Bucket is (right - group_size, right] with right being
                    # seq_len rounded up to a multiple of group_size. Fixes the
                    # previous code, which subtracted group_size (not 1) from
                    # the quotient for exact multiples, so e.g. seq_len=35 with
                    # group_size=5 landed in (10,35] instead of (30,35].
                    right_val = math.ceil(seq_len / group_size) * group_size
                    left_val = right_val - group_size
                    len_grps[f'({left_val},{right_val}]'] += 1
        stas = list(sorted(len_grps.items(), key=lambda x: x[0]))
        all_sta.update(len_grps)
        print(stas)
    all_stas = list(sorted(all_sta.items(), key=lambda x: x[0]))
    print(all_stas)



if __name__ == '__main__':
    data_dir = '/data/Data/corpus/'
    # Corpus sources paired with sampling weights; wiki is drawn twice as
    # often as each news source.
    in_list = [
        (gen_news2016zh(data_dir + 'news2016zh_train.json'), 1),
        (gen_wiki_or_news(data_dir + 'wiki.txt'), 2),
        (gen_wiki_or_news(data_dir + 'news_sohu.txt'), 1),
        (gen_wiki_or_news(data_dir + 'news_tensite.txt'), 1)
    ]
    bert_dir = '/data/Data/transformers/albert_tiny_zh'
    out_path = data_dir + 'token_ids.pkl'
    # target_size is in thousands of lines; 99999 effectively drains all sources.
    build_token_data(in_list, bert_dir, out_path, target_size=99999)
    # statistic_line_len('d:/Data/bert_classify/cv_classify_data/train')

