"""
IFLYTEK 长文本分类
https://www.cluebenchmarks.com/introduce.html
该数据集共有1.7万多条关于app应用描述的长文本标注数据，包含和日常生活相关的各类应用主题，共119个类别："打车":0,"地图导航":1,"免费WIFI":2,"租车":3,….,"女性":115,"经营":116,"收款":117,"其他":118(分别用0-118表示)。每一条数据有三个属性，从前往后分别是 类别ID，类别名称，文本内容。
数据量：训练集(12,133)，验证集(2,599)，测试集(2,600)
例子：
{"label": "110", "label_des": "社区超市", "sentence": "朴朴快送超市创立于2016年，专注于打造移动端30分钟即时配送一站式购物平台"}
"""

from tqdm import tqdm
from pprint import pprint
import json
from collections import Counter
import os
from torch.utils.data import Dataset, DataLoader
import logging
import torch.nn
import numpy as np
from tool import torch_tool
from loaders.data import trans_txt_by_pretrain_xlnet

# Module-wide logging: level/file/function/line prefix on every record.
logging.basicConfig(level=logging.INFO, format='[%(levelname)s %(filename)s %(funcName)s:%(lineno)d] %(message)s')
# NOTE(review): getLogger(__file__) keys the logger by file path; the conventional
# key is __name__ — confirm no other module relies on this name before changing.
log = logging.getLogger(__file__)


def read_json_list(path):
    """Read a JSON-lines file (one JSON document per line).

    Args:
        path: path to a UTF-8 encoded file with one JSON object per line.

    Returns:
        list of parsed documents; blank/whitespace-only lines are skipped.
    """
    out = []
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(path, encoding='utf8') as f:
        for line in f:
            # Strip first: a whitespace-only line is truthy but not valid JSON.
            line = line.strip()
            if line:
                # json.loads(..., encoding=...) was removed in Python 3.9;
                # the str input is already decoded, so no kwarg is needed.
                out.append(json.loads(line))
    return out


def count_len(data_dir):
    """Print a document-length histogram (100-char buckets) and a label
    histogram across the train/test/valid splits under *data_dir*."""
    len__count = Counter()
    label__count = Counter()
    for ptype in ('train', 'test', 'valid'):
        docs = read_json_list(f'{data_dir}/{ptype}.json')
        for doc in docs:
            # Bucket sentence length to the nearest lower multiple of 100.
            bucket = len(doc['sentence']) // 100 * 100
            len__count[bucket] += 1
            if 'label_des' in doc:
                label__count[doc['label_des']] += 1
    print('=====================================================================')
    print('字数， 文档数量')
    for bucket, n_docs in sorted(len__count.items()):
        print(bucket, n_docs)
    print('=====================================================================')
    print('标签， 文档数量')
    pprint(label__count)


class LabelDataset(Dataset):
    """Dataset for the IFLYTEK long-text classification task.

    Loads one JSON-lines split, truncates each sentence to ``config.max_len``,
    tokenizes it with a SentencePiece vocab, and exposes a ``joint`` collate
    function that pads and tensorizes a batch.
    """

    def __init__(self, config, path, start=None, end=None, repeat=None):
        self.pad_id = config.pad_id
        labels_path = os.path.join(config.data_dir, 'labels.json')
        label_docs = read_json_list(labels_path)
        self.type_list = [doc['label_des'] for doc in label_docs]
        self.btype__idx = {name: idx for idx, name in enumerate(self.type_list)}
        self.vocab = torch_tool.SentencePieceVocab(config.sp_path)
        self.max_len = config.max_len

        raw = read_json_list(path)
        old_total_num = len(raw)

        # Optional [start:end] slice lets callers load a subset of the split.
        self.raw_data = raw[start:end]
        transformed = []
        for doc in tqdm(self.raw_data, desc='trans_data', leave=False):
            converted = self.trans_doc(doc)
            if converted is not None:
                transformed.append(converted)
        # `repeat` duplicates the (small) dataset, e.g. to lengthen an epoch.
        self.data = transformed * repeat if repeat else transformed
        log.info(f'原始数量={old_total_num}  当前数量={len(self.data)}  path={path}')

    def test_trans(self, num=20):
        """Re-run trans_doc with show=True on the first *num* raw docs."""
        for doc in self.raw_data[:num]:
            self.trans_doc(doc, show=True)

    def trans_doc(self, d, show=False):
        """Convert one raw doc into {'txt', 'input_ids', optional 'id'/'gold_*'}.

        With show=True, print any document whose token ids do not round-trip
        back to the original text (i.e. contain '<unk>').
        """
        truncated = d['sentence'][:self.max_len]
        out = {'txt': truncated}
        if 'id' in d:
            out['id'] = d['id']
        normalized = trans_txt_by_pretrain_xlnet(truncated)
        out['input_ids'] = [self.vocab.char_id(ch) for ch in normalized]
        if show:
            restored = ''.join(self.vocab.char(tok) for tok in out['input_ids'])
            if '<unk>' in restored:
                print('=' * 60)
                print(truncated)
                print('-' * 60)
                print(restored)
        if 'label' in d:
            out['gold_idx'] = int(d['label'])
            out['gold_type'] = d['label_des']
        return out

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        return self.data[item]

    def joint(self, batch):
        """Collate function: pad token ids, tensorize, keep texts/labels/ids."""
        first = batch[0]
        padded = torch_tool.pad_batch_seq([doc['input_ids'] for doc in batch], pad=self.pad_id)
        out = {
            # input_ids ~ (bsz, max_seq_len)
            'input_ids': self.tensor(padded),
            'batch_txt': [doc['txt'] for doc in batch],
        }
        if 'gold_idx' in first:
            out['gold_types'] = [doc['gold_type'] for doc in batch]
            # gold_idx ~ (bsz)
            out['gold_idx'] = self.tensor([doc['gold_idx'] for doc in batch])
        if 'id' in first:
            out['id'] = [doc['id'] for doc in batch]
        return out

    def tensor(self, values, dtype=torch.long):
        """Tensorize lists/arrays (moved to cuda), recurse into dicts,
        and pass anything else through unchanged."""
        if isinstance(values, dict):
            # NOTE: recursion uses the default dtype, as in the original.
            return {key: self.tensor(val) for key, val in values.items()}
        if isinstance(values, (list, np.ndarray)):
            return torch_tool.cuda(torch.tensor(values, dtype=dtype))
        return values


def test_trans(config, dtype, num=1000):
    """Spot-check tokenization: print docs from the *dtype* split whose
    token ids fail to round-trip (first *num* documents)."""
    split_path = os.path.join(config.data_dir, f'{dtype}.json')
    LabelDataset(config, split_path).test_trans(num)


def get_data(config, dtype, start=None, end=None, repeat=None, batch_size=1):
    """Build a DataLoader for the given split ('train'/'valid'/anything else
    is treated as test). Shuffles only the training split."""
    path = os.path.join(config.data_dir, f'{dtype}.json')
    data_set = LabelDataset(config, path, start=start, end=end, repeat=repeat)
    dataloader = DataLoader(
        data_set,
        batch_size=batch_size,
        shuffle=(dtype == 'train'),
        collate_fn=data_set.joint,
    )
    # 'Test_Set' carries an underscore so all three names have equal length
    # (keeps aligned log output).
    split_names = {'train': 'TrainSet', 'valid': 'ValidSet'}
    dataloader.dtype = split_names.get(dtype, 'Test_Set')
    return dataloader


if __name__ == '__main__':
    # NOTE(review): imported here (not at module top) — presumably to avoid a
    # circular import with the learner module; confirm before moving.
    from learners.learner4_xlnet import Config

    # Spot-check tokenization round-trips over (up to) the full training set.
    test_trans(Config, 'train', num=200000)
