from tqdm import tqdm
from pprint import pformat
import json
import random
import pickle
from collections import Counter
import os
from torch.utils.data import Dataset, DataLoader
import logging
import torch.nn
import numpy as np
import pandas as pd
from tool import torch_tool

# Module-wide logging: every line shows level, file, function and line number.
logging.basicConfig(level=logging.INFO, format='[%(levelname)s %(filename)s %(funcName)s:%(lineno)d] %(message)s')
log = logging.getLogger(__file__)


class WasherAll:
    """Wash a labelled review CSV into train/valid/test splits.

    Reads a CSV with 'label' and 'review' columns, counts labels, splits the
    rows, and can persist each split as a pickle plus a data_info.json.

    NOTE(review): `test_radio` looks like a typo for "test_ratio"; the name is
    kept for backward compatibility with existing callers.
    """

    def __init__(self, csv_path, test_radio=0.1):
        """Load the CSV and split it.

        csv_path: path to the CSV file (must have 'label' and 'review' columns).
        test_radio: fraction of all rows used for the valid split and (again)
            for the test split; the remainder becomes train.
        """
        self.test_radio = test_radio
        btype__count = Counter()
        self.all_data = []
        pd_all = pd.read_csv(csv_path)
        pd_dict = pd_all.to_dict()
        for k in pd_dict['label']:
            gold_type = str(pd_dict['label'][k])
            btype__count[gold_type] += 1
            # txt may be NaN for empty reviews; downstream consumers filter non-str txt.
            txt = pd_dict['review'][k]
            self.all_data.append({'gold_type': gold_type, 'txt': txt, '_id': k})
        self.data_info = {'type_list': sorted(btype__count), 'type__count': dict(btype__count)}
        self.dtype__data = self.split(self.all_data)

    def split(self, all_data):
        """Shuffle `all_data` IN PLACE and partition it into train/valid/test.

        valid and test each get int(len * test_radio) docs; train gets the rest.
        """
        dtype__data = {'train': [], 'valid': [], 'test': []}
        random.shuffle(all_data)
        for doc in all_data[:20]:  # log a small sample for manual inspection
            log.info('-' * 20 + '\n' + pformat(doc))
        num = int(len(all_data) * self.test_radio)
        dtype__data['valid'].extend(all_data[:num])
        dtype__data['test'].extend(all_data[num: num * 2])
        dtype__data['train'].extend(all_data[num * 2:])
        return dtype__data

    def save(self, data_dir):
        """Pickle each split into `data_dir` and write data_info.json there."""
        log.info(f'data_dir={data_dir}')
        os.makedirs(data_dir, exist_ok=True)
        dtype__num = {}
        for dtype in ['train', 'valid', 'test']:
            path = os.path.join(data_dir, f'{dtype}.pkl')
            data = self.dtype__data[dtype]
            dtype__num[dtype] = len(data)
            with open(path, 'wb') as f:
                pickle.dump(data, f)
        log.info(f'dtype__num={dtype__num}')
        info_path = os.path.join(data_dir, 'data_info.json')
        self.data_info['dtype__num'] = dtype__num
        # Fix: force utf-8 — ensure_ascii=False writes raw non-ASCII text, which
        # fails or mis-encodes under a non-UTF-8 default locale (e.g. Windows cp936).
        with open(info_path, 'w', encoding='utf-8') as f:
            json.dump(self.data_info, f, indent=2, ensure_ascii=False)


class LabelDataset(Dataset):
    """Text-classification dataset over one pickled split from WasherAll.

    Loads the label inventory from data_info.json, tokenizes each review with
    a SentencePiece vocab, and exposes `joint` as a DataLoader collate_fn that
    pads a batch into a rectangular id tensor.
    """

    def __init__(self, config, path, start=None, end=None, repeat=None):
        """Load and tokenize one split.

        config: must provide pad_id, data_dir, sp_path and max_len attributes.
        path: pickle file of docs shaped {'gold_type', 'txt', '_id'}.
        start, end: optional slice bounds on the raw docs (quick experiments).
        repeat: if truthy, duplicate the transformed docs this many times.
        """
        self.pad_id = config.pad_id
        info_path = os.path.join(config.data_dir, 'data_info.json')
        # Fix: use a context manager — the original json.load(open(...)) never
        # closed the file handle.
        with open(info_path) as f:
            data_info = json.load(f)
        self.type_list = data_info['type_list']
        # Per-class counts aligned with type_list order.
        self.type__count = [data_info['type__count'][btype] for btype in self.type_list]
        self.btype__idx = {btype: i for i, btype in enumerate(self.type_list)}
        self.vocab = torch_tool.SentencePieceVocab(config.sp_path)
        self.max_len = config.max_len
        with open(path, 'rb') as f:
            data = pickle.load(f)
        old_total_num = len(data)

        self.raw_data = data[start:end]
        self.data = []
        for doc in tqdm(self.raw_data, desc='trans_data', leave=False):
            new_doc = self.trans_doc(doc)
            if new_doc is not None:  # docs with non-str txt (e.g. NaN) are dropped
                self.data.append(new_doc)
        if repeat:
            self.data = self.data * repeat
        log.info(f'原始数量={old_total_num}  当前数量={len(self.data)}  path={path}')

    def test_trans(self, num=20):
        """Re-run trans_doc with diagnostics on the first `num` raw docs."""
        for doc in self.raw_data[:num]:
            self.trans_doc(doc, show=True)

    def trans_doc(self, d, show=False):
        """Convert one raw doc into model inputs; return None if txt is not str.

        Truncates txt to max_len, normalizes characters for the pretrained
        vocab, maps each char to an id, and attaches the gold label index.
        With show=True, prints any text whose round-trip through the vocab
        produced '<unk>'.
        """
        if not isinstance(d['txt'], str):
            return None  # e.g. NaN review coming from the CSV
        txt = d['txt'][:self.max_len]
        out = {'txt': txt, '_id': d['_id']}
        new_txt = trans_txt_by_pretrain_xlnet(txt)
        out['input_ids'] = [self.vocab.char_id(t) for t in new_txt]
        if show:
            reverse_input_txt = ''.join([self.vocab.char(t) for t in out['input_ids']])
            if '<unk>' in reverse_input_txt:
                print('=' * 60)
                print(txt)
                print('-' * 60)
                print(reverse_input_txt)
        out['gold_idx'] = self.btype__idx[d['gold_type']]
        out['gold_type'] = d['gold_type']
        return out

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        return self.data[item]

    def joint(self, batch):
        """collate_fn: pad input_ids and tensorize a batch of transformed docs."""
        out = {}
        # input_ids ~ (bsz, max_seq_len)
        out['input_ids'] = self.tensor(torch_tool.pad_batch_seq([t['input_ids'] for t in batch], pad=self.pad_id))
        out['batch_txt'] = [doc['txt'] for doc in batch]
        out['gold_types'] = [doc['gold_type'] for doc in batch]
        # gold_idx ~ (bsz)
        out['gold_idx'] = self.tensor([doc['gold_idx'] for doc in batch])
        return out

    def tensor(self, values, dtype=torch.long):
        """Recursively convert lists/ndarrays (or dicts of them) to CUDA tensors."""
        if isinstance(values, (list, np.ndarray)):
            return torch_tool.cuda(torch.tensor(values, dtype=dtype))
        elif isinstance(values, dict):
            # Fix: propagate dtype — the recursion previously dropped it,
            # silently resetting nested values to torch.long.
            return {k: self.tensor(v, dtype=dtype) for k, v in values.items()}
        else:
            return values


def get_data(config, dtype, start=None, end=None, repeat=None, batch_size=1):
    """Build a DataLoader for one split ('train'/'valid'/'test').

    Shuffles only the train split and attaches `dtype` (a display name) and
    `type__count` attributes onto the returned DataLoader.
    """
    pkl_path = os.path.join(config.data_dir, f'{dtype}.pkl')
    dataset = LabelDataset(config, pkl_path, start=start, end=end, repeat=repeat)
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=(dtype == 'train'),
        collate_fn=dataset.joint,
    )
    # The underscore in 'Test_Set' keeps all three display names the same length.
    display_names = {'train': 'TrainSet', 'valid': 'ValidSet'}
    loader.dtype = display_names.get(dtype, 'Test_Set')
    loader.type__count = dataset.type__count
    return loader


def trans_txt_by_pretrain_xlnet(txt):
    # 预训练的词典里很多符号是缺失的，转成对应的英文符号，避免信息丢失
    for old, new in [
        ['️', 'ப'],
        [' ', 'ப'],
        [' ', 'ப'],
        ['​', 'ப'],
        [' ', 'ப'],
        ['　', 'ப'],
        ['﹑', '、'],
        ['､', '、'],
        ['＾', '^'],
        ['㎡', '平'],
        ['＿', '_'],
        ['，', ','],
        ['！', '!'],
        ['︰', ':'],
        ['：', ':'],
        ['；', ';'],
        ['＼', '\\'],
        ['｜', '/'],
        ['／', '/'],
        ['－', '-'],
        ['﹣', '-'],
        ['％', '%'],
        ['＊', '*'],
        ['＆', '&'],
        ['￥', '$'],
        ['＂', '"'],
        ['＋', '+'],
        ['（', '('],
        ['）', ')'],
        ['＞', '>'],
        ['＜', '<'],
        ['；', ';'],
        ['．', '。'],
        ['？', '?'],
        ['＠', '@'],
        ['＃', '#'],
        ['＝', '='],
        ['０', '0'],
        ['①', '1'],
        ['⑴', '1'],
        ['１', '1'],
        ['②', '2'],
        ['⑵', '2'],
        ['２', '2'],
        ['３', '3'],
        ['③', '3'],
        ['⑶', '3'],
        ['４', '4'],
        ['⑷', '4'],
        ['④', '4'],
        ['５', '5'],
        ['⑤', '5'],
        ['６', '6'],
        ['⑥', '6'],
        ['７', '7'],
        ['⑦', '7'],
        ['８', '8'],
        ['⑧', '8'],
        ['９', '9'],
        ['⑨', '9'],
        ['⑩', '十'],
        ['Ａ', 'A'],
        ['Ｂ', 'B'],
        ['ｂ', 'B'],
        ['Ｃ', 'C'],
        ['Ｄ', 'D'],
        ['ｇ', 'G'],
        ['Ｇ', 'G'],
        ['Ｒ', 'R'],
        ['Ｓ', 'S'],
        ['Ⅰ', 'I'],
        ['ｪ', 'I'],
        ['Ｉ', 'I'],
        ['Ｏ', 'O'],
        ['Ｐ', 'P'],
        ['Ｔ', 'T'],
        ['Ｋ', 'K'],
        ['Ｖ', 'V'],
        ['Ｌ', 'L'],
        ['ｕ', 'U'],
        ['Ｕ', 'U'],
        ['Ｈ', 'H'],
        ['Ｍ', 'M'],
        ['Ｎ', 'N'],
        ['Ｘ', 'X'],
        ['❌', 'X'],
        ['Ｙ', 'Y'],
        ['⾃', '自'],
        ['⼀', '一'],
        ['⽅', '方'],
        ['⾯', '面'],
        ['⾜', '足'],
        ['⽤', '用'],
        ['飕', '嗖'],
        ['碜', '掺'],
        ['噔', '登'],
        ['硌', '铬'],
        ['⼒', '力'],
        ['⾼', '高'],
        ['⾔', '言'],
        ['⽂', '文'],
        ['呒', '无'],
        ['怄', '抠'],
        ['唿', '呼'],
        ['Ⅲ', '3'],
        ['［', ']'],
        ['™', 'இ'],  # 瞎改。。。。。
        ['﹏', 'ᅳ'],
        ['', 'ப'],
        ['］', '['],
        ['镚', '币'],   # 钢镚
        ['叒', '又'],
        ['煳', '糊'],
        ['缱', '遣'],
        ['嗮', '晒'],
        ['锸', '萨'],
        ['憷', '怵'],
        ['嚯', '霍'],
        ['٩๑', 'က'],
        ['❤', 'က'],
        ['▽', '_'],
        ['₃', '_'],
        ['٩', '_'],
        ['ᴗ', '_'],
        ['◡', '_'],   # ●◡●
        ['๑', '@'],   # ๑•̀﹃•́و✧
        ['ㅂ', '━'],
        ['゜', '━'],
        ['ヽ', '━'],
        ['﹃', '━'],
        ['￣', '━'],
        ['▔', '━'],
        ['`', '━'],
        ['´', '━'],
        ['۶', '━'],
        ['｀', '━'],
        ['˴', '━'],
        ['', '♦'],
        ['￭', '♦'],
        ['〓', '♦'],
        ['▾', '♦'],
        ['✧', '♦'],
        ['ㆍ', '●'],
        ['', '●'],
        ['', '●'],
        ['◉', '●'],
        ['✪', '●'],
        ['', '●'],
        ['', '●'],
        ['⭕', '●'],
        ['◈', '●'],
        ['｡', '●'],
        ['', '▷'],
        ['➤', '▷'],
        ['▨', '▷'],
        ['▶', '▷'],
        ['➠', '▷'],
        ['➡', '▷'],
        ['ゞ', '▷'],
        ['┬', 't'],  # 文字表情  不替换更好？
        ['┍', 'r'],  # 文字表情
        ['…', '...'],  # 警告：这里文本长度变了
    ]:
        txt = txt.replace(old, new)
    return txt


def test_trans(config, dtype, num=20):
    """Print tokenization diagnostics for the first `num` docs of a split."""
    pkl_path = os.path.join(config.data_dir, f'{dtype}.pkl')
    dataset = LabelDataset(config, pkl_path)
    dataset.test_trans(num)


def print_vocab_all_word():
    """Dump every piece of the pretrained XLNet SentencePiece vocab to stdout."""
    vocab = torch_tool.SentencePieceVocab('E:/code/data/pretrain_model_file/xlnet/chinese_small_xlnet/spiece.model')
    pieces = (vocab.char(idx) for idx in range(32000))
    print(' '.join(pieces))


if __name__ == '__main__':
    # Wash the data
    csv_path = 'E:/code/data/common/携程网_酒店_评价分类/ChnSentiCorp_htl_all.csv'  # 7766 rows total: 5322 positive, 2444 negative
    test_radio = 0.08  # washing: fraction of all data used for each of the test and valid splits
    # washer = WasherAll(csv_path, test_radio)  # wash the data (pkl files go into config.data_dir)
    # washer.save('E:/code/data/common/携程网_酒店_评价分类/washed_data' )

    from learners.learner1_mlp import Config

    test_trans(Config, 'train', num=None)
