import re
import os
import pickle
from collections import defaultdict
import torch
from torch.utils.data import DataLoader
import json
from nezha.util import nezha_torch_tool
from nezha.util.nezha_torch_tool import Collator_test, KGDataset, KGDataset_test
from loader.vocab import Vocab
from fish_tool.ai import torch_tool
from fish_tool import logs, sys_tool


class SentiDataset:
    """Sentiment-analysis dataset: loads a json-list file and batches token ids.

    Each doc is a dict. Docs that lack a 'sent_token' field get one derived by
    splitting 'context' into individual characters.
    """

    def __init__(self, vocab_dir, json_path, seq_len=200, test_num=None, repeat=1):
        """
        vocab_dir: directory handed to Vocab (tokenizer/vocab files).
        json_path: json-list file of doc dicts.
        seq_len: hard upper bound on tokens per example at collate time.
        test_num: if truthy, keep only the first test_num docs (quick runs).
        repeat: duplicate the doc list this many times (oversampling).
        """
        self.vocab = Vocab(vocab_dir)
        self.seq_len = seq_len
        self.docs = sys_tool.read_jsonlist(json_path)
        if test_num:
            self.docs = self.docs[:test_num]
        for d in self.docs:
            if 'sent_token' not in d:
                d['sent_token'] = list(d['context'])
        self.docs = self.docs * repeat

    def get_len__count(self, len__count=None):
        """Return a histogram of doc lengths bucketed to multiples of 50.

        An existing histogram dict may be passed in to accumulate across
        datasets; a fresh dict is created otherwise.

        NOTE(review): this measures len(doc['text_a']) while the rest of the
        class works with 'sent_token' — confirm 'text_a' is the intended field.
        """
        len__count = len__count or {}
        for doc in self.docs:
            bucket = len(doc['text_a']) // 50 * 50
            len__count[bucket] = 1 + len__count.get(bucket, 0)
        return len__count

    def __getitem__(self, index):
        """Return the doc at index, lazily caching 'rationale_ids'
        (1 where the rationale token equals the marker '￥', else 0)."""
        d = self.docs[index]
        if 'rationale' in d and 'rationale_ids' not in d:
            d['rationale_ids'] = [int(t == "￥") for t in d['rationale']]
        return d  # same dict object as self.docs[index]

    def __len__(self):
        return len(self.docs)

    def collate_fn(self, docs):
        """Collate a list of docs into padded batch tensors.

        Returns a dict with 'input_ids', 'labels', 'rationale' (tensors) plus
        raw 'txts' and 'data_ids' lists. Per-doc optional fields are skipped,
        so labels/rationale/data_ids may be shorter than the batch (or empty).
        """
        # Batch width: longest sent_token in the batch, capped at seq_len.
        max_len = min(self.seq_len, max(len(d['sent_token']) for d in docs))

        def _fit(seq, fill):
            # Right-pad with `fill`, then truncate, so len(result) == max_len.
            return (seq + [fill] * (max_len - len(seq)))[:max_len]

        data_ids, txts, labels, input_ids, rationale = [], [], [], [], []
        for doc in docs:
            txts.append(doc['sent_token'])
            if 'id' in doc:
                data_ids.append(doc['id'])
            if 'label' in doc:
                labels.append(int(doc['label']))
            if 'rationale_ids' in doc:
                rationale.append(_fit(doc['rationale_ids'], 0))
            ids = self.vocab.encode_plus(doc['sent_token'][:max_len])['input_ids']
            input_ids.append(_fit(ids, self.vocab.pad_token_id))
        return {'input_ids': torch_tool.tensor(input_ids), 'labels': torch_tool.tensor(labels),
                'rationale': torch_tool.tensor(rationale), 'txts': txts, 'data_ids': data_ids}


def get_data(config, dtype, test_num=0, repeat=1, shuffle=False, batch_size=0):
    """Build a DataLoader for the sentiment-analysis task.

    dtype selects the source file: the special splits 'inter' and 'test' map
    to fixed files under data_dir; any other value is treated as a
    ChnSentiCorp split name. batch_size of 0 falls back to config.batch_size.
    """
    special_paths = {
        'inter': f'{config.data_dir}/senti_ch_part1_标注_merge.txt',
        'test': f'{config.data_dir}/senti_ch_part1.txt',
    }
    json_path = special_paths.get(dtype, f'{config.data_dir}/ChnSentiCorp/{dtype}.jsonlist')
    dataset = SentiDataset(config.pre_model_dir, json_path, seq_len=config.max_seq_len,
                           test_num=test_num, repeat=repeat)
    effective_bs = batch_size or config.batch_size
    return DataLoader(dataset, batch_size=effective_bs, shuffle=shuffle,
                      collate_fn=dataset.collate_fn)


def show_data_num(config):
    """Print the number of batches in each standard split."""
    for split in ('train', 'valid', 'test'):
        loader = get_data(config, split)
        print(f'{split}~{len(loader)}')