import torch
from typing import List
from transformers import BertTokenizer
from exp01.dataset import TestDataSet
from exp01.instance import TrainingInstance
from utils.sampler import BatchSampler, SequentialSampler


def build_collate_fn(tokenizer: BertTokenizer, args):
    """Build a collate function that batches TrainingInstances for training.

    The returned callable tokenizes the batch's sentences with shared padding,
    writes the padded attention mask back onto each instance, and returns a
    dict with tokenizer tensors and concatenated label tensors, all moved to
    ``args.device``.

    Args:
        tokenizer: tokenizer used to encode the batch sentences.
        args: config object; only ``args.device`` is read here.

    Returns:
        A collate function ``List[TrainingInstance] -> dict`` with keys
        ``inputs_plus``, ``label_plus``, ``inputs_plus_for_test`` (always an
        empty list here) and ``train_instances``.
    """
    def extend_sentences(training_instance_list: List[TrainingInstance]):
        # Tokenize all sentences together so padding length is uniform
        # across the batch.
        sentence_text_list = [ti.sentence_instance.sentence_text for ti in training_instance_list]
        tokenized_inputs = tokenizer(sentence_text_list, padding=True, return_tensors='pt')
        # tokenized_inputs contains input_ids, token_type_ids, attention_mask.
        seq_len = tokenized_inputs["input_ids"].size(1)

        # Propagate each row's padded attention mask back onto its instance.
        for i, training_instance in enumerate(training_instance_list):
            training_instance.update_attention_mask(tokenized_inputs["attention_mask"][i])

        def get_label_plus():
            # Extend every instance's labels to the padded sequence length,
            # then concatenate field-wise and move to the target device.
            # (Replaces eight copy-pasted torch.cat(...).to(...) lines.)
            extended_label_list = [ti.get_extended_label(seq_len) for ti in training_instance_list]
            label_keys = ('a_s', 'a_e', 'a_s_', 'a_e_', 'is_on', 'o_s', 'o_e', 's')
            return {
                key: torch.cat([labels[key] for labels in extended_label_list]).to(args.device)
                for key in label_keys
            }

        # Move every tokenized tensor to the target device.
        for k in tokenized_inputs:
            tokenized_inputs[k] = tokenized_inputs[k].to(args.device)

        return {
            'inputs_plus': tokenized_inputs,
            'label_plus': get_label_plus(),
            'inputs_plus_for_test': [],
            # Only used during dev/test; irrelevant if dev/test get their own
            # dataset and loader.
            'train_instances': training_instance_list,
        }

    return extend_sentences


class TestDataLoader:
    """Minimal sequential data loader over a TestDataSet, one instance per batch.

    Every instance is tokenized once up front; iteration moves the cached
    tensors to ``args.device`` (in place, so repeated epochs are cheap no-ops)
    and yields the cached batch dict.
    """

    def __init__(self, dataset: TestDataSet, args, tokenizer: BertTokenizer, drop_last: bool = False):
        self.dataset = dataset
        self.args = args
        self.batch_size = 1
        self.tokenizer = tokenizer
        self.base_sampler = SequentialSampler(len(self.dataset))
        self.batch_sampler = BatchSampler(1, self.base_sampler, drop_last)
        # Pre-compute the per-instance batches (batch size is fixed at 1).
        self.batch_data_list = []
        for instance in self.dataset:
            self.batch_data_list.append({
                "inputs_plus_for_test": instance.get_token_as_batch(tokenizer),
                "test_instances": [instance],
            })

    def __iter__(self):
        for indices in self.batch_sampler:
            # Batch size is 1, so the single index identifies the batch.
            batch = self.batch_data_list[indices[0]]
            inputs = batch["inputs_plus_for_test"]
            # Mutate the cached tensors onto the target device before yielding.
            for key in inputs:
                inputs[key] = inputs[key].to(self.args.device)
            yield batch

    def __len__(self) -> int:
        return len(self.batch_sampler)
