import torch
from typing import List
from transformers import BertTokenizer
from utils.dataset import TrainDataSet, TestDataSet
from utils.instance import TrainingInstance
from utils.sampler import RandomSampler, BatchSampler, SequentialSampler


def build_collate_fn(tokenizer: "BertTokenizer", args):
    """Build a collate function that encodes sentence pairs and batches labels.

    Args:
        tokenizer: HuggingFace BERT tokenizer used to encode
            (plus_sentence, sentence) text pairs.
        args: namespace that provides at least ``device`` — the target device
            for all returned tensors.

    Returns:
        A callable mapping a list of ``TrainingInstance`` objects to a batch
        dict with keys ``'inputs_plus'``, ``'label_plus'``,
        ``'inputs_plus_for_test'`` and ``'train_instances'``.
    """

    def extend_sentences(training_instance_list: "List[TrainingInstance]"):
        """Collate one list of training instances into a single batch dict."""
        sentence_text_list = [ins.sentence_instance.sentence_text for ins in training_instance_list]
        plus_sentence_text_list = [ins.plus_instance.sentence_text for ins in training_instance_list]
        # Joint encoding of the pair; the result carries input_ids,
        # token_type_ids and attention_mask, padded to a common length.
        tokenized_inputs = tokenizer(plus_sentence_text_list, sentence_text_list, padding=True, return_tensors='pt')
        seq_len = tokenized_inputs["input_ids"].size(1)

        # Push each row of the padded attention mask back onto its instance.
        for i, training_instance in enumerate(training_instance_list):
            training_instance.update_attention_mask(tokenized_inputs["attention_mask"][i])

        def get_label_plus():
            """Concatenate per-instance label tensors into one batch tensor per key."""
            extended_label_list = [ins.get_extended_label(seq_len) for ins in training_instance_list]
            # Every instance yields the same label keys, so the first element's
            # keys define the schema; concatenate along the batch dimension.
            return {
                key: torch.cat([labels[key] for labels in extended_label_list]).to(args.device)
                for key in extended_label_list[0]
            }

        # Move every encoded input tensor onto the configured device.
        for k in tokenized_inputs.keys():
            tokenized_inputs[k] = tokenized_inputs[k].to(args.device)

        return {
            'inputs_plus': tokenized_inputs,
            'label_plus': get_label_plus(),
            # Only consumed during dev/test; kept empty for the training loader.
            'inputs_plus_for_test': [],
            'train_instances': training_instance_list,
        }

    return extend_sentences


# class TrainDataLoader:  # train Data Loader 和 test Data Loader 应该是不一样的
#     def __init__(self, dataset: TrainDataSet, args, tokenizer: BertTokenizer, shuffle=False, drop_last=False):
#         self.dataset = dataset
#         self.args = args
#         self.tokenizer = tokenizer
#         self.shuffle = shuffle
#         self.drop_last = drop_last
#         self.batch_size = args.batch_size
#         self.base_sampler = RandomSampler(len(self.dataset)) if shuffle else SequentialSampler(len(self.dataset))
#         self.batch_sampler = BatchSampler(self.batch_size, self.base_sampler, self.drop_last)
#         pass
#
#     def extend_sentences(self, training_instance_list: List[TrainingInstance]):
#         sentence_text_list = [training_instance.sentence_instance.sentence_text for training_instance in training_instance_list]
#         plus_sentence_text_list = [training_instance.plus_instance.sentence_text for training_instance in training_instance_list]
#         tokenized_inputs = self.tokenizer(plus_sentence_text_list, sentence_text_list, padding=True, return_tensors='pt')
#         # # 由三个部分组成：input_ids， token_type_ids， attention_mask
#         seq_len = tokenized_inputs["input_ids"].size(1)
#
#         # 更新每个 train instance 的 attention_mask 部分
#         for i, training_instance in enumerate(training_instance_list):
#             training_instance.update_attention_mask(tokenized_inputs["attention_mask"][i])
#             pass
#
#         # 获取 label_plus 部分
#         def get_label_plus():
#             extended_label_list = [training_ins.get_extended_label(seq_len) for training_ins in training_instance_list]
#             return {
#                 'a_s': torch.cat([v["a_s"] for v in extended_label_list]).to(self.args.device),
#                 'a_e': torch.cat([v["a_e"] for v in extended_label_list]).to(self.args.device),
#                 'a_s_': torch.cat([v["a_s_"] for v in extended_label_list]).to(self.args.device),
#                 'a_e_': torch.cat([v["a_e_"] for v in extended_label_list]).to(self.args.device),
#                 'is_on': torch.cat([v["is_on"] for v in extended_label_list]).to(self.args.device),
#                 'o_s': torch.cat([v["o_s"] for v in extended_label_list]).to(self.args.device),
#                 'o_e': torch.cat([v["o_e"] for v in extended_label_list]).to(self.args.device),
#                 's': torch.cat([v["s"] for v in extended_label_list]).to(self.args.device)
#             }
#
#         # 将 tokenized_inputs 每个元素都弄到 gpu 上
#         for k in tokenized_inputs.keys():
#             tokenized_inputs[k] = tokenized_inputs[k].to(self.args.device)
#             pass
#
#         return {
#             'inputs_plus': tokenized_inputs,
#             'label_plus': get_label_plus(),
#             'inputs_plus_for_test': [],  # 这个只是在 dev 和 test 的环节使用到了，如果要设置 dev 和 test 单独的 dataset 和 loader，这里就不考虑了
#             'train_instances': training_instance_list,
#         }
#         pass
#
#     def __iter__(self):
#         for batch_indices in self.batch_sampler:
#             yield self.extend_sentences([self.dataset[idx] for idx in batch_indices])
#             pass
#         pass
#
#     def __len__(self) -> int:
#         return len(self.batch_sampler)
#
#     pass


class TestDataLoader:
    """Sequential, batch-size-1 data loader over a ``TestDataSet``.

    Every instance is tokenized once up front, so iteration only has to move
    the selected batch's tensors onto ``args.device`` before yielding it.
    """

    def __init__(self, dataset: TestDataSet, args, tokenizer: BertTokenizer, drop_last: bool = False):
        self.dataset = dataset
        self.args = args
        self.batch_size = 1
        self.tokenizer = tokenizer
        self.base_sampler = SequentialSampler(len(self.dataset))
        self.batch_sampler = BatchSampler(1, self.base_sampler, drop_last)
        # Pre-compute the tokenized batch for every test instance so that
        # each iteration is a cheap cache lookup.
        self.batch_data_list = []
        for instance in self.dataset:
            self.batch_data_list.append({
                "inputs_plus_for_test": instance.get_token_as_batch(tokenizer),
                "test_instances": [instance],
            })

    def __iter__(self):
        for indices in self.batch_sampler:
            batch = self.batch_data_list[indices[0]]
            encoded = batch["inputs_plus_for_test"]
            # Move each cached tensor onto the configured device (in place;
            # re-applying .to() on later epochs is a no-op for same device).
            for key in encoded.keys():
                encoded[key] = encoded[key].to(self.args.device)
            yield batch

    def __len__(self) -> int:
        return len(self.batch_sampler)

