import argparse
from utils.raw_data import RawData

from utils.sampler import BatchSampler, RandomSampler, SequentialSampler
from transformers import BertTokenizer
from utils.plus_function import get_extended_batch_data
from utils.instance import SentenceInstance


# Custom DataLoader
class MyDataLoader:
    """Custom data loader that batches ``SentenceInstance`` objects.

    Iterating yields one dict per batch, produced by :meth:`collate_fn`.
    Batch order/size is controlled by a project ``BatchSampler`` built in
    ``__init__``.
    """

    def collate_fn(self, args, batch: list) -> dict:
        """Build the model-ready representation for one batch.

        :param args: run configuration; ``args.device`` is where tensors go.
        :param batch: list of ``SentenceInstance`` objects.
        :return: dict with keys ``inputs_plus``, ``label_plus``,
            ``inputs_plus_for_test``, ``sentence_token_range`` and
            ``extra_info``.
        """
        sentence_text_list = [inst.sentence_text for inst in batch]
        plus_text_list = [inst.plus_text for inst in batch]
        sentence_token_range_list = [inst.word_token_range for inst in batch]
        triplets_list = [inst.triplets for inst in batch]
        sentence_token_len_list = [len(inst.sentence_token) for inst in batch]

        # Encode as sentence pairs: [CLS] plus_text [SEP] sentence_text [SEP].
        tokenized_inputs = self.tokenizer(
            plus_text_list, sentence_text_list, padding=True, return_tensors='pt'
        )

        # triplets_all: [{"aspect_span": [], "opinion_span_list": [], 'sentiment': 0},]
        inputs_plus, label_plus, inputs_plus_test = get_extended_batch_data(
            args,
            tokenized_inputs,
            triplets_list,
            sentence_token_len_list
        )

        # Move every tensor to the configured device, mutating the containers
        # in place so their (possibly non-dict) type is preserved.
        for tensor_dict in (inputs_plus, label_plus, inputs_plus_test):
            for key, value in tensor_dict.items():
                tensor_dict[key] = value.to(args.device)

        extra_info = {
            'sentence': sentence_text_list
        }

        return {
            'inputs_plus': inputs_plus,
            'label_plus': label_plus,
            'inputs_plus_for_test': inputs_plus_test,
            'sentence_token_range': sentence_token_range_list,
            'extra_info': extra_info
        }

    def __init__(
            self,
            instances,
            tokenizer,
            args,
            shuffle=False,
            drop_last=False,
            is_test=False
    ):
        """Store the data and pick a batch sampler.

        :param instances: list of ``SentenceInstance`` objects.
        :param tokenizer: tokenizer used by :meth:`collate_fn`.
        :param args: run configuration (``batch_size``, ``device``, ...).
        :param shuffle: random order for training when True (ignored at test time).
        :param drop_last: drop the final incomplete batch when True.
        :param is_test: evaluate one sentence at a time, in order.
        """
        self.instances = instances
        self.args = args
        self.tokenizer = tokenizer
        # At test time always use batch size 1 with sequential order,
        # regardless of ``shuffle`` (the original code branched on shuffle
        # first, but both of its test branches were identical).
        if is_test:
            batch_size = 1
            sampler = SequentialSampler(len(self.instances))
        else:
            batch_size = args.batch_size
            if shuffle:
                sampler = RandomSampler(len(self.instances))
            else:
                sampler = SequentialSampler(len(self.instances))
        self.batch_sampler = BatchSampler(batch_size, sampler, drop_last)

    def __iter__(self):
        """Yield one collated batch dict per index group from the sampler."""
        for batch_indices in self.batch_sampler:
            yield self.collate_fn(self.args, [self.instances[idx] for idx in batch_indices])

    def __len__(self) -> int:
        """Number of batches per epoch."""
        return len(self.batch_sampler)


# Experiment: what does BERT's tokenizer do when given two lists of strings?
# if __name__ == '__main__':
#     tmp_tokenizer = BertTokenizer.from_pretrained("../bert_model/bert-base-uncased")
#     input_str_list1 = ["Can you speak english?"] * 3
#     input_str_list2 = [
#         "Yes, I can.",
#         "No, I can't.",
#         "Sorry."
#     ]
#     print("0) tmp_tokenizer.encode(input_str_list1, input_str_list2)")
#     print(tmp_tokenizer.encode(input_str_list1[0]))
#     # [101, 2064, 2017, 3713, 2394, 1029, 102]
#     print("1) tmp_tokenizer(input_str_list1, input_str_list2)")
#     print(tmp_tokenizer(input_str_list1, input_str_list2))
#     print("2) tmp_tokenizer(input_str_list1, input_str_list2, padding=True)")
#     print(tmp_tokenizer(input_str_list1, input_str_list2, padding=True))
#     print("3) tmp_tokenizer(input_str_list1, input_str_list2, padding=True, return_tensors=\"pt\")")
#     print(tmp_tokenizer(input_str_list1, input_str_list2, padding=True, return_tensors="pt"))
#     # {
#     # "input_ids": tensor([[], [], []]),
#     # "token_type_ids": tensor([[], [], []]),
#     # "attention_mask": tensor([[], [], []])
#     # }
#     # So when given two lists, it pairs the i-th element of arr1 with the i-th element of arr2
#     # Each sentence is [ 101, 2064, 2017, 3713, 2394, 1029,  102, 2748, 1010, 1045, 2064, 1012, 102,    0,    0]
#     # Each sequence starts with 101 ([CLS]), then the first sentence, 102 ([SEP]), then the second sentence, ending with 102
#     pass

# Smoke test for the dataloader
if __name__ == '__main__':
    def generate_instance_list_from_data(
            original_data: RawData,
            tokenizer,
            max_length: int = 100,
            plus_text: str = "Find the first aspect term and corresponding opinion term in the text"):
        """Wrap each raw sentence in a SentenceInstance."""
        return [SentenceInstance(data, tokenizer, max_length, plus_text) for data in original_data]

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=2)
    parser.add_argument('--device', type=str, default="cpu")
    # -1: no augmentation, 0: linear augmentation, 1: exponential augmentation
    parser.add_argument('--use_context_augmentation', type=int, default=1)
    tmp_args = parser.parse_args()
    tmp_args.sen_pre_len = 14
    tmp_tokenizer = BertTokenizer.from_pretrained("../bert_model/bert-base-uncased")
    train_raw_data = RawData('../../Dataset/ASTE-Data-V2/14res/train_triplets.txt')

    instances_train_list = generate_instance_list_from_data(train_raw_data, tmp_tokenizer)
    train_data_loader = MyDataLoader(instances_train_list, tmp_tokenizer, tmp_args)
    for train_batch_data in train_data_loader:
        # Print every field of the first batch, then stop.
        for field in (
                'inputs_plus',
                'label_plus',
                'inputs_plus_for_test',
                'sentence_token_range',
                'extra_info',
        ):
            print(field)
            print(train_batch_data[field])
        break
