import torch
from torch.utils.data.dataset import T_co
from typing import List
from constant import *
from transformers import BertTokenizer
from raw_data1 import RawData
from plus_function import get_bin_str_list_range_n
from torch.utils.data import Dataset

from utils.sampler import BatchSampler, SequentialSampler, RandomSampler


class MetaSentence:  # all raw information about one sentence lives in an instance of this class
    """Tokenize one sentence and convert its word-level triplets to token-level spans.

    ``data`` is expected to look like::

        {
            'sentence': sentence,   # raw sentence text
            'triplets': [           # optional; absent for the prompt ("plus") sentence
                ([a_s, a_e], [[o1_s, o1_e], [o2_s, o2_e]], sentiment_id),
            ]
        }
    """

    def __init__(self, data: dict, tokenizer: BertTokenizer):
        self.sentence_text = data['sentence']
        # Drop the special tokens added by encode(): [CLS] (101) and [SEP] (102).
        self.sentence_token = tokenizer.encode(self.sentence_text)[1:-1]
        # Each entry: {"aspect_span": [s, e], "opinion_span_list": [[s, e], ...], "sentiment": id}
        self.triplets = []

        def get_word_token_range() -> list:
            # Map each whitespace-separated word to its inclusive [start, end]
            # range in subword-token coordinates.
            token_range, token_start = [], 0
            for word_text in self.sentence_text.strip().split():
                token_end = token_start + len(tokenizer.encode(word_text, add_special_tokens=False))
                token_range.append([token_start, token_end - 1])
                token_start = token_end
            return token_range

        self.word_token_range = get_word_token_range()

        # BUG FIX: the prompt sentence is constructed as {"sentence": plus_text}
        # with no 'triplets' key, so data['triplets'] raised KeyError.  Default
        # to an empty list so prompt-only sentences simply get no triplets.
        for current_triplet in data.get('triplets', []):
            # current_triplet is (aspect_word_span, opinion_word_span_list, sentiment_id);
            # translate word indices into token indices via word_token_range.
            new_triplet = {
                "aspect_span": [
                    self.word_token_range[current_triplet[0][0]][0],
                    self.word_token_range[current_triplet[0][-1]][1],
                ],
                "opinion_span_list": [
                    [self.word_token_range[span[0]][0], self.word_token_range[span[-1]][1]]
                    for span in current_triplet[1]
                ],
                "sentiment": current_triplet[2],
            }
            self.triplets.append(new_triplet)


class TrainingInstance:  # one sample fed to the model: sentence + prompt ("plus") sentence + a mask
    """A single training sample.

    ``mask_str`` is a binary string such as "0010"; the '1' selects the target
    triplet of ``sentence_instance`` (an all-zero mask means "no target").
    """

    def __init__(self, sentence_instance: MetaSentence, plus_instance: MetaSentence, mask_str: str):
        self.sentence_instance = sentence_instance
        self.plus_instance = plus_instance
        self.mask_str = mask_str  # a string like "0010"
        # First triplet flagged with '1'; stays None when the mask is all zeros.
        self.target_triplet = None
        for char_i, char_here in enumerate(mask_str):
            if char_here == "1":
                self.target_triplet = self.sentence_instance.triplets[char_i]
                break

    def update_attention_mask(self, mask_tensor: torch.Tensor):
        """Zero the attention mask over every *non-target* aspect span.

        ``mask_tensor`` is one row of the attention mask for the joint
        "[CLS] plus [SEP] sentence [SEP]" encoding; the +2 offset skips
        [CLS] and the first [SEP].
        """
        sen_pre_len = len(self.plus_instance.sentence_token) + 2
        for i, char_here in enumerate(self.mask_str):
            if char_here == "1":
                continue  # keep the target aspect visible
            # BUG FIX: triplets[i] is a dict with 3 keys — unpacking it directly
            # iterated its keys and raised ValueError.  Read the span explicitly.
            a_start, a_end = self.sentence_instance.triplets[i]["aspect_span"]
            mask_tensor[sen_pre_len + a_start: sen_pre_len + a_end + 1] = 0

    def get_extended_label(self, seq_len: int = 100) -> dict:
        """Build label tensors for this instance, sized to the sentence part of ``seq_len``.

        Returns a dict with:
          - 'a_s'/'a_e': target aspect start/end index (Invalid id when no target)
          - 'a_s_'/'a_e_': one-hot aspect start/end vectors over sentence tokens
          - 'o_s'/'o_e': multi-hot opinion start/end vectors
          - 'mask': 1 over real sentence tokens, 0 over padding
          - 's': sentiment id; 'is_on': 1 iff a target triplet exists
        """
        sen_pre_len = len(self.plus_instance.sentence_token) + 2
        # Defaults cover the "no target triplet" (all-zero mask) case.
        a_s_i = torch.tensor([sentiment2id["Invalid"]])
        a_e_i = torch.tensor([sentiment2id["Invalid"]])
        sentiment_id = torch.tensor([sentiment2id["Invalid"]])
        is_on = torch.tensor([0])
        # Aspect start/end indicator vectors (sentence part only).
        a_s_tensor = torch.tensor([0] * (seq_len - sen_pre_len))
        a_e_tensor = torch.tensor([0] * (seq_len - sen_pre_len))
        # Opinion start/end indicator vectors.
        o_s_tensor = torch.tensor([0] * (seq_len - sen_pre_len))
        o_e_tensor = torch.tensor([0] * (seq_len - sen_pre_len))
        # Valid-token mask: 1 over the real sentence tokens.
        mask_tensor = torch.tensor([0] * (seq_len - sen_pre_len))
        mask_tensor[:len(self.sentence_instance.sentence_token)] = 1
        # Normal case: a target triplet exists, so fill in the real labels.
        if self.target_triplet is not None:
            a_start, a_end = self.target_triplet["aspect_span"]
            a_s_tensor[a_start] = 1
            a_e_tensor[a_end] = 1
            for o_start, o_end in self.target_triplet["opinion_span_list"]:
                o_s_tensor[o_start] = 1
                o_e_tensor[o_end] = 1
            a_s_i = torch.tensor([a_start])
            a_e_i = torch.tensor([a_end])
            sentiment_id = torch.tensor([self.target_triplet["sentiment"]])
            is_on = torch.tensor([1])
        return {
            'a_s': a_s_i,
            'a_e': a_e_i,
            'a_s_': a_s_tensor.unsqueeze(0),
            'a_e_': a_e_tensor.unsqueeze(0),
            'mask': mask_tensor.unsqueeze(0),
            'is_on': is_on,
            'o_s': o_s_tensor.unsqueeze(0),
            'o_e': o_e_tensor.unsqueeze(0),
            's': sentiment_id
        }


class TrainDataSet(Dataset):  # the train dataset and the test dataset are intentionally different
    """Training dataset: one ``TrainingInstance`` per (sentence, triplet-mask) pair."""

    def __init__(self, raw_data: RawData, tokenizer: BertTokenizer):
        plus_text = "Find the first aspect term and corresponding opinion term in the text"
        # The shared prompt ("plus") sentence, tokenized once.
        prompt_instance = MetaSentence({"sentence": plus_text}, tokenizer)
        self.meta_instances = [MetaSentence(item, tokenizer) for item in raw_data]
        # One training instance per binary mask string of each sentence.
        self.training_instances = [
            TrainingInstance(meta, prompt_instance, bin_str)
            for meta in self.meta_instances
            for bin_str in get_bin_str_list_range_n(len(meta.triplets))
        ]

    def __getitem__(self, index) -> T_co:
        return self.training_instances[index]

    def __iter__(self):
        return iter(self.training_instances)

    def __len__(self) -> int:
        return len(self.training_instances)


class TrainDataLoader:  # the train loader and the test loader are intentionally different
    """Batches ``TrainingInstance`` objects and builds model inputs/labels."""

    def __init__(self, dataset: TrainDataSet, args, tokenizer: BertTokenizer, shuffle=False, drop_last=False):
        self.dataset = dataset
        self.args = args
        self.tokenizer = tokenizer
        self.shuffle = shuffle
        self.drop_last = drop_last
        self.batch_size = args.batch_size
        sampler_cls = RandomSampler if shuffle else SequentialSampler
        self.base_sampler = sampler_cls(len(self.dataset))
        self.batch_sampler = BatchSampler(self.batch_size, self.base_sampler, self.drop_last)

    def extend_sentences(self, training_instance_list: List[TrainingInstance]):
        """Jointly tokenize a batch with the prompt sentence and build its labels."""
        sentence_texts = [ins.sentence_instance.sentence_text for ins in training_instance_list]
        prompt_texts = [ins.plus_instance.sentence_text for ins in training_instance_list]
        # Pair encoding: [CLS] plus [SEP] sentence [SEP], padded to the batch max.
        # Fields: input_ids, token_type_ids, attention_mask.
        tokenized_inputs = self.tokenizer(prompt_texts, sentence_texts, padding=True, return_tensors='pt')
        token_range_list = [ins.sentence_instance.word_token_range for ins in training_instance_list]
        seq_len = tokenized_inputs["input_ids"].size(1)

        # Blank out non-target aspects in each row of the attention mask.
        for mask_row, ins in zip(tokenized_inputs["attention_mask"], training_instance_list):
            ins.update_attention_mask(mask_row)

        # Stack the per-instance extended labels into batch tensors.
        label_list = [ins.get_extended_label(seq_len) for ins in training_instance_list]
        label_plus = {
            key: torch.cat([label[key] for label in label_list])
            for key in ('a_s', 'a_e', 'a_s_', 'a_e_', 'mask', 'is_on', 'o_s', 'o_e', 's')
        }

        return {
            'inputs_plus': tokenized_inputs,
            'label_plus': label_plus,
            'inputs_plus_for_test': [],  # only consumed by dev/test; unused when they have their own dataset/loader
            'sentence_token_range': token_range_list,
            'extra_info': {
                "sentence": sentence_texts
            }
        }

    def __iter__(self):
        for batch_indices in self.batch_sampler:
            yield self.extend_sentences([self.dataset[idx] for idx in batch_indices])

    def __len__(self) -> int:
        return len(self.batch_sampler)


class TestInstance:
    """Pairs one sentence with the prompt ("plus") sentence for evaluation."""

    def __init__(self, sentence_instance: MetaSentence, plus_instance: MetaSentence):
        self.meta_instance = sentence_instance
        self.plus_instance = plus_instance

    def get_token(self, tokenizer: BertTokenizer):
        """Return the joint tokenizer encoding for this sentence/prompt pair.

        NOTE(review): the pair order here is (sentence, plus) while the train
        loader encodes (plus, sentence) — confirm this asymmetry is intentional
        before relying on token offsets at test time.
        """
        sentence_batch = [self.meta_instance.sentence_text]
        prompt_batch = [self.plus_instance.sentence_text]
        return tokenizer(sentence_batch, prompt_batch, padding=True, return_tensors='pt')


class TestDataSet(Dataset):
    """Evaluation dataset: one ``TestInstance`` per raw sentence."""

    def __init__(self, raw_data: RawData, tokenizer: BertTokenizer):
        plus_text = "Find the first aspect term and corresponding opinion term in the text"
        # The shared prompt ("plus") sentence, tokenized once.
        prompt_instance = MetaSentence({"sentence": plus_text}, tokenizer)
        self.meta_instances = [MetaSentence(item, tokenizer) for item in raw_data]
        self.test_instances = [TestInstance(meta, prompt_instance) for meta in self.meta_instances]

    def __getitem__(self, index) -> T_co:
        return self.test_instances[index]

    def __iter__(self):
        return iter(self.test_instances)

    def __len__(self):
        return len(self.test_instances)


class TestDataLoader:
    """Sequential batch-size-1 loader over a ``TestDataSet``.

    Every single-sentence batch is pre-tokenized eagerly in ``__init__`` so
    iteration is just a lookup.
    """

    def __init__(self, dataset: TestDataSet, args, tokenizer: BertTokenizer, drop_last=False):
        # CONSISTENCY FIX: give drop_last a default of False like TrainDataLoader
        # (backward-compatible; positional callers are unaffected).
        self.dataset = dataset
        self.args = args
        self.batch_size = 1  # evaluation always runs one sentence at a time
        self.tokenizer = tokenizer
        self.base_sampler = SequentialSampler(len(self.dataset))
        self.batch_sampler = BatchSampler(1, self.base_sampler, drop_last)
        # Pre-compute every batch once.
        self.batch_data_list = [{
            "inputs_plus_for_test": instance.get_token(tokenizer),
            "sentence_token_range": [instance.meta_instance.word_token_range]
        } for instance in self.dataset]

    def __iter__(self):
        for batch_indices in self.batch_sampler:
            # Batch size is fixed at 1, so each batch holds exactly one index.
            yield self.batch_data_list[batch_indices[0]]

    def __len__(self) -> int:
        return len(self.batch_sampler)