import torch
from transformers import BertTokenizer
from constant import *


class SentenceInstance:
    """Holds every piece of raw information for a single sentence.

    Converts word-level aspect/opinion spans from ``origin_triplets`` into
    token-level spans using the supplied (BERT-style) tokenizer.
    """

    def __init__(self, sentence_text: str, origin_triplets: list, tokenizer):
        self.sentence_text = sentence_text
        self.origin_triplets = origin_triplets
        # Slice off the [CLS]/[SEP] ids that encode() adds by default.
        self.sentence_token = tokenizer.encode(self.sentence_text)[1:-1]
        # Every opinion span (token indices) across all triplets.
        self.all_opinion_span_list = []

        # Map each whitespace word to its token range.  A plain split() keeps
        # punctuation glued to words (e.g. "world!"), but the tokenizer splits
        # such a word into several sub-tokens, so we encode word by word to
        # learn how many tokens each word occupies.
        # word_token_range looks like [[0, 0], [1, 1], [1, 2], [3, 4]]
        self.word_token_range = []
        self.token2id = []
        cursor = 0
        for word_idx, word in enumerate(self.sentence_text.strip().split()):
            n_sub = len(tokenizer.encode(word, add_special_tokens=False))
            self.word_token_range.append([cursor, cursor + n_sub - 1])
            self.token2id.extend([word_idx] * n_sub)
            cursor += n_sub

        # Rewrite each triplet's word-index spans as token-index spans.
        self.triplets = []
        for aspect_span, opinion_span_list, sentiment_id in origin_triplets:
            aspect_first = self.word_token_range[aspect_span[0]][0]
            aspect_last = self.word_token_range[aspect_span[-1]][1]
            token_opinions = []
            for opinion_span in opinion_span_list:
                span = [
                    self.word_token_range[opinion_span[0]][0],
                    self.word_token_range[opinion_span[-1]][1],
                ]
                token_opinions.append(span)
                self.all_opinion_span_list.append(span)
            self.triplets.append({
                "aspect_span": [aspect_first, aspect_last],
                "opinion_span_list": token_opinions,
                "sentiment": sentiment_id,
            })


class TrainingInstance:
    """A model-ready training example: a SentenceInstance plus a selection mask."""

    def __init__(self, sentence_instance: SentenceInstance, mask_str: str):
        self.sentence_instance = sentence_instance
        # mask_str is a string such as "0010"; position i set to "1" marks
        # triplet i of the sentence as the training target.
        self.mask_str = mask_str
        # First triplet flagged "1" in the mask, or None when none is flagged.
        # Shape: {"aspect_span": [], "opinion_span_list": [[], []], "sentiment": 0}
        self.target_triplet = next(
            (sentence_instance.triplets[i]
             for i, flag in enumerate(mask_str) if flag == "1"),
            None,
        )

    def update_attention_mask(self, mask_tensor: torch.Tensor):
        """Zero out, in place, the aspect tokens of every non-target triplet.

        ``mask_tensor`` is a 1-D attention mask (no squeeze needed); the +1
        offset skips the leading [CLS] token.
        """
        for pos, flag in enumerate(self.mask_str):
            if flag != "0":
                continue
            start, end = self.sentence_instance.triplets[pos]["aspect_span"]
            mask_tensor[start + 1: end + 2] = 0

    def get_extended_label(self, seq_len: int = 100) -> dict:
        """Build the supervision tensors for the target triplet.

        ``seq_len`` is the total sequence length; the position tensors have
        ``seq_len - 1`` entries (the [CLS] slot is excluded).
        """
        # Defaults for the "no target triplet" case.
        aspect_start_idx = torch.tensor([sentiment2id["Invalid"]])  # -1
        aspect_end_idx = torch.tensor([sentiment2id["Invalid"]])  # -1
        sentiment = torch.tensor([sentiment2id["Invalid"]])  # -1
        has_target = torch.tensor([0])
        aspect_start = torch.zeros(seq_len - 1, dtype=torch.long)
        aspect_end = torch.zeros(seq_len - 1, dtype=torch.long)
        opinion_start = torch.zeros(seq_len - 1, dtype=torch.long)
        opinion_end = torch.zeros(seq_len - 1, dtype=torch.long)

        if self.target_triplet is not None:
            a_start, a_end = self.target_triplet["aspect_span"]
            aspect_start[a_start] = 1
            aspect_end[a_end] = 1
            for o_start, o_end in self.target_triplet["opinion_span_list"]:
                opinion_start[o_start] = 1
                opinion_end[o_end] = 1
            aspect_start_idx = torch.tensor([a_start])
            aspect_end_idx = torch.tensor([a_end])
            sentiment = torch.tensor([self.target_triplet["sentiment"]])
            has_target = torch.tensor([1])

        return {
            'a_s': aspect_start_idx,
            'a_e': aspect_end_idx,
            'a_s_': aspect_start.unsqueeze(0),
            'a_e_': aspect_end.unsqueeze(0),
            'is_on': has_target,
            'o_s': opinion_start.unsqueeze(0),
            'o_e': opinion_end.unsqueeze(0),
            's': sentiment
        }


class TestInstance:
    """Evaluation-time wrapper around a SentenceInstance."""

    def __init__(self, sentence_instance: SentenceInstance):
        self.meta_instance = sentence_instance

    def get_token_as_batch(self, tokenizer: BertTokenizer):
        """Tokenize the raw sentence text as a padded, batched tensor dict."""
        text = self.meta_instance.sentence_text
        return tokenizer(text, padding=True, return_tensors='pt')