import torch
from transformers import BertTokenizer
from constant import *


class SentenceInstance:  # All raw information about a single sentence lives here.
    def __init__(self, sentence_text: str, origin_triplets: list, tokenizer):
        """Convert word-level triplet annotations into token-level spans.

        :param sentence_text: raw sentence, words separated by whitespace
        :param origin_triplets: list of (aspect_span, opinion_span_list, sentiment_id)
            tuples, where each span is a list of word indices like [start, end]
        :param tokenizer: BERT-style tokenizer exposing ``encode`` (adds the
            [CLS]/[SEP] specials unless ``add_special_tokens=False``)
        """
        self.sentence_text = sentence_text
        # Sort by the aspect's first word index so that downstream mask
        # strings, which address triplets by position, see a stable order.
        self.origin_triplets = sorted(origin_triplets, key=lambda x: x[0][0])
        # Token ids of the full sentence with [CLS]/[SEP] stripped.
        self.sentence_token = tokenizer.encode(self.sentence_text)[1:-1]

        def get_word_token_range() -> tuple:
            # Why this is not a simple split(): split() yields strings such as
            # "world!" that the BERT tokenizer breaks into several sub-tokens
            # (e.g. [3434, 999]), so each word's token span must be measured
            # by encoding the word on its own.
            token_range, token2id, token_start = [], [], 0
            for i, word_text in enumerate(self.sentence_text.strip().split()):
                token_end = token_start + len(tokenizer.encode(word_text, add_special_tokens=False))
                token2id.extend([i] * (token_end - token_start))
                token_range.append([token_start, token_end - 1])
                token_start = token_end
            return token_range, token2id

        # self.word_token_range looks like [[0, 0], [1, 1], [1, 2], [3, 4]]
        # (see the tests in test03.py for details on this part).
        self.word_token_range, self.token2id = get_word_token_range()

        self.triplets = []
        self.all_opinion = []  # every token-level opinion span, across all triplets
        # BUGFIX: iterate the *sorted* triplets. Previously the unsorted
        # parameter was iterated, so self.triplets silently ignored the sort
        # performed above and could disagree with position-based mask strings.
        for aspect_span, opinion_span_list, sentiment_id in self.origin_triplets:
            # Map word positions to token positions for aspect and opinions.
            a_s = self.word_token_range[aspect_span[0]][0]
            a_e = self.word_token_range[aspect_span[-1]][1]
            opinion_spans = []
            for opinion_span in opinion_span_list:
                o_s = self.word_token_range[opinion_span[0]][0]
                o_e = self.word_token_range[opinion_span[-1]][1]
                new_opinion_span = [o_s, o_e]
                self.all_opinion.append(new_opinion_span)
                opinion_spans.append(new_opinion_span)
            self.triplets.append({
                "aspect_span": [a_s, a_e],
                # keep opinions ordered by their start token
                "opinion_span_list": sorted(opinion_spans, key=lambda x: x[0]),
                'sentiment': sentiment_id,
            })


class TrainingInstance:
    """One training example: the sentence paired with a prompt ("plus")
    sentence, plus mask strings selecting the target aspect / opinion."""

    def __init__(self, sentence_instance: SentenceInstance, plus_instance: SentenceInstance, mask_str: str, mask_opinion_str: str):
        self.sentence_instance = sentence_instance
        self.plus_instance = plus_instance
        self.mask_str = mask_str  # e.g. "0010": the '1' marks the target aspect
        self.opinion_mask_str = mask_opinion_str  # same encoding for opinions
        self.target_opinion = None
        self.target_opinion_list = None
        self.target_aspect = None
        self.target_sentiment = None

        # Locate the target triplet from the aspect mask.
        aspect_idx = self.mask_str.find("1")  # which aspect is the target
        target_triplet = sentence_instance.triplets[aspect_idx] if aspect_idx >= 0 else None
        if target_triplet is not None:
            self.target_aspect = target_triplet["aspect_span"]
            self.target_opinion_list = target_triplet["opinion_span_list"]
        if self.target_opinion_list is not None:
            opinion_idx = mask_opinion_str.find("1")
            if opinion_idx >= 0:
                self.target_opinion = self.target_opinion_list[opinion_idx]
            self.target_sentiment = target_triplet["sentiment"]

    def update_attention_mask(self, mask_tensor: torch.Tensor):
        """Zero out the token positions of all NON-target aspects/opinions in
        ``mask_tensor`` (a 1-D attention mask; modified in place, no squeeze
        needed)."""
        offset = len(self.plus_instance.sentence_token) + 2  # [CLS] + plus sentence + [SEP]
        # Blank every aspect whose mask character is "0".
        for i, flag in enumerate(self.mask_str):
            if flag != "0":
                continue
            start, end = self.sentence_instance.triplets[i]["aspect_span"]
            mask_tensor[offset + start: offset + end + 1] = 0
        # Blank the non-target opinions, when a target triplet exists.
        if self.target_opinion_list is None:
            return
        for i, flag in enumerate(self.opinion_mask_str):
            if flag != "0":
                continue
            start, end = self.target_opinion_list[i]
            mask_tensor[offset + start: offset + end + 1] = 0

    def get_extended_label(self, seq_len: int = 100) -> dict:
        """Build the supervision tensors for this instance.

        :param seq_len: total length of the padded paired input
        :return: dict with scalar index tensors ('a_s', 'a_e', 's'), 0/1
            indicator tensors ('has_aspect', 'has_opinion'), and one-hot span
            tensors ('a_s_', 'a_e_', 'o_s', 'o_e') of shape
            (1, seq_len - prefix_length)
        """
        offset = len(self.plus_instance.sentence_token) + 2
        body_len = seq_len - offset
        invalid = sentiment2id["Invalid"]  # -1 sentinel for "no label"
        # Scalar labels, defaulting to the invalid sentinel / zero flags.
        a_start_idx = torch.tensor([invalid])
        a_end_idx = torch.tensor([invalid])
        sentiment = torch.tensor([invalid])
        has_aspect = torch.tensor([0])
        has_opinion = torch.tensor([0])
        # One-hot span tensors over the sentence portion of the input.
        a_s_tensor = torch.tensor([0] * body_len)
        a_e_tensor = torch.tensor([0] * body_len)
        o_s_tensor = torch.tensor([0] * body_len)
        o_e_tensor = torch.tensor([0] * body_len)
        # Fill in the real labels when a target exists.
        if self.target_aspect is not None:
            a_start, a_end = self.target_aspect
            a_s_tensor[a_start] = 1
            a_e_tensor[a_end] = 1
            a_start_idx = torch.tensor([a_start])
            a_end_idx = torch.tensor([a_end])
            has_aspect = torch.tensor([1])
        if self.target_opinion is not None:
            o_start, o_end = self.target_opinion
            o_s_tensor[o_start] = 1
            o_e_tensor[o_end] = 1
            has_opinion = torch.tensor([1])
        if self.target_sentiment is not None:
            sentiment = torch.tensor([self.target_sentiment])
        return {
            'a_s': a_start_idx,
            'a_e': a_end_idx,
            'a_s_': a_s_tensor.unsqueeze(0),
            'a_e_': a_e_tensor.unsqueeze(0),
            'has_aspect': has_aspect,
            'has_opinion': has_opinion,
            'o_s': o_s_tensor.unsqueeze(0),
            'o_e': o_e_tensor.unsqueeze(0),
            's': sentiment
        }


class TestInstance:
    """Evaluation-time pairing of a sentence with its prompt ("plus") sentence."""

    def __init__(self, sentence_instance: SentenceInstance, plus_instance: SentenceInstance):
        self.meta_instance = sentence_instance
        self.plus_instance = plus_instance

    def get_token_as_batch(self, tokenizer: BertTokenizer):
        """Tokenize (plus_sentence, sentence) as a single-element batch of
        padded PyTorch tensors."""
        plus_texts = [self.plus_instance.sentence_text]
        meta_texts = [self.meta_instance.sentence_text]
        return tokenizer(plus_texts, meta_texts, padding=True, return_tensors='pt')
