
from typing import Any

import torch
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizerFast
from .wrappers import DataWrapper
from tagging.handshakeing import HandshakingTaggingEncoder, TagMapping

class TPLinkerDataset(Dataset):
    """Dataset that turns SPO-annotated text samples into TPLinker
    handshaking-tagging training tensors.

    Each item yields the tokenized input plus three label matrices
    (head-to-tail, head-to-head, tail-to-tail) produced by
    ``HandshakingTaggingEncoder``.

    Annotations for project types are quoted (forward references) so the
    class can be defined even when those types are imported lazily.
    """

    def __init__(self, data: "DataWrapper", tokenizer: "PreTrainedTokenizerFast", max_seqlen: int = 512) -> None:
        self._data = data
        self._tokenizer = tokenizer
        self._max_seqlen = max_seqlen

        tag_mapping = TagMapping()
        # The encoder needs the number of relation types to size its tag matrices.
        self.tag_encoder = HandshakingTaggingEncoder(tag_mapping, len(self._data.id2relation))

        super().__init__()

    def __getitem__(self, index) -> Any:
        """Tokenize sample ``index``, convert every SPO char span to a token
        span, and encode the handshaking label matrices.

        Returns a dict with ``input_ids`` / ``attention_mask`` /
        ``token_type_ids`` (LongTensors) and ``labels`` holding ``h2t`` /
        ``h2h`` / ``t2t``.

        Raises:
            ValueError: if a char span cannot be mapped onto tokens (e.g. it
                was truncated away) — see :meth:`charspan2tokenspan`.
        """
        sample = self._data[index]
        # add_special_tokens=False keeps token offsets aligned with the raw
        # text, so char spans map directly without shifting for [CLS]/[SEP].
        sample_in_ids = self._tokenizer.encode_plus(
            sample["text"], padding='max_length', add_special_tokens=False,
            truncation=True, return_offsets_mapping=True,
            max_length=self._max_seqlen)
        offsets_mapping = sample_in_ids["offset_mapping"]

        built_ids = {
            "input_ids": sample_in_ids["input_ids"],
            "spo_list": []
        }

        for rel in sample['spo_list']:
            cooked_predicate = {
                "predicate": rel["predicate"],
                "subject_tokenspan": self.charspan2tokenspan(rel["subject_span"], offsets_mapping),
                "object_tokenspan": self.charspan2tokenspan(rel["object_span"], offsets_mapping)
            }
            built_ids["spo_list"].append(cooked_predicate)

        h2t, h2h, t2t = self.tag_encoder.encode(built_ids, self._max_seqlen)

        # NOTE(review): assumes the tokenizer returns token_type_ids (true for
        # BERT-style tokenizers) — confirm for other tokenizer families.
        return {
            "input_ids": torch.tensor(sample_in_ids["input_ids"]).long(),
            "attention_mask": torch.tensor(sample_in_ids["attention_mask"]).long(),
            "token_type_ids": torch.tensor(sample_in_ids["token_type_ids"]).long(),
            "labels": {
                "h2t": torch.tensor(h2t).long(),
                "h2h": torch.tensor(h2h).long(),
                "t2t": torch.tensor(t2t).long()
            }
        }

    def __len__(self):
        """Number of samples in the wrapped data source."""
        return len(self._data)

    @staticmethod
    def charspan2tokenspan(charspan, offsets_mapping):
        """Map a character span ``(char_p, char_q)`` to token indices via the
        tokenizer's offset mapping.

        A character that falls in the gap between two tokens (e.g. the
        whitespace after a token) is attributed to the neighbouring token so
        the resulting token span stays contiguous.

        Raises:
            ValueError: if either endpoint is not covered by the offsets
                mapping (span out of range or truncated away). Previously the
                start case crashed with an opaque ``TypeError``
                (``range(None, ...)``) and the end case silently produced
                ``[token_p, None]``, deferring the crash into the tag encoder.
        """
        char_p, char_q = charspan

        token_p = None
        token_q = None

        # Locate the start token.
        for i in range(len(offsets_mapping) - 1):
            # char_p lies in token i (or the gap before token i+1 starts).
            if char_p >= offsets_mapping[i][0] and char_p < offsets_mapping[i+1][0]:
                token_p = i
                break
            # char_p lies inside token i+1.
            elif char_p >= offsets_mapping[i+1][0] and char_p < offsets_mapping[i+1][1]:
                token_p = i + 1
                break

        if token_p is None:
            # Guard: the original fell through into range(None, ...) here.
            raise ValueError(f"char span start {char_p} not covered by offsets mapping")

        # Locate the end token. In a valid span p <= q, so start from token_p.
        for i in range(token_p, len(offsets_mapping) - 1):
            # char_q lies in the gap after token i -> attribute to token i+1.
            if char_q >= offsets_mapping[i][1] and char_q < offsets_mapping[i+1][1]:
                token_q = i + 1
                break
            # char_q lies inside token i.
            elif char_q >= offsets_mapping[i][0] and char_q < offsets_mapping[i][1]:
                token_q = i
                break

        if token_q is None:
            raise ValueError(f"char span end {char_q} not covered by offsets mapping")

        return [token_p, token_q]