import torch
from torch.utils.data.dataset import Dataset

class CRFDataset(Dataset):
    """Token-classification dataset that turns character-level entity
    annotations into per-token BIO-style label sequences for CRF training.

    Each raw sample is a dict with:
      - "text":  the input string
      - "label": a list of ``(entity_type, (char_start, char_end))`` pairs,
                 where the character span is *inclusive*.

    The ``data`` container must also expose a ``type2id`` mapping that
    contains at least the special tags ``"<O>"`` and ``"<I>"``.
    """

    def __init__(self, tokenizer, data, max_length) -> None:
        """
        Args:
            tokenizer: HuggingFace-style tokenizer providing
                ``encode_plus(..., return_offsets_mapping=True)``.
            data: indexable container of samples that also carries a
                ``type2id`` tag-to-id mapping.
            max_length: maximum number of *characters* kept per sample
                (applied before tokenization, see ``truncate``).
        """
        self._data = data
        self._tokenizer = tokenizer
        self._max_length = max_length
        super().__init__()

    def __len__(self):
        """Number of samples in the underlying data container."""
        return len(self._data)

    def __getitem__(self, index) -> dict:
        """Tokenize one sample and build its per-token BIO label tensor.

        Returns:
            dict with "input_ids" (list[int]), "label" (LongTensor of shape
            ``(seq_len,)``), "gold" (the truncated character-level
            annotations) and "length" (``seq_len``).
        """
        sample = self._data[index]
        sample = self.truncate(sample, self._max_length)

        tokens = self._tokenizer.encode_plus(sample["text"], add_special_tokens=False, return_offsets_mapping=True,
                                             return_attention_mask=False, return_token_type_ids=False)
        input_ids = tokens["input_ids"]
        offset_mapping = tokens["offset_mapping"]

        sequence_length = len(input_ids)

        # BIO scheme: default every position to "<O>"; the B position carries
        # the entity type, interior positions carry "<I>".
        label = torch.zeros((sequence_length,), dtype=torch.long).fill_(self._data.type2id["<O>"])

        for ent_type, ent_span in sample["label"]:

            tok_span = self.charspan2tokenspan(ent_span, offset_mapping)

            # NOTE(review): "<O>"/"<I>" ids come from type2id, but the B
            # position is assigned `ent_type` directly — this assumes
            # sample["label"] already stores numeric type ids; confirm,
            # otherwise this should be self._data.type2id[ent_type].
            label[tok_span[0]] = ent_type

            # Mark the interior of multi-token entities with "<I>"
            # (token span is inclusive, hence the +1 on both bounds).
            if tok_span[1] > tok_span[0]:
                label[tok_span[0]+1:tok_span[1]+1] = self._data.type2id["<I>"]

        return {"input_ids":input_ids, "label":label, "gold":sample["label"], "length":sequence_length}

    @staticmethod
    def truncate(sample, max_length=512):
        """Clip a sample's text to ``max_length`` characters and adjust its
        entity annotations accordingly.

        Spans are inclusive: entities starting at or beyond ``max_length``
        are dropped; entities extending past the limit are clipped to end at
        ``max_length - 1``; the rest are kept unchanged.
        """
        truncated_text = sample["text"] if len(sample["text"]) < max_length else sample["text"][:max_length]
        truncated_entities = []

        for ent_type, ent_span in sample["label"]:

            if ent_span[0] >= max_length:
                # Entity begins past the kept text: drop it entirely.
                continue
            elif ent_span[1] >= max_length:
                # Entity straddles the boundary: clip its end to the last kept char.
                truncated_entities.append((ent_type, (ent_span[0], max_length - 1)))
            else:
                truncated_entities.append((ent_type, (ent_span[0], ent_span[1])))

        return {"text": truncated_text, "label": truncated_entities}

    @staticmethod
    def charspan2tokenspan(charspan, offsets_mapping):
        """Map an inclusive character span to an inclusive token span using
        the tokenizer's ``offset_mapping``.

        Args:
            charspan: ``(char_start, char_end)``, inclusive.
            offsets_mapping: list of ``(start, end)`` character offsets, one
                per token, as returned by ``encode_plus``.

        Returns:
            ``(token_start, token_end)``; ``token_end`` may be ``None`` if
            the end character could not be aligned.

        Raises:
            ValueError: if the start character is not covered by any token.
        """
        char_p, char_q = charspan

        token_p = None
        token_q = None

        # Locate the start token.
        for i in range(len(offsets_mapping)-1):
            # char_p falls in token i (or in the gap before token i+1).
            if char_p >= offsets_mapping[i][0] and char_p < offsets_mapping[i+1][0]:
                token_p = i
                break
            # char_p falls inside token i+1.
            elif char_p >= offsets_mapping[i+1][0] and char_p < offsets_mapping[i+1][1]:
                token_p = i+1
                break

        if token_p is None:
            # Fail loudly instead of the obscure TypeError that
            # `range(None - 1, ...)` would otherwise raise below.
            raise ValueError(f"character span {charspan} cannot be aligned to the offset mapping")

        # Locate the end token. A valid span has token_p <= token_q, so start
        # one step before token_p (needed when char_p == char_q), clamped to 0
        # so an index of -1 never wraps around to the *last* token.
        for i in range(max(token_p - 1, 0), len(offsets_mapping)-1):
            # char_q falls in the gap after token i or inside token i+1.
            if char_q >= offsets_mapping[i][1] and char_q < offsets_mapping[i+1][1]:
                token_q = i + 1
                break
            # char_q falls inside token i.
            elif char_q >= offsets_mapping[i][0] and char_q < offsets_mapping[i][1]:
                token_q = i
                break

        return (token_p, token_q)

    @staticmethod
    def collect_function(batch):
        """Collate a list of ``__getitem__`` outputs into padded batch tensors.

        Pads ``input_ids``/``labels`` with 0 up to the longest sequence in the
        batch and builds the matching ``attention_mask``. ``token_type_ids``
        is always all-zeros (single-segment input). ``golds`` is passed
        through as a plain Python list.

        NOTE(review): padded label positions are 0, which may coincide with a
        real tag id — presumably masked downstream via attention_mask; verify.
        """
        batch_size = len(batch)
        max_len = max(data["length"] for data in batch)

        input_ids = torch.zeros((batch_size, max_len), dtype=torch.long)
        attention_mask = torch.zeros((batch_size, max_len), dtype=torch.long)
        token_type_ids = torch.zeros((batch_size, max_len), dtype=torch.long)

        golds = []
        labels = torch.zeros((batch_size, max_len), dtype=torch.long)

        for bth, data in enumerate(batch):
            seq_len = data["length"]
            input_ids[bth, :seq_len] = torch.tensor(data["input_ids"], dtype=torch.long)
            attention_mask[bth, :seq_len] = 1  # 1 = real token, 0 = padding
            golds.append(data["gold"])
            labels[bth, :seq_len] = data["label"]

        return {"input_ids":input_ids, "attention_mask":attention_mask, "token_type_ids":token_type_ids, "labels":labels, "golds":golds}