from .base import *
from ..utils import *


class NerData(BaseData):
    """Dataset helper for NER: tokenizes raw text and builds scheme-specific labels.

    The labelling scheme is selected by ``args.method`` and may be one of
    'bio', 'bios', 'span', or 'biaffine'. When a lexicon ``trie`` is given,
    each sample is additionally augmented with word-lattice features.
    """

    def __init__(self, args, tokenizer, collate, trie=None):
        """
        Args:
            args: namespace carrying at least ``method`` (labelling scheme).
            tokenizer: fast tokenizer exposing ``encode_plus`` with
                ``return_offsets_mapping=True``.
            collate: callable that pads/stacks a list of feature dicts.
            trie: optional lexicon with a ``get_lexicon(text)`` method
                yielding ``(start, end, word)`` character spans.
        """
        self.trie = trie
        self.method = args.method
        self.collate = collate
        super().__init__(args, tokenizer)

    def train_collate(self, batch):
        """Encode and collate a training batch (character offsets discarded)."""
        features = [self.encode(sample['text'], sample['entities'])[0] for sample in batch]
        return self.collate(features)

    def dev_collate(self, batch):
        """Encode and collate an evaluation batch.

        NOTE(review): the offsets returned by ``encode`` are currently
        discarded, making this identical to ``train_collate``; they were
        presumably meant to be kept for span decoding — confirm with callers.
        """
        features = [self.encode(sample['text'], sample['entities'])[0] for sample in batch]
        return self.collate(features)

    def init_label(self):
        """Build ``label2id`` and the output class count for the configured scheme.

        Raises:
            ValueError: if ``self.method`` is not a recognised scheme.
        """
        self.label2id = {label: i for i, label in enumerate(self.id2label)}
        if self.method == 'bio':
            # B-x / I-x per entity type, plus the O tag.
            self.n_class = 2 * len(self.id2label) + 1
        elif self.method == 'bios':
            # B-x / I-x / S-x per entity type, plus the O tag.
            self.n_class = 3 * len(self.id2label) + 1
        elif self.method in ['biaffine', 'span']:
            # BUG FIX: the original tested `self.method == ['biaffine', 'span']`
            # (string vs. list), which is always False and left n_class unset.
            self.n_class = len(self.id2label) + 1
        else:
            raise ValueError('unknown labelling method: {!r}'.format(self.method))

    @staticmethod
    def read_file(file):
        """Read a JSON-lines file.

        Returns:
            (data, labels): the list of parsed samples and the sorted list of
            entity types observed across the file.
        """
        data = []
        label_set = set()
        # FIX: use a context manager so the file handle is always closed.
        with open(file, encoding='utf-8') as f:
            for line in f:
                sample = json.loads(line)
                for entity in sample['entities']:
                    label_set.add(entity['entity_type'])
                data.append(sample)
        return data, sorted(label_set)

    def encode(self, text, label=None, max_length=512):
        """Tokenize ``text`` and build label / lexicon features.

        Args:
            text: raw input string.
            label: optional list of entity dicts, each with 'entity_type' and
                either 'spans' (a single (start, end) char pair, end inclusive)
                or 'entity' (surface string, located in ``text`` via ``search``).
            max_length: tokenizer truncation length.

        Returns:
            (features, offset): feature dict for the collate fn, and the
            token-to-character offset mapping popped from the tokenizer output.
        """
        features = self.tokenizer.encode_plus(text, return_offsets_mapping=True, return_token_type_ids=False,
                                              max_length=max_length)
        offset = features.pop('offset_mapping')
        char_len = len(offset)
        # Character position -> token index; the [1:-1] slice skips the
        # special tokens at both ends, hence the `idx + 1` shift.
        mapping = {}
        for idx, (s, e) in enumerate(offset[1:-1]):
            for i in range(s, e):
                mapping[i] = idx + 1

        if self.trie is not None:
            starts = list(range(char_len))
            ends = starts[:]
            word_mask = [0] * char_len
            word_ids = word_mask[:]
            # FIX: the original re-queried the trie on every loop iteration;
            # the lexicon only needs to be fetched once.
            for (start, end, word) in self.trie.get_lexicon(text):
                new_start = mapping.get(start)
                new_end = mapping.get(end)
                if new_start is None or new_end is None:
                    # Word falls (partly) outside the truncated token window.
                    print('vocab {} is ignored.'.format(word))
                    continue
                starts.append(new_start)
                ends.append(new_end)
                word_mask.append(1)
                # vocab2id is presumably populated by BaseData — TODO confirm.
                word_ids.append(self.vocab2id[word])
            char_word_mask = [1] * len(word_mask)
            features.update({'start': starts, 'end': ends, 'char_word_mask': char_word_mask,
                             'word_mask': word_mask, 'word_ids': word_ids})

        if label is not None:
            if self.method == 'biaffine':
                # One label per (i, j) span with i <= j: flattened upper triangle.
                labels = [0] * ((char_len + 1) * char_len // 2)
                label_mapping = {}
                for i in range(char_len):
                    for j in range(i, char_len):
                        label_mapping[(i, j)] = len(label_mapping)
            elif self.method == 'span':
                start_label = [0] * char_len
                end_label = start_label[:]
            elif self.method in ['bio', 'bios']:
                labels = [0] * char_len

            for item in label:
                label_id = self.label2id[item['entity_type']]
                spans = item.get('spans')
                if spans is None:
                    starts = search(item['entity'], text)
                    if starts == -1 or not starts:
                        continue
                    # BUG FIX: the original built a *generator* of (start, end)
                    # pairs and then unpacked it as a single pair, which raised
                    # for one match and silently bound tuples for two. Use the
                    # first occurrence, matching the single-pair unpack below.
                    # TODO(review): confirm whether every occurrence should be
                    # labelled instead of only the first.
                    first = starts[0]
                    spans = (first, first + len(item['entity']) - 1)
                start, end = spans
                new_start = mapping.get(start)
                new_end = mapping.get(end)
                if new_start is None or new_end is None:
                    # ROBUSTNESS: an entity truncated away by max_length used
                    # to raise KeyError; skip it instead, consistent with the
                    # lexicon branch above.
                    print('entity {} is ignored.'.format(item.get('entity', spans)))
                    continue
                if self.method == 'span':
                    # Label 0 is reserved for "no entity"; shift real ids by 1.
                    start_label[new_start] = label_id + 1
                    end_label[new_end] = label_id + 1
                elif self.method == 'bio':
                    labels[new_start] = 2 * label_id + 1        # B-x
                    for i in range(new_start + 1, new_end + 1):
                        labels[i] = 2 * (label_id + 1)          # I-x
                elif self.method == 'bios':
                    if new_start == new_end:
                        labels[new_start] = 3 * label_id + 3    # S-x (single token)
                    else:
                        labels[new_start] = 3 * label_id + 1    # B-x
                        for i in range(new_start + 1, new_end + 1):
                            labels[i] = 3 * label_id + 2        # I-x
                elif self.method == 'biaffine':
                    labels[label_mapping[(new_start, new_end)]] = label_id + 1

            if self.method == 'span':
                features.update({'start_labels': start_label, 'end_labels': end_label})
            else:
                features.update({'labels': labels})
        return features, offset
