import torch
import os
from cognlp.utils.vocabulary import Vocabulary
from cognlp.utils.util import load_json, save_json
from .processor import Processor
from transformers import BertTokenizer
from tqdm import tqdm
import math
import numpy as np


class REXProcessor(Processor):
    """Dataset processor for T-REx-style relation extraction.

    Turns annotated documents (text, sentence boundaries, entities,
    triples) into BERT model inputs: padded token ids, head/tail entity
    marker positions, attention masks and relation label ids.  It also
    generates '<unk>' (no-relation) negative pairs and down-samples
    over-represented relation labels.
    """

    def __init__(self, label_list=None, path=None):
        super().__init__(label_list, path)
        # NOTE(review): tokenize() lower-cases every token although the
        # cased vocabulary is loaded here — confirm this is intentional.
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
        self.max_length = 256
        self.blank_padding = True
        self.mask_entity = True
        # examples seen per label while scanning the corpus
        self.label_count = {}
        # examples kept per label after sampling
        self.used_label_count = {}
        # highest single-label count observed so far
        self.max_count = 0
        # number of generated negative ('<unk>') pairs so far
        self.unk_count = 0
        # assumes iterating self.vocabulary yields tuples whose first
        # element is the label string — TODO confirm against Processor
        for label in self.vocabulary:
            self.label_count[label[0]] = 0
            self.used_label_count[label[0]] = 0

    def to_device(self, path, device):
        """Load processed data from ``path`` and return it with every
        field converted to a ``torch.long`` tensor on ``device``."""
        data = load_json(path)
        data['indexed_tokens'] = torch.tensor(data['indexed_tokens'], dtype=torch.long, device=device)
        data['pos1'] = torch.tensor(data['pos1'], dtype=torch.long, device=device)
        data['pos2'] = torch.tensor(data['pos2'], dtype=torch.long, device=device)
        data['att_mask'] = torch.tensor(data['att_mask'], dtype=torch.long, device=device)
        data['label_ids'] = torch.tensor(data['label_ids'], dtype=torch.long, device=device)
        return data

    def process_raw_data(self, dataset, path):
        """Tokenize pre-extracted instances (as produced by
        ``get_raw_data``), save the resulting lists-of-lists dict to
        ``path`` as JSON and return it."""
        indexed_tokens = []
        pos1s = []
        pos2s = []
        att_masks = []
        label_ids = []
        for item in tqdm(dataset, desc='Processing Data'):
            indexed_token, pos1, pos2, att_mask = self.tokenize(item)
            indexed_tokens.append(indexed_token)
            pos1s.append(pos1)
            pos2s.append(pos2)
            att_masks.append(att_mask)
            label_ids.append(self.vocabulary.to_index(item['label']))
        all_data = {'indexed_tokens': indexed_tokens, 'pos1': pos1s,
                    'pos2': pos2s, 'att_mask': att_masks, 'label_ids': label_ids}
        save_json(all_data, path)
        return all_data

    def process(self, dataset, path):
        """Build, negative-augment and down-sample training data.

        For every triple a positive instance is built from its sentence.
        Entity pairs that do not form a triple are added as '<unk>'
        negatives, capped at ``self.max_count`` (the size of the largest
        positive label bucket).  Finally each label is capped at roughly
        the median label frequency.  The result is saved to ``path`` and
        returned.
        """
        indexed_tokens = []
        pos1s = []
        pos2s = []
        att_masks = []
        label_ids = []
        for data in tqdm(dataset, desc='Processing Data'):
            text = data['text']
            sentences_boundaries = data['sentences_boundaries']
            entities = data['entities']
            triples = data['triples']
            # entity-boundary pairs already emitted as positives
            added = set()
            for triple in triples:
                item = {'text': '',
                        'h': {'pos': [], 'word': ''},
                        't': {'pos': [], 'word': ''}}
                sentence_id = triple['sentence_id']
                predicate = triple['predicate']
                h = triple['subject']
                t = triple['object']
                # skip triples whose subject/object has no span annotation
                if not h['boundaries'] or not t['boundaries']:
                    continue
                item['text'] = text[sentences_boundaries[sentence_id][0]:sentences_boundaries[sentence_id][1]]
                item['h']['pos'] = h['boundaries']
                item['h']['word'] = h['surfaceform']
                item['t']['pos'] = t['boundaries']
                item['t']['word'] = t['surfaceform']
                indexed_token, pos1, pos2, att_mask = self.tokenize(item)
                indexed_tokens.append(indexed_token)
                added.add(str([h['boundaries'], t['boundaries']]))
                pos1s.append(pos1)
                pos2s.append(pos2)
                att_masks.append(att_mask)
                label_ids.append(self.vocabulary.to_index(predicate['uri']))
                self.label_count[predicate['uri']] += 1
                # track the size of the largest label bucket
                self.max_count = self.max_count if self.max_count >= self.label_count[predicate['uri']]\
                    else self.label_count[predicate['uri']]
            # generate '<unk>' negatives from entity pairs that share a
            # sentence but were not seen in any triple
            for entity1 in entities:
                if self.unk_count >= self.max_count:
                    break
                for entity2 in entities:
                    if entity1['boundaries'] == entity2['boundaries']:
                        continue
                    else:
                        # NOTE(review): this 'break' abandons all remaining
                        # entity2 candidates for this entity1 as soon as one
                        # already-added pair is met; 'continue' (as used for
                        # the equal-boundaries case above) looks like the
                        # intent — confirm before changing.
                        if str([entity1['boundaries'], entity2['boundaries']]) in added:
                            break
                        if self.unk_count >= self.max_count:
                            break
                        else:
                            # keep the pair only if both entities fall
                            # inside the same sentence
                            for sentences_boundary in sentences_boundaries:
                                if sentences_boundary[0] <= entity1['boundaries'][0] and sentences_boundary[0] <= entity2['boundaries'][0] and sentences_boundary[1] >= entity1['boundaries'][1] and sentences_boundary[1] >= entity2['boundaries'][1]:
                                    item = {'text': '', 'h': {'pos': [], 'word': ''}, 't': {'pos': [], 'word': ''}}
                                    item['text'] = text[sentences_boundary[0]:sentences_boundary[1]]
                                    item['h']['pos'] = entity1['boundaries']
                                    item['h']['word'] = entity1['surfaceform']
                                    item['t']['pos'] = entity2['boundaries']
                                    item['t']['word'] = entity2['surfaceform']
                                    indexed_token, pos1, pos2, att_mask = self.tokenize(item)
                                    indexed_tokens.append(indexed_token)
                                    pos1s.append(pos1)
                                    pos2s.append(pos2)
                                    att_masks.append(att_mask)
                                    label_ids.append(self.vocabulary.unknown_idx)
                                    self.unk_count += 1
        # all_cnt = len(indexed_tokens)
        # label_cnt = len(self.label_count)
        # ave_cnt = all_cnt // label_cnt
        # median label frequency is the base per-label sampling cap
        label_count = []
        for val in self.label_count.values():
            label_count.append(val)
        mid_cnt = np.median(label_count)
        # debug output of the median label frequency
        print(mid_cnt)
        used_indexed_tokens = []
        used_pos1s = []
        used_pos2s = []
        used_att_masks = []
        used_label_ids = []
        # keep at most mid_cnt + sqrt(label frequency) + 1 examples per
        # label; assumes '<unk>' is part of self.vocabulary so both count
        # dicts contain it — TODO confirm
        for _, (indexed_token, pos1, pos2, att_mask, label_id) in tqdm(enumerate(zip(indexed_tokens, pos1s, pos2s, att_masks, label_ids)), desc="Sampling Data"):
            if self.used_label_count[self.vocabulary.to_word(label_id)] < mid_cnt + int(math.sqrt(abs(self.label_count[self.vocabulary.to_word(label_id)])) + 1):
                used_indexed_tokens.append(indexed_token)
                used_pos1s.append(pos1)
                used_pos2s.append(pos2)
                used_att_masks.append(att_mask)
                used_label_ids.append(label_id)
                self.used_label_count[self.vocabulary.to_word(label_id)] += 1
        all_data = {'indexed_tokens': used_indexed_tokens, 'pos1': used_pos1s,
                    'pos2': used_pos2s, 'att_mask': used_att_masks, 'label_ids': used_label_ids}
        save_json(all_data, path)
        return all_data

    def tokenize(self, item):
        """
        Args:
            item: data instance containing either 'text' (raw string with
                character-offset entity spans) or 'token' (token list with
                token-offset spans), plus 'h' and 't' entity dicts, e.g.
                {"text":"Tom love Jerry", "h":{"pos":[0, 3]}, "t":{"pos":[9, 14]}}
        Return:
            (indexed_tokens, [pos1], [pos2], att_mask): padded token ids,
            positions of the '[unused0]'/'[unused1]' head/tail markers,
            and the attention mask.
        """
        if 'text' in item:
            sentence = item['text']
            is_token = False
        else:
            sentence = item['token']
            is_token = True
        pos_head = item['h']['pos']
        pos_tail = item['t']['pos']
        if not is_token:
            # character-offset mode: cut the sentence around the two
            # entity spans and wordpiece-tokenize each segment
            pos_min = pos_head
            pos_max = pos_tail
            if pos_head[0] > pos_tail[0]:
                pos_min = pos_tail
                pos_max = pos_head
                rev = True
            else:
                rev = False
            sent0 = self.tokenizer.tokenize(sentence[:pos_min[0]])
            ent0 = self.tokenizer.tokenize(sentence[pos_min[0]:pos_min[1]])
            sent1 = self.tokenizer.tokenize(sentence[pos_min[1]:pos_max[0]])
            ent1 = self.tokenizer.tokenize(sentence[pos_max[0]:pos_max[1]])
            sent2 = self.tokenizer.tokenize(sentence[pos_max[1]:])
            if self.mask_entity:
                # replace entity surface forms with reserved tokens:
                # '[unused4]' = head entity, '[unused5]' = tail entity
                ent0 = ['[unused4]']
                ent1 = ['[unused5]']
                if rev:
                    ent0 = ['[unused5]']
                    ent1 = ['[unused4]']
            # recompute entity spans as token offsets in the rebuilt list
            pos_head = [len(sent0), len(sent0) + len(ent0)]
            pos_tail = [len(sent0) + len(ent0) + len(sent1), len(sent0) +
                len(ent0) + len(sent1) + len(ent1)]
            if rev:
                pos_tail = [len(sent0), len(sent0) + len(ent0)]
                pos_head = [len(sent0) + len(ent0) + len(sent1), len(sent0) +
                    len(ent0) + len(sent1) + len(ent1)]
            tokens = sent0 + ent0 + sent1 + ent1 + sent2
        else:
            tokens = sentence
        re_tokens = ['[CLS]']
        cur_pos = 0
        pos1 = 0
        pos2 = 0
        for token in tokens:
            # NOTE(review): lower-casing despite the cased checkpoint —
            # confirm intended.
            token = token.lower()
            # '[unused0]'/'[unused1]' open the head/tail entity spans
            if cur_pos == pos_head[0]:
                pos1 = len(re_tokens)
                re_tokens.append('[unused0]')
            if cur_pos == pos_tail[0]:
                pos2 = len(re_tokens)
                re_tokens.append('[unused1]')
            if is_token:
                re_tokens += self.tokenizer.tokenize(token)
            else:
                re_tokens.append(token)
            # '[unused2]'/'[unused3]' close the head/tail entity spans
            if cur_pos == pos_head[1] - 1:
                re_tokens.append('[unused2]')
            if cur_pos == pos_tail[1] - 1:
                re_tokens.append('[unused3]')
            cur_pos += 1
        re_tokens.append('[SEP]')
        # clamp marker positions so they stay inside the padded window
        pos1 = min(self.max_length - 1, pos1)
        pos2 = min(self.max_length - 1, pos2)
        indexed_tokens = self.tokenizer.convert_tokens_to_ids(re_tokens)
        avai_len = len(indexed_tokens)
        # pos1 = torch.tensor([[pos1]]).long()
        # pos2 = torch.tensor([[pos2]]).long()
        if self.blank_padding:
            # zero-pad / truncate to the fixed sequence length
            while len(indexed_tokens) < self.max_length:
                indexed_tokens.append(0)
            indexed_tokens = indexed_tokens[:self.max_length]
        att_mask = [0] * self.max_length
        avai_len = min(avai_len, self.max_length)
        att_mask[:avai_len] = [1] * avai_len
        return indexed_tokens, [pos1], [pos2], att_mask

    def get_raw_data(self, dataset, path):
        """Extract untokenized instances (text, spans, surface forms,
        label) from annotated documents, including '<unk>' negatives,
        save them to ``path`` as JSON and return the list."""
        raw_data = []
        for data in tqdm(dataset, desc='Processing Data'):
            text = data['text']
            sentences_boundaries = data['sentences_boundaries']
            entities = data['entities']
            triples = data['triples']
            # entity-boundary pairs already emitted as positives
            added = set()
            for triple in triples:
                item = {'text': '', 'h': {'pos': [], 'word': ''}, 't': {'pos': [], 'word': ''}, 'label': ''}
                sentence_id = triple['sentence_id']
                predicate = triple['predicate']
                h = triple['subject']
                t = triple['object']
                # skip triples whose subject/object has no span annotation
                if not h['boundaries'] or not t['boundaries']:
                    continue
                item['text'] = text[sentences_boundaries[sentence_id][0]:sentences_boundaries[sentence_id][1]]
                item['h']['pos'] = h['boundaries']
                item['h']['word'] = h['surfaceform']
                item['t']['pos'] = t['boundaries']
                item['t']['word'] = t['surfaceform']
                item['label'] = predicate['uri']
                raw_data.append(item)
                added.add(str([h['boundaries'], t['boundaries']]))
                self.label_count[predicate['uri']] += 1
                self.max_count = self.max_count if self.max_count >= self.label_count[predicate['uri']] \
                    else self.label_count[predicate['uri']]
            # negatives; note: unlike process(), the unk_count guard is
            # also checked inside the inner loop, but unk_count itself is
            # never incremented here — NOTE(review): likely an oversight.
            for entity1 in entities:
                if self.unk_count >= self.max_count:
                    break
                for entity2 in entities:
                    if self.unk_count >= self.max_count:
                        break
                    if entity1['boundaries'] == entity2['boundaries']:
                        continue
                    else:
                        # NOTE(review): same suspicious 'break' as in
                        # process(); 'continue' looks like the intent.
                        if str([entity1['boundaries'], entity2['boundaries']]) in added:
                            break
                        else:
                            # keep the pair only if both entities fall
                            # inside the same sentence
                            for sentences_boundary in sentences_boundaries:
                                if sentences_boundary[0] <= entity1['boundaries'][0] and sentences_boundary[0] <= entity2['boundaries'][0] and sentences_boundary[1] >= entity1['boundaries'][1] and sentences_boundary[1] >= entity2['boundaries'][1]:
                                    item = {'text': '', 'h': {'pos': [], 'word': ''}, 't': {'pos': [], 'word': ''}, 'label': ''}
                                    item['text'] = text[sentences_boundary[0]:sentences_boundary[1]]
                                    item['h']['pos'] = entity1['boundaries']
                                    item['h']['word'] = entity1['surfaceform']
                                    item['t']['pos'] = entity2['boundaries']
                                    item['t']['word'] = entity2['surfaceform']
                                    item['label'] = '<unk>'
                                    raw_data.append(item)
        save_json(raw_data, path)
        return raw_data


def process(tokens, pos_head, pos_tail, tokenizer, max_length, blank_padding):
    """Convert a word-token list with head/tail spans into BERT inputs.

    Args:
        tokens: list of word tokens for one sentence.
        pos_head: [start, end) token indices of the head entity.
        pos_tail: [start, end) token indices of the tail entity.
        tokenizer: tokenizer providing ``tokenize`` and
            ``convert_tokens_to_ids``.
        max_length: fixed output sequence length.
        blank_padding: if True, zero-pad / truncate ``indexed_tokens``
            to ``max_length``.

    Returns:
        (indexed_tokens, [pos1], [pos2], att_mask) where pos1/pos2 are
        the positions of the '[unused0]'/'[unused1]' head/tail markers.
    """
    re_tokens = ['[CLS]']
    pos1 = 0
    pos2 = 0
    for cur_pos, token in enumerate(tokens):
        token = token.lower()
        # '[unused0]'/'[unused1]' open the head/tail entity spans
        if cur_pos == pos_head[0]:
            pos1 = len(re_tokens)
            re_tokens.append('[unused0]')
        if cur_pos == pos_tail[0]:
            pos2 = len(re_tokens)
            re_tokens.append('[unused1]')
        # BUG FIX: previously the `else: re_tokens.append(token)` was
        # attached to the pos_tail check, so only the first tail token
        # was wordpiece-tokenized and every other token was appended
        # raw.  Every word token must go through the tokenizer, mirroring
        # the is_token branch of REXProcessor.tokenize.
        re_tokens += tokenizer.tokenize(token)
        # '[unused2]'/'[unused3]' close the head/tail entity spans
        if cur_pos == pos_head[1] - 1:
            re_tokens.append('[unused2]')
        if cur_pos == pos_tail[1] - 1:
            re_tokens.append('[unused3]')
    re_tokens.append('[SEP]')
    # clamp marker positions so they stay inside the padded window
    pos1 = min(max_length - 1, pos1)
    pos2 = min(max_length - 1, pos2)
    indexed_tokens = tokenizer.convert_tokens_to_ids(re_tokens)
    avai_len = len(indexed_tokens)
    if blank_padding:
        # zero-pad / truncate to the fixed sequence length
        while len(indexed_tokens) < max_length:
            indexed_tokens.append(0)
        indexed_tokens = indexed_tokens[:max_length]
    att_mask = [0] * max_length
    avai_len = min(avai_len, max_length)
    att_mask[:avai_len] = [1] * avai_len
    return indexed_tokens, [pos1], [pos2], att_mask


def data_sample(data, path):
    """Down-sample an imbalanced relation dataset.

    Each of the 300 most frequent labels is capped at
    ``int(freq ** 0.4) + 300`` examples; the '<unk>' (no-relation) label
    is capped at 1000.  Labels outside the top 300 are dropped entirely.

    Args:
        data: list of instance dicts, each carrying a 'label' key.
        path: JSON file the sampled subset is saved to.

    Returns:
        The sampled subset, preserving the original order of ``data``.
    """
    # label -> number of occurrences
    label_freq = {}
    for record in data:
        label_freq[record['label']] = label_freq.get(record['label'], 0) + 1

    # most frequent labels first
    ranked = sorted(label_freq.items(), key=lambda d: d[1], reverse=True)
    quota = {}
    for label, freq in ranked[0:300]:
        # BUG FIX: the original used `label not in '<unk>'`, a substring
        # test that also excluded any label that happens to be a
        # substring of '<unk>' (e.g. 'k', 'un'); equality is the intent.
        if label != '<unk>':
            quota[label] = min(freq, int(freq ** 0.4) + 300)
    quota['<unk>'] = 1000

    # keep instances first-come-first-served until each quota is spent
    result = []
    for record in data:
        if quota.get(record['label'], 0) > 0:
            quota[record['label']] -= 1
            result.append(record)
    save_json(result, path)
    return result
