#! -*- coding:utf-8 -*-
import math
import keras.backend
import numpy as np
import re, os, json
from random import choice
import tensorflow as tf
from keras.utils import  to_categorical
from keras.backend import expand_dims
import keras.backend as K
BERT_MAX_LEN = 512
RANDOM_SEED = 2022

def find_head_idx(source, target):
    """Return the start index of the first occurrence of *target* as a
    contiguous sub-sequence of *source*, or -1 when it never occurs."""
    width = len(target)
    for start, _ in enumerate(source):
        if source[start: start + width] == target:
            return start
    return -1

def to_tuple(sent):
    """Convert every triple in sent['triple_list'] from a list to a tuple,
    mutating *sent* in place (tuples are hashable / comparable to gold sets)."""
    sent['triple_list'] = [tuple(triple) for triple in sent['triple_list']]

def seq_padding(batch, padding=0):
    """Right-pad every sequence in *batch* with *padding* up to the length of
    the longest sequence.

    Args:
        batch: iterable of sequences (lists or 1-D numpy arrays).
        padding: fill value appended to short sequences.

    Returns:
        np.ndarray of shape (len(batch), max_len). An empty batch returns an
        empty array instead of raising ValueError from max() on no arguments.
    """
    if not batch:
        return np.array([])
    max_length = max(len(seq) for seq in batch)
    return np.array([
        np.concatenate([seq, [padding] * (max_length - len(seq))])
        if len(seq) < max_length else seq
        for seq in batch
    ])

def load_data(train_path, dev_path, test_path, rel_dict_path, dir_dict_path):
    """Load the train/dev/test splits plus the relation and direction vocabs.

    Fixes over the previous version: the direction dict was parsed twice
    (duplicate json.load of dir_dict_path) and every file handle was leaked
    (open() without close); files are now read once under `with`.

    Args:
        train_path, dev_path, test_path: JSON files, each a list of sentence
            dicts carrying a 'triple_list' key (converted to tuples in place).
        rel_dict_path: JSON file containing [id2rel, rel2id].
        dir_dict_path: JSON file containing [id2dir, dir2id].

    Returns:
        (train_data, dev_data, test_data, id2rel, rel2id, num_rels,
         id2dir, dir2id, num_dirs); train_data is deterministically shuffled
        with RANDOM_SEED.
    """
    def _read_json(path):
        # Single read per file; the context manager closes the handle.
        with open(path) as f:
            return json.load(f)

    train_data = _read_json(train_path)
    dev_data = _read_json(dev_path)
    test_data = _read_json(test_path)
    id2rel, rel2id = _read_json(rel_dict_path)
    id2dir, dir2id = _read_json(dir_dict_path)
    # JSON object keys are always strings; restore the integer ids.
    id2dir = {int(i): j for i, j in id2dir.items()}
    id2rel = {int(i): j for i, j in id2rel.items()}
    num_rels = len(id2rel)
    num_dirs = len(id2dir)

    # Deterministic shuffle of the training split only.
    random_order = list(range(len(train_data)))
    np.random.seed(RANDOM_SEED)
    np.random.shuffle(random_order)
    train_data = [train_data[i] for i in random_order]

    for sent in train_data:
        to_tuple(sent)
    for sent in dev_data:
        to_tuple(sent)
    for sent in test_data:
        to_tuple(sent)

    print("train_data len:", len(train_data))
    print("dev_data len:", len(dev_data))
    print("test_data len:", len(test_data))

    return train_data, dev_data, test_data, id2rel, rel2id, num_rels, id2dir, dir2id, num_dirs


class data_generator:
    """Infinite batch generator for a BERT-based joint entity / relation /
    direction extraction model.

    Each yielded batch is a Keras-style pair
    ([tokens, segments, entity_heads, entity_tails, relation_emb, relation,
      entity_head_idx, entity_tail_idx, direction, CLS, entity_pair_mask],
     None) — the targets are packed into the inputs list, so the model is
    presumably trained with an internally-computed loss (None label).
    """
    def __init__(self, data, tokenizer, rel2id, id2rel, num_rels, dir2id, id2dir, num_dirs, maxlen, batch_size=32):
        # data: list of sentence dicts; __iter__ reads the keys 'text',
        # 'entity', 'relation' and 'triple_list' from each element.
        self.data = data
        self.batch_size = batch_size
        self.tokenizer = tokenizer
        self.maxlen = maxlen  # max whitespace-split words kept per text (pre-tokenization)
        self.rel2id = rel2id
        self.id2rel = id2rel
        self.num_rels = num_rels
        # steps = ceil(len(data) / batch_size): floor division here, plus
        # the +1 correction below when there is a partial final batch.
        self.steps = len(self.data) // self.batch_size
        self.dir2id = dir2id
        self.id2dir = id2dir
        self.num_dirs = num_dirs
        if len(self.data) % self.batch_size != 0:
            self.steps += 1
    def __len__(self):
        # Number of batches per epoch.
        return self.steps
    def __iter__(self):
        # Loops forever (Keras fit_generator style); one pass of the inner
        # for-loop over idxs is one epoch.
        while True:
            idxs = list(range(len(self.data)))
            # NOTE(review): re-seeding inside the while loop replays the exact
            # same shuffle order every epoch — confirm this is intentional.
            np.random.seed(RANDOM_SEED)
            np.random.shuffle(idxs)
            tokens_batch, segments_batch, entity_heads_batch, entity_tails_batch, relation_emb_batch, relation_batch, entity_head_idx_batch, entity_tail_idx_batch, direction_batch, CLS_batch= [], [], [], [], [], [], [], [], [], []
            entity_pair_mask =[]
            for idx in idxs:
                line = self.data[idx]
                # Truncate on whitespace words first, then tokenize.
                text = ' '.join(line['text'].split()[:self.maxlen])
                tokens = self.tokenizer.tokenize(text)
                if len(tokens) > BERT_MAX_LEN:
                    tokens = tokens[:BERT_MAX_LEN]
                text_len = len(tokens)
                all_entity = []
                all_relation = []
                train_entity = []
                seq_ent_head = []   # per-entity [head token index] (or [-1] pad)
                seq_ent_tail = []   # per-entity [tail token index] (or [-1] pad)
                entity_tokend = []  # per-entity token list (or 'none' pad)
                id4dir=[]           # flat pair-ids of real (non-padded) entity pairs
                train_entity = list(line['entity'])
                for ent in train_entity:
                    # [1:-1] strips the tokenizer's boundary tokens —
                    # presumably [CLS]/[SEP]; verify against the tokenizer.
                    ent_tokend = self.tokenizer.tokenize(ent)[1:-1]
                    ent_head_id = find_head_idx(tokens, ent_tokend)
                    # head_id is -1 when the entity is not found in tokens;
                    # tail then becomes len-2, which is NOT a sentinel —
                    # NOTE(review): confirm entities are always locatable.
                    ent_tail_id = ent_head_id + len(ent_tokend) -1
                    seq_ent_head.append([ent_head_id])
                    seq_ent_tail.append([ent_tail_id])
                    entity_tokend.append(ent_tokend)
                # Pad the entity slots up to a fixed 11 per sentence with
                # -1 / 'none' sentinels so batch tensors are rectangular.
                while(len(seq_ent_head)<11):
                    seq_ent_head.append([-1])
                    seq_ent_tail.append([-1])
                    entity_tokend.append('none')
                    train_entity.append('none')


                entity_head_idx_batch.append(seq_ent_head)
                entity_tail_idx_batch.append(seq_ent_tail)
                # 66 = 11*12/2: number of unordered entity-slot pairs (f <= s)
                # over the 11 fixed slots.
                direction = np.zeros((len(self.id2rel),66))
                entity_pair = np.zeros(66)
                for f in range(len(line['entity'])):
                    for s in range(f,len(line['entity'])):
                        # Upper-triangular flattening: (f, s) -> flat pair id.
                        id = f* 11 + s - f*(f+1)/2
                        entity_pair[int(id)] = 1
                        id4dir.append(id)
                entity_pair = tf.convert_to_tensor(entity_pair)
                entity_pair = K.expand_dims(entity_pair, axis=0)
                entity_pair =tf.transpose(entity_pair)  # shape (66, 1) mask column
                entity_pair_mask.append(entity_pair)
                # Class 3 marks pair slots that involve a padded entity
                # (pair id never produced above), for every relation row.
                for dir_id in range(66):
                    for i in range(len(self.id2rel)):
                        if dir_id not in id4dir:
                            direction[i][dir_id]=3

                # Fill direction labels: per relation i and real entity pair
                # (j, k), class 1 / 2 encode which surface order the gold
                # triple appears in (presumably forward vs reverse); 0 stays
                # for real pairs with no gold triple under that relation.
                for i in range(len(self.id2rel)):
                    choice_rel = self.id2rel[i]
                    for j in range(len(line['entity'])):
                        for k in range(j,len(line['entity'])):
                            e_1 = train_entity[j][:]
                            e_2 = train_entity[k][:]
                            e_1_tokend = entity_tokend[j][:]
                            e_2_tokend = entity_tokend[k][:]
                            e_1_idx = find_head_idx(tokens, e_1_tokend)
                            e_2_idx = find_head_idx(tokens, e_2_tokend)
                            # NOTE(review): uses len(seq_ent_head) (== 11 after
                            # padding, but > 11 if a sentence has more than 11
                            # entities) where the loop above hard-codes 11 —
                            # confirm the two flattenings always agree.
                            id = j* len(seq_ent_head) + k - j*(j+1)/2
                            if (e_1,choice_rel,e_2) in line['triple_list']:

                                if e_1_idx <= e_2_idx:

                                    direction[i][int(id)]=1
                                else:

                                    direction[i][int(id)]=2
                            elif (e_2,choice_rel,e_1) in line['triple_list']:

                                if e_2_idx <= e_1_idx:

                                    direction[i][int(id)]=1
                                else:

                                    direction[i][int(id)]=2
                # One-hot over the 4 direction classes {0,1,2,3}.
                direction_batch.append(to_categorical(direction,num_classes=4))
                # Collect (head, tail) spans for every locatable entity
                # (the 'none' pads return -1 from find_head_idx and are skipped).
                for entity in entity_tokend:
                    entity_head_idx = find_head_idx(tokens,entity)
                    # NOTE(review): `entity_head_idx and ... != -1` also skips
                    # index 0 (falsy) — entities starting at token 0 are
                    # dropped here; confirm whether that is intended.
                    if entity_head_idx and entity_head_idx != -1:
                        ent = (entity_head_idx, entity_head_idx + len(entity) - 1)
                        all_entity.append(ent)
                for rel in line['relation']:
                    all_relation.append(rel)

                token_ids, segment_ids = self.tokenizer.encode(first=text)
                if len(token_ids) > text_len:
                    token_ids = token_ids[:text_len]
                    segment_ids = segment_ids[:text_len]
                tokens_batch.append(token_ids)
                segments_batch.append(segment_ids)
                # Binary per-token head/tail markers and multi-hot relation vector.
                entity_heads, entity_tails, relation = np.zeros(text_len), np.zeros(text_len), np.zeros(self.num_rels)
                for all_ent in all_entity:
                    entity_heads[all_ent[0]] = 1
                    entity_tails[all_ent[1]] = 1
                for allrel in all_relation:
                    relation[self.rel2id[allrel]] = 1

                entity_heads_batch.append(entity_heads)
                entity_tails_batch.append(entity_tails)
                relation_batch.append(relation)
                # Identity matrix: one one-hot embedding row per relation id.
                relation_emb = tf.eye(self.num_rels)
                relation_emb_batch.append(relation_emb)
                CLS_batch.append([0])  # index of the [CLS] position per sample

                # Flush when the batch is full, or on the last (shuffled) index.
                if len(tokens_batch) == self.batch_size or idx == idxs[-1] :
                    tokens_batch = tf.convert_to_tensor(seq_padding(tokens_batch))
                    segments_batch = tf.convert_to_tensor(seq_padding(segments_batch))
                    entity_heads_batch = tf.convert_to_tensor(seq_padding(entity_heads_batch))
                    entity_tails_batch = tf.convert_to_tensor(seq_padding(entity_tails_batch))
                    relation_emb_batch = tf.convert_to_tensor(relation_emb_batch)
                    relation_batch = tf.convert_to_tensor(relation_batch)
                    entity_head_idx_batch = tf.convert_to_tensor(entity_head_idx_batch)
                    entity_tail_idx_batch = tf.convert_to_tensor(entity_tail_idx_batch)
                    direction_batch = tf.stack(direction_batch,axis=0)
                    CLS_batch = tf.convert_to_tensor(CLS_batch)
                    entity_pair_mask =tf.convert_to_tensor(entity_pair_mask)

                    yield [tokens_batch, segments_batch, entity_heads_batch, entity_tails_batch, relation_emb_batch, relation_batch, entity_head_idx_batch, entity_tail_idx_batch, direction_batch, CLS_batch, entity_pair_mask], None
                    tokens_batch, segments_batch, entity_heads_batch, entity_tails_batch, relation_emb_batch, relation_batch, entity_head_idx_batch, entity_tail_idx_batch, direction_batch, CLS_batch, entity_pair_mask = [], [], [], [], [], [], [], [], [], [],[]
                