from unicodedata import numeric
from tensorflow.python import keras
from keras.layers  import *
from keras.utils import to_categorical
from keras.models import Model
import keras.backend as K
from keras.callbacks import Callback
from keras.optimizers import Adam
from keras_bert import load_trained_model_from_checkpoint
from utils import metric, seq_gather
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from torch.utils.tensorboard import SummaryWriter
# TensorBoard output directory for this experiment (WebNLG dataset).
log_dir = './results/WebNLG'
# Module-level writer shared by the Eval callback; closed in Eval.on_train_end.
writer = SummaryWriter(log_dir)
# Maximum number of entities kept per sentence.
max_entity_nums = 8
# Maximum number of unordered entity pairs: 8 * (8 + 1) / 2 = 36 (all i <= j).
max_entity_pair_nums = 36
def make_entity_pairs(x):
    """Enumerate every unordered entity pair (i <= j) along axis 1.

    Args:
        x: a pair ``(entity, ref)`` where ``entity`` has shape
           (batch, n_entities, hidden) and ``ref`` is any tensor whose
           second-to-last static dimension equals ``n_entities``
           (only its shape is read, never its values).

    Returns:
        Two parallel lists of (batch, hidden) slices: element k of the
        first list is the first member and element k of the second list
        the second member of the k-th pair, pairs ordered by (i, j).
    """
    entity, ref = x
    n_entities = ref.shape[-2]
    firsts = []
    seconds = []
    for i in range(n_entities):
        for j in range(i, n_entities):
            firsts.append(entity[:, i, :])
            seconds.append(entity[:, j, :])
    return firsts, seconds
def seq_gathers(x):
    """Gather one token vector per entity pair from a batched sequence.

    Args:
        x: ``(seq, idx)`` where ``seq`` has shape
           (batch, pairs, tokens, hidden) and ``idx`` has shape
           (batch, pairs, 1), giving for every pair the token position to
           extract (shapes assumed from the caller -- TODO confirm).

    Returns:
        Tensor of shape (batch, pairs, hidden) equal to
        ``seq[b, p, idx[b, p, 0], :]`` for every (b, p).
    """
    seq, idx = x
    idx = K.cast(idx, 'int32')
    batch_size = K.shape(seq)[0]
    # BUGFIX/generalization: the pair count was hard-coded as rep=8; read it
    # dynamically from idx so the function works for any number of pairs.
    pair_nums = K.shape(idx)[1]
    # Build (batch, pairs, 1) coordinate grids for the first two gather axes.
    batch_idx = K.reshape(K.arange(0, batch_size), (-1, 1, 1))
    batch_idx = K.tile(batch_idx, (1, pair_nums, 1))
    pair_idx = K.reshape(K.arange(0, pair_nums), (1, -1, 1))
    pair_idx = K.tile(pair_idx, (batch_size, 1, 1))
    # Full (batch, pairs, 3) index: [b, p, token] per pair.
    full_idx = tf.concat([batch_idx, pair_idx, idx], axis=-1)
    return tf.gather_nd(seq, full_idx)

class Bi_linear(Layer):
   """Bilinear scoring layer: out[..., u] = x1 . W[u] . x2 + b[u].

   Scores two feature tensors through a learned (units, d1, d2) weight
   tensor, yielding ``units`` scores per (batch, pair) position.
   NOTE(review): ``activation`` is accepted and stored but never applied
   in ``call`` -- confirm whether that is intentional.
   """
   def __init__(self, units,
    activation=None,   
    **kwargs):
         super(Bi_linear, self).__init__(**kwargs)
         self.units = units
         self.activation = activation  # currently unused -- see class note
   def build(self, input_shape):
       # W: (units, d1, d2) -- one bilinear form per output unit, where
       # d1/d2 are the last dims of the two inputs.
       self.w = self.add_weight(
         shape=(self.units,input_shape[0][-1],input_shape[1][-1]),
         initializer='random_normal',
         trainable=True,
       )
       # b: (units,) bias added to every score.
       self.b = self.add_weight(
         shape=(self.units,),
         initializer='random_normal',
         trainable=True
       )
       self.built = True
       super(Bi_linear, self).build(input_shape)
   def call(self, inputs):
      # inputs_1: (batch, pairs, d1); inputs_2: (batch, pairs, d2)
      # (inferred from the tiling below -- TODO confirm with callers).
      inputs_1,inputs_2 = inputs
      # Broadcast W to (batch, pairs, units, d1, d2) by explicit tiling.
      w1 = tf.expand_dims(self.w,0)
      w2 = tf.tile(w1, (tf.shape(inputs_1)[1],1,1,1))
      w2 = tf.expand_dims(w2, 0)
      w2 = tf.tile(w2,(tf.shape(inputs_1)[0],1,1,1,1))
      # x2: (batch, pairs, units, d1, d2) copies of inputs_1.
      x1 = tf.expand_dims(inputs_1, 2)
      x1 = tf.expand_dims(x1, 4)
      x2 = tf.tile(x1, (1,1,self.units,1,tf.shape(inputs_2)[-1]))
      # y2: (batch, pairs, units, d2) copies of inputs_2.
      y1 = tf.expand_dims(inputs_2,2)
      y2 = tf.tile(y1,(1,1,self.units,1))
      # tp[b,p,u,:] = inputs_1[b,p,:] @ W[u]   (sum over d1, axis 3).
      result = tf.multiply(x2, w2)
      tp = tf.reduce_sum(result, axis=3)
      # Dot with inputs_2 over d2, plus bias: output is (batch, pairs, units).
      output = tf.reduce_sum(tf.multiply(tp,y2),3) + self.b
      output = tf.convert_to_tensor(output)
      return output


def E2EModel(bert_config_path, bert_checkpoint_path, LR, num_rels, num_dirs, max_entity_pairs =8):
    """Build the joint entity / relation / direction models over a shared BERT.

    Args:
        bert_config_path: path to the BERT json config file.
        bert_checkpoint_path: path to the pretrained BERT checkpoint.
        LR: learning rate. NOTE(review): not used inside this function.
        num_rels: number of relation types.
        num_dirs: number of direction classes. NOTE(review): not used; the
            direction head below is hard-coded to 4 output classes.
        max_entity_pairs: maximum number of entities per sentence (default 8);
            all i<=j pairs of them give max_entity_pair_nums (36) candidates.

    Returns:
        Tuple ``(entity_model, relation_model, direction_model, joint_model)``.
        The sub-models share the same BERT encoder; only ``joint_model``
        carries the combined training loss, attached via ``add_loss``.
    """
    bert_model = load_trained_model_from_checkpoint(bert_config_path,bert_checkpoint_path,seq_len=None)
    # Fine-tune the whole BERT encoder.
    for l in bert_model.layers:
        l.trainable = True
    # --- model inputs -------------------------------------------------------
    tokens_in = Input(shape = (None,))
    segments_in = Input(shape = (None,))
    gold_entity_head_in = Input(shape = (None,))
    gold_entity_tail_in = Input(shape = (None,))
    relation_embdding_in = Input(shape =(num_rels,num_rels))
    gold_relation_in = Input(shape = (None,))
    entity_head_in  = Input(shape=(max_entity_pairs,1))
    entity_tail_in  = Input(shape=(max_entity_pairs,1))
    CLS_in = Input(shape=(1,))
    gold_direction_in = Input(shape=(num_rels, max_entity_pair_nums, 4))
    entity_pair_mask_in = Input(shape=(max_entity_pair_nums,1))
    
    

    tokens, segments, gold_entity_head, gold_entity_tail, relation_embdding, gold_relation = tokens_in, segments_in, gold_entity_head_in, gold_entity_tail_in, relation_embdding_in, gold_relation_in
    entity_head, entity_tail, gold_direction = entity_head_in, entity_tail_in, gold_direction_in
   
   
    # Padding mask: 1.0 where token id > 0, shape (batch, seq, 1).
    mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(tokens)
    token_feature = bert_model([tokens,segments])
    #Bi_LSTM = Bidirectional(LSTM(256,return_sequences=True))
    #token_feature = Bi_LSTM(token_feature)
    #Bi_LSTM.trainable = False

    # --- entity model: per-token head/tail probabilities --------------------
    pred_entity_head = Dense(1, activation='sigmoid')(token_feature)
    pred_entity_tail = Dense(1, activation='sigmoid')(token_feature)
    entity_model = Model([tokens, segments], [pred_entity_head, pred_entity_tail])
    '''for layer in entity_model.layers:
        layer.trainable = False'''
    # --- relation model: attend relation embeddings over token features -----
    relation_map = Dense(256, activation='tanh')(relation_embdding)
    relation_map = Dense(768,activation='tanh')(relation_map)
    rel_attention = Attention()([relation_map, token_feature]) 
    pred_relation = Dense(1, activation='sigmoid')(rel_attention)
    relation_model = Model([tokens, segments, relation_embdding], [pred_relation])
    '''for layer in relation_model.layers:
        layer.trainable=False'''
    # Broadcast the relation map to every candidate entity pair.
    relation_map = K.expand_dims(relation_map,1)
    relation_map = K.repeat_elements(relation_map, rep=max_entity_pair_nums, axis=1)
    # [CLS] token feature, repeated once per entity pair.
    CLS_feature = Lambda(seq_gather)([token_feature, CLS_in])
    CLS_feature = K.expand_dims(CLS_feature, 1)
    CLS_feature = K.repeat_elements(CLS_feature, rep=max_entity_pair_nums, axis=1)
    # Token features repeated per entity slot so head/tail vectors can be
    # gathered independently for each entity.
    encode_feature = K.expand_dims(token_feature, 1)
    encode_feature = K.repeat_elements(encode_feature, rep=max_entity_pairs, axis=1)
    entity_head_feature = Lambda(seq_gathers)([encode_feature, entity_head])
    entity_tail_feature = Lambda(seq_gathers)([encode_feature, entity_tail])
    # Entity representation = mean of its head and tail token vectors.
    entity_feature = Average()([entity_head_feature,entity_tail_feature])
    entity_1, entity_2 = Lambda(make_entity_pairs)([entity_feature, entity_head])
    entity_1 = tf.stack(entity_1, axis=1)
    entity_2 = tf.stack(entity_2, axis=1)
    # One shared projection applied to both members of every pair.
    entity_map = Dense(768,activation='elu')
    entity_1 = entity_map(entity_1)
    entity_2 = entity_map(entity_2)
    #entity_1 = Dense(768,activation='elu')(entity_1)
    #entity_2 = Dense(768,activation='elu')(entity_2)
    relation_map = Dense(768,activation='elu')(relation_map)
    CLS_feature = Dense(2304,activation="elu")(CLS_feature)
    # Per relation type j: concat (entity_1, relation_j, entity_2) features.
    triple_feature = []
    for j in range(num_rels):
        final = tf.concat([entity_1,relation_map[:,:,j,:],entity_2],axis=-1)
        #final = Add()([entity_1, relation_map[:,:,i,:], entity_2])
        triple_feature.append(final)
    triple_feature = tf.stack(triple_feature,axis=0)
    #triple_feature = gold_relation*triple_feature  # mask the model features after training completes

    #pred_direction_list = [Dense(4,activation='softmax',use_bias=False)(entity_pair_mask_in * triple_feature[i,:,:,:])) for i in range(num_rels)]
    # Direction head: per relation, a 4-way softmax over every (masked) pair,
    # with dot-product CLS attention added to the triple feature.
    pred_direction_list = [Dense(4,activation='softmax',use_bias=False)(entity_pair_mask_in * (triple_feature[i,:,:,:] + Attention(score_mode="dot")([CLS_feature,triple_feature[i,:,:,:]]))) for i in range(num_rels)]
    pred_direction = tf.stack(pred_direction_list,axis=1)
    # NOTE(review): gold_relation_in is listed as an input here but is not
    # consumed by the direction branch above -- confirm it is intentional.
    direction_model = Model([tokens, segments, relation_embdding, entity_head, entity_tail, CLS_in, entity_pair_mask_in, gold_relation_in], [pred_direction_list])
    '''for layer in direction_model.layers:
        #if layer != bert_model:
            layer.trainable=False'''

    joint_model = Model([tokens, segments, gold_entity_head, gold_entity_tail, relation_embdding, gold_relation, entity_head,
    entity_tail,  gold_direction, CLS_in,entity_pair_mask_in], [pred_entity_head, pred_entity_tail, pred_relation, pred_direction_list])
    # --- losses: masked BCE for spans, BCE for relations, CE for direction --
    gold_entity_head = K.expand_dims(gold_entity_head, 2)
    gold_entity_tail = K.expand_dims(gold_entity_tail, 2)
    gold_relation = K.expand_dims(gold_relation, 2)
    entity_head_loss = K.binary_crossentropy(gold_entity_head, pred_entity_head)
    entity_head_loss = K.sum(entity_head_loss*mask) / K.sum(mask)
    entity_tail_loss = K.binary_crossentropy(gold_entity_tail, pred_entity_tail)
    entity_tail_loss = K.sum(entity_tail_loss*mask) / K.sum(mask)
    relation_loss = K.binary_crossentropy(gold_relation, pred_relation)
    relation_loss = K.sum(relation_loss) / num_rels
   
    # NOTE(review): direction_loss is not reduced to a scalar before the sum
    # below -- confirm the intended reduction for add_loss.
    direction_loss = K.categorical_crossentropy(gold_direction, pred_direction)
    total_loss = entity_head_loss + entity_tail_loss + relation_loss + direction_loss
    joint_model.add_loss(total_loss)
    joint_model.summary()
    return entity_model, relation_model, direction_model, joint_model

class Eval(Callback):
    """Periodic-evaluation callback for the joint extraction models.

    Every 20 epochs it runs ``metric`` on ``eval_data``, logs entity /
    relation / triple precision, recall and F1 to TensorBoard via the
    module-level ``writer``, and saves the trained model's weights whenever
    the triple F1 improves by more than ``min_delta``.
    """

    def __init__(self, entity_model, relation_model, direction_model, tokenizer, id2rel, eval_data, save_weights_path, id2dir, min_delta=1e-4):
        # BUGFIX: the base Callback constructor was never invoked, leaving
        # framework-managed state uninitialised.
        super(Eval, self).__init__()
        self.min_delta = min_delta
        self.monitor_op = np.greater
        self.entity_model = entity_model
        self.relation_model = relation_model
        self.direction_model = direction_model
        self.tokenizer = tokenizer
        self.id2rel = id2rel
        self.id2dir = id2dir
        self.eval_data = eval_data
        self.save_weights_path = save_weights_path

    def on_train_begin(self, logs=None):
        # BUGFIX: Keras invokes on_train_begin(logs); the original signature
        # without ``logs`` raised TypeError at the start of fit().
        self.step = 0
        self.wait = 0
        self.stopped_epoch = 0
        self.e_best = -np.inf
        self.r_best = -np.inf
        self.triples_best = -np.inf

    def on_epoch_end(self, epoch, logs=None):
        # BUGFIX: Keras invokes on_epoch_end(epoch, logs); ``logs`` added.
        if (epoch+1)%20 == 0:
            e_precision, e_recall, e_f1, r_precision, r_recall, r_f1, triples_precision, triples_recall, triples_f1,triple_correct_num, triple_predict_num, triple_gold_num, dir4nums, fake_nums = metric(self.entity_model, self.relation_model, self.direction_model, self.eval_data, self.id2rel ,self.tokenizer, self.id2dir)
            # Log every evaluation metric to TensorBoard.
            writer.add_scalar("e_p", e_precision, epoch+1)
            writer.add_scalar("e_r", e_recall,epoch+1)
            writer.add_scalar("e_f1", e_f1, epoch+1)
            writer.add_scalar("r_p", r_precision, epoch+1)
            writer.add_scalar("r_r", r_recall, epoch+1)
            writer.add_scalar("r_f1", r_f1, epoch+1)
            writer.add_scalar("triples_precision", triples_precision, epoch+1)
            writer.add_scalar("triples_recall", triples_recall,epoch+1)
            writer.add_scalar("triples_f1", triples_f1, epoch+1)
            writer.add_scalar("triple_correct_num", triple_correct_num, epoch+1)
            writer.add_scalar("triple_predict_num", triple_predict_num, epoch+1)
            writer.add_scalar("triple_gold_num ", triple_gold_num, epoch+1)
            writer.add_scalar('dir4_nums', dir4nums,epoch+1)
            writer.add_scalar('fake_triple_num',fake_nums,epoch+1)
            # NOTE(review): the second clause (min_delta > f1) also counts a
            # near-zero f1 as an "improvement" -- confirm this is intended.
            if self.monitor_op(e_f1 - self.min_delta, self.e_best) or self.monitor_op(self.min_delta, e_f1):
                self.e_best = e_f1
                #self.model.save_weights(self.save_weights_path)

            if self.monitor_op(r_f1 - self.min_delta, self.r_best) or self.monitor_op(self.min_delta, r_f1):
                self.r_best = r_f1
                #self.model.save_weights(self.save_weights_path)

            if self.monitor_op(triples_f1 - self.min_delta, self.triples_best) or self.monitor_op(self.min_delta, triples_f1):
                self.triples_best = triples_f1
                # self.model is attached by Keras during fit(); checkpoint on
                # triple-F1 improvement only.
                self.model.save_weights(self.save_weights_path)

            print('e_f1: %.4f, e_precision: %.4f, e_recall: %.4f, e_best f1: %.4f\nr_f1: %.4f, r_precision: %.4f, r_recall: %.4f, r_best f1: %.4f\n' % (e_f1, e_precision, e_recall, self.e_best, r_f1, r_precision, r_recall, self.r_best))
            print('triples_f1: %.4f, triples_precision: %.4f, triples_recall: %.4f, triples_best_f1: %.4f\n'%(triples_f1, triples_precision, triples_recall, self.triples_best))

    def on_train_end(self, logs=None):
        # BUGFIX: Keras invokes on_train_end(logs); ``logs`` added.
        writer.close()
        print('train stop!')
