#! -*- coding:utf-8 -*-
import keras.backend as K
from keras_bert import Tokenizer
import numpy as np
import codecs
from tqdm import tqdm
import json
import unicodedata
import tensorflow as tf
import os, argparse
from data_loader import find_head_idx
BERT_MAX_LEN = 512

class HBTokenizer(Tokenizer):
    """BERT tokenizer that preserves word boundaries by appending a
    '[unused1]' marker token after each whitespace-separated word's
    word pieces."""

    def _tokenize(self, text):
        # Uncased mode: strip accents (NFD decompose, drop combining
        # marks) and lowercase before tokenizing.
        if not self._cased:
            decomposed = unicodedata.normalize('NFD', text)
            text = ''.join(
                ch for ch in decomposed if unicodedata.category(ch) != 'Mn'
            ).lower()
        # Drop NUL bytes, the Unicode replacement character, and control
        # characters; keep everything else verbatim.
        cleaned = ''.join(
            ch for ch in text
            if ord(ch) not in (0, 0xfffd) and not self._is_control(ch)
        )
        pieces = []
        for word in cleaned.strip().split():
            pieces.extend(self._word_piece_tokenize(word))
            pieces.append('[unused1]')  # word-boundary marker
        return pieces

def get_tokenizer(vocab_path):
    """Build an HBTokenizer from a BERT vocabulary file.

    Each line of *vocab_path* holds one token; tokens are assigned ids by
    their line position. Returns a cased HBTokenizer (so ``_tokenize``
    performs no lowercasing or accent stripping).
    """
    token_dict = {}
    # Built-in open() with an explicit encoding replaces the legacy
    # codecs.open call; behavior is identical for UTF-8 vocab files.
    with open(vocab_path, encoding='utf8') as reader:
        for line in reader:
            token_dict[line.strip()] = len(token_dict)
    return HBTokenizer(token_dict, cased=True)

def seq_gather(x):
    """Gather one vector per batch row from a sequence tensor.

    Args:
        x: pair ``(seq, idxs)`` where ``seq`` is a (batch, seq_len, dim)
            tensor and ``idxs`` is (batch, 1) holding the position to pick
            per row (shapes presumed from the indexing below — TODO confirm
            against the model that wraps this in a Lambda layer).

    Returns:
        A (batch, dim) tensor: ``seq[b, idxs[b]]`` for each row ``b``.
    """
    seq, idxs = x
    idxs = K.cast(idxs, 'int32')
    # Prepend the batch index so gather_nd picks (batch, position) pairs.
    batch_idxs = K.arange(0, K.shape(seq)[0])
    batch_idxs = K.expand_dims(batch_idxs, 1)
    idxs = K.concatenate([batch_idxs, idxs], 1)
    # Fix: K.tf was removed from Keras backends (Keras >= 2.3); use the
    # tensorflow module imported at file scope instead.
    return tf.gather_nd(seq, idxs)

def extract_items(entity_model, relation_model, direction_model, tokenizer, text_in, id2rel, id2dir, h_bar=0.5, t_bar=0.5, r_bar=0.5):
    """Decode entities, relations and directed triples from one sentence.

    Args:
        entity_model / relation_model / direction_model: trained Keras models.
        tokenizer: HBTokenizer used for both token strings and ids.
        text_in: raw input sentence.
        id2rel: mapping relation id -> relation name.
        id2dir: mapping direction id -> label (unused here; kept for
            interface compatibility with callers).
        h_bar / t_bar / r_bar: decision thresholds for entity-head,
            entity-tail and relation logits.

    Returns:
        Tuple ``(entities, relations, triples, fake_pairs, dir4_list)``:
        detokenized entity strings, predicted relation names,
        (subject, relation, object) triples, entity pairs the direction
        model classified as unrelated (class 0), and a list that is never
        populated (kept for interface compatibility).
    """
    tokens = tokenizer.tokenize(text_in)
    token_ids, segment_ids = tokenizer.encode(first=text_in)
    token_ids, segment_ids = np.array([token_ids]), np.array([segment_ids])
    # Truncate to BERT's maximum sequence length.
    if len(token_ids[0]) > BERT_MAX_LEN:
        token_ids = token_ids[:, :BERT_MAX_LEN]
        segment_ids = segment_ids[:, :BERT_MAX_LEN]
    # One-hot vector per relation, batched as a single example.
    # (The original converted this tensor to a tensor twice; once suffices.)
    relation_emb = tf.eye(len(id2rel))
    relation_emb_list = tf.convert_to_tensor([relation_emb])
    token_ids = tf.convert_to_tensor(token_ids)
    segment_ids = tf.convert_to_tensor(segment_ids)

    entity_heads_logits, entity_tails_logits = entity_model.predict([token_ids, segment_ids])
    relation_logits = relation_model.predict([token_ids, segment_ids, relation_emb_list])
    entity_heads = np.where(entity_heads_logits[0, :] > h_bar)[0]
    entity_tails = np.where(entity_tails_logits[0, :] > t_bar)[0]
    relations = np.where(relation_logits[0] > r_bar)[0]

    entities = []
    relation = []
    triple_list = []
    fake_triple_list = []
    relation_in = np.zeros(len(id2rel))
    for rel in relations:
        relation.append(id2rel[rel])
        relation_in[rel] = 1

    # Pair every predicted head with the nearest tail at or after it.
    for entity_head in entity_heads:
        candidate_tails = entity_tails[entity_tails >= entity_head]
        if len(candidate_tails) > 0:
            entity_tail = candidate_tails[0]
            # NOTE(review): slice excludes the tail position itself —
            # confirm this matches the span encoding used at training time.
            entity = tokens[entity_head: entity_tail]
            entities.append((entity, entity_head, entity_tail))

    if len(entities) > 0 and len(relations) > 0:
        CLS_pos = K.expand_dims(tf.convert_to_tensor([[0]]), 0)  # [CLS] position
        entity_head4dir = [[ent[1]] for ent in entities]
        entity_tail4dir = [[ent[2]] for ent in entities]
        # Pad to the fixed capacity of 11 entities the direction model expects.
        while len(entity_head4dir) < 11:
            entity_head4dir.append([-1])
            entity_tail4dir.append([-1])
        entity_head4dir = K.expand_dims(tf.convert_to_tensor(entity_head4dir), 0)
        entity_tail4dir = K.expand_dims(tf.convert_to_tensor(entity_tail4dir), 0)
        # 66 = 11 * 12 / 2 unordered entity pairs (self-pairs included);
        # mark the slots that correspond to real detected entity pairs.
        entity_pair_mask = np.zeros(66)
        for f in range(len(entities)):
            for s in range(f, len(entities)):
                pair_id = f * 11 + s - f * (f + 1) // 2
                entity_pair_mask[int(pair_id)] = 1
        entity_pair = K.expand_dims(tf.convert_to_tensor(entity_pair_mask), axis=0)
        entity_pair_mask = K.expand_dims(tf.transpose(entity_pair), 0)  # (1, 66, 1)
        relation_in = K.expand_dims(tf.convert_to_tensor(relation_in), axis=0)
        direction_logits = direction_model([token_ids, segment_ids, relation_emb_list,
                                            entity_head4dir, entity_tail4dir, CLS_pos,
                                            entity_pair_mask, relation_in])
        for rel in relations:
            for i in range(len(entities)):
                for j in range(i, len(entities)):
                    pair_id = i * 11 + j - i * (i + 1) // 2
                    direction = np.argmax(direction_logits[0][rel][0][int(pair_id)][:])
                    if direction == 1:       # entity i is the subject
                        triple_list.append((entities[i][0], id2rel[rel], entities[j][0]))
                    elif direction == 2:     # entity j is the subject
                        triple_list.append((entities[j][0], id2rel[rel], entities[i][0]))
                    elif direction == 0:     # pair judged unrelated
                        fake_triple_list.append((entities[i][0], entities[j][0]))

    def _detok(piece_tokens):
        # Merge word pieces (strip leading '#'), then turn the '[unused1]'
        # word-boundary markers back into spaces.
        joined = ''.join(tok.lstrip("##") for tok in piece_tokens)
        return ' '.join(joined.split('[unused1]'))

    entity_set = {_detok(ent[0]) for ent in entities}
    triple_set = {(_detok(s), r, _detok(o)) for s, r, o in triple_list}
    # Bug fix: the original iterated an always-empty `fake_triple_final`
    # list here, so direction-0 pairs were never returned to the caller.
    fake_triple_set = {(_detok(s), _detok(o)) for s, o in fake_triple_list}
    dir4_list_final = []  # never populated; kept for interface compatibility
    return list(entity_set), list(relation), list(triple_set), list(fake_triple_set), list(dir4_list_final)


def partial_match(pred_set, gold_set):
    """Reduce each entity string to its first space-separated word for
    partial-match evaluation.

    Note: ``str.split(' ')`` never returns an empty list, so the original
    ``len(...) > 0`` guard was dead code; the first word is always taken.

    Returns:
        ``(pred, gold)`` sets of first words.
    """
    pred = {entity.split(' ')[0] for entity in pred_set}
    gold = {entity.split(' ')[0] for entity in gold_set}
    return pred, gold

def partial_match_triples(pred_set, gold_set):
    """Reduce subject and object of each (sub, rel, obj) triple to their
    first space-separated word for partial-match evaluation; the relation
    is kept as-is.

    Note: ``str.split(' ')`` never returns an empty list, so the original
    ``len(...) > 0`` guards were dead code.

    Returns:
        ``(pred, gold)`` sets of reduced triples.
    """
    def _head(s):
        # First whitespace-delimited word of an entity string.
        return s.split(' ')[0]

    pred = {(_head(sub), rel, _head(obj)) for sub, rel, obj in pred_set}
    gold = {(_head(sub), rel, _head(obj)) for sub, rel, obj in gold_set}
    return pred, gold

def metric(entity_model, relation_model, direction_model, eval_data, id2rel, tokenizer, id2dir, exact_match=False, output_path=None):
    """Evaluate the extraction pipeline over *eval_data*.

    Args:
        entity_model / relation_model / direction_model: trained models
            forwarded to ``extract_items``.
        eval_data: iterable of dicts with 'text' and 'triple_list' keys.
        id2rel / id2dir: id -> label mappings.
        tokenizer: HBTokenizer instance.
        exact_match: if False, entities/triples are compared on their first
            word only (``partial_match`` / ``partial_match_triples``).
        output_path: optional path; per-example predictions are dumped there
            as JSON.

    Returns:
        14-tuple: entity P/R/F1, relation P/R/F1, triple P/R/F1, triple
        correct/predict/gold counts, direction-4 count, fake-pair count.
    """
    eps = 1e-10  # smoothing so empty prediction/gold sets don't divide by zero
    rel_correct_num = rel_predict_num = rel_gold_num = eps
    ent_correct_num = ent_predict_num = ent_gold_num = eps
    triple_correct_num = triple_predict_num = triple_gold_num = eps
    dir4_num = 0
    fake_num = 0

    # Fix: open with an explicit encoding (the JSON is written with
    # ensure_ascii=False) and close via finally so an exception during
    # evaluation cannot leak the handle (the original never used with/finally).
    F = open(output_path, 'w', encoding='utf-8') if output_path else None
    try:
        for line in tqdm(iter(eval_data)):
            pred_entity, pred_relation, pred_triples, pred_fake_triples, dir4_triples = extract_items(
                entity_model, relation_model, direction_model, tokenizer,
                line['text'], id2rel, id2dir)
            pred_entity = set(pred_entity)
            pred_relation = set(pred_relation)
            pred_triples = set(pred_triples)

            gold_entities, gold_rels, gold_triples = set(), set(), set()
            for value in line['triple_list']:
                gold_entities.add(value[0])
                gold_entities.add(value[2])
                gold_rels.add(value[1])
                gold_triples.add(value)

            if exact_match:
                pred_ent_eval, gold_ent_eval = pred_entity, gold_entities
                pred_triple_eval, gold_triple_eval = pred_triples, gold_triples
            else:
                pred_ent_eval, gold_ent_eval = partial_match(pred_entity, gold_entities)
                pred_triple_eval, gold_triple_eval = partial_match_triples(pred_triples, gold_triples)

            triple_correct_num += len(pred_triple_eval & gold_triple_eval)
            triple_predict_num += len(pred_triple_eval)
            triple_gold_num += len(gold_triple_eval)
            rel_correct_num += len(pred_relation & gold_rels)
            rel_predict_num += len(pred_relation)
            rel_gold_num += len(gold_rels)
            ent_correct_num += len(pred_ent_eval & gold_ent_eval)
            ent_predict_num += len(pred_ent_eval)
            ent_gold_num += len(gold_ent_eval)
            dir4_num += len(dir4_triples)
            fake_num += len(pred_fake_triples)

            if F:
                # JSON keys (including the historical 'pred_relaiton' typo)
                # are kept byte-identical: downstream readers may rely on them.
                result = json.dumps({
                    'text': line['text'],
                    'entity_gold': list(gold_ent_eval),
                    'entity_pred': list(pred_ent_eval),
                    'gold_relation': list(gold_rels),
                    'pred_relaiton': list(pred_relation),
                    'triples_list_gold': list(gold_triple_eval),
                    'triples_list_pred': list(pred_triple_eval),
                    'fake_triple_pred': list(pred_fake_triples),
                    'triple_list_lack': list(gold_triple_eval - pred_triples),
                }, ensure_ascii=False, indent=4)
                F.write(result + ',' + '\n')
    finally:
        if F:
            F.close()

    rel_precision = rel_correct_num / rel_predict_num
    rel_recall = rel_correct_num / rel_gold_num
    rel_f1 = 2 * rel_precision * rel_recall / (rel_precision + rel_recall)
    ent_precision = ent_correct_num / ent_predict_num
    ent_recall = ent_correct_num / ent_gold_num
    ent_f1 = 2 * ent_precision * ent_recall / (ent_precision + ent_recall)
    triple_precision = triple_correct_num / triple_predict_num  # 'tirple' typo fixed locally
    triple_recall = triple_correct_num / triple_gold_num
    triple_f1 = 2 * triple_precision * triple_recall / (triple_precision + triple_recall)

    print(f'correct_num:{triple_correct_num}\npredict_num:{triple_predict_num}\ngold_num:{triple_gold_num}')
    return (ent_precision, ent_recall, ent_f1,
            rel_precision, rel_recall, rel_f1,
            triple_precision, triple_recall, triple_f1,
            triple_correct_num, triple_predict_num, triple_gold_num,
            dir4_num, fake_num)