import os
import random as rd

from sklearn.metrics import  precision_score, recall_score, f1_score
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
from hsq.data_processing.data import TREC6
from bert4keras.snippets import DataGenerator, sequence_padding
from bert4keras.tokenizers import Tokenizer, load_vocab
from bert4keras.backend import is_tf_keras, keras
from bert4keras.optimizers import Adam
from hsq.trec.bert_textcnn_model import build_bert_model



# Paths to the pre-trained BERT-tiny checkpoint and vocabulary files.
prefix = r'../hsq/BERT/bert_tiny'
config_path = prefix + '/bert_config.json'
chekpoint_path = prefix + '/bert_model.ckpt'  # NOTE(review): "chekpoint" typo kept — name is referenced below
dict_path = prefix + '/vocab.txt'
# Location of the best fine-tuned weights (loaded by predict() and test()).
best_model_filepath = '../hsq/mymodel/trec_model.h5'
maxlen = 64        # max token length per question
batch_size = 128   # batch size for the data generators
class_nums = 6     # TREC-6 has six coarse question classes
# Fixed seed so the three offsets below are identical on every run.
rd.seed(1234)
# Offsets subtracted from the reported P/R/F scores in predict().
# NOTE(review): this deliberately shifts the displayed metrics away from the
# true sklearn scores — confirm this adjustment is intended.
basea = rd.uniform(1,2)/2
baseb=rd.uniform(0,1)
basec=rd.uniform(0,1)*0.6



def build_dict(dict_path):
    '''
    Shrink the BERT vocabulary to only the tokens the task needs.

    Scans the TREC-6 training texts and keeps every whitespace-separated
    token that occurs at least twice AND exists in the original BERT vocab,
    plus the special tokens BERT always requires.

    :param dict_path: path to the original BERT vocab.txt
    :return: (token_dict, keep_words) — token_dict maps token -> new id,
             keep_words lists the original vocab ids to keep, in the same order
    '''
    trec = TREC6()
    train_data = trec.train_data()

    # Count occurrences of each whitespace-separated token in the training text.
    # (The original also accumulated an unused `word` list here; removed.)
    word_nums = {}
    for text in train_data['text']:
        for token in text.split(' '):
            word_nums[token] = word_nums.get(token, 0) + 1
    # Drop rare tokens (fewer than 2 occurrences).
    word_nums = {token: count for token, count in word_nums.items() if count >= 2}

    _token_dict = load_vocab(dict_path)
    token_dict, keep_words = {}, []

    # Special tokens must always be present, in this fixed order.
    for c in ['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[unused1]']:
        token_dict[c] = len(token_dict)
        keep_words.append(_token_dict[c])

    # Keep the task tokens that also exist in the original vocabulary.
    for c in word_nums:
        if c in _token_dict:
            token_dict[c] = len(token_dict)
            keep_words.append(_token_dict[c])

    return token_dict, keep_words

# Build the reduced vocabulary once at import time and create the tokenizer
# over it; both are shared globals used by data_generator, predict and test.
token_dict, keep_words = build_dict(dict_path)
tokenizer = Tokenizer(token_dict)


class data_generator(DataGenerator):
    '''
    Batch generator: turns (label, text) samples into padded model inputs.
    '''
    def __iter__(self, random=False):
        token_batch, segment_batch, label_batch = [], [], []
        for is_end, (label, text) in self.sample(random):
            tokens, segments = tokenizer.encode(text, maxlen=maxlen)
            token_batch.append(tokens)
            segment_batch.append(segments)
            label_batch.append([label])
            if is_end or len(token_batch) == self.batch_size:
                # [model inputs], labels
                yield (
                    [sequence_padding(token_batch), sequence_padding(segment_batch)],
                    sequence_padding(label_batch),
                )
                # Reset accumulators for the next batch.
                token_batch, segment_batch, label_batch = [], [], []


def predict(text):
    '''
    Predict the type of an English question.
    :param text: the English question
    :return: (question type, precision, recall, f1) — metrics as strings
    '''
    generator = data_generator([([0], text)], batch_size)
    model = build_bert_model(config_path=config_path, checkpoint_path=chekpoint_path,
                             class_nums=class_nums, keep_words=keep_words)
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=Adam(5e-5),
        metrics=['accuracy']
    )

    # Load the fine-tuned weights if they exist; otherwise the raw
    # pre-trained model is used.
    if os.path.exists(best_model_filepath):
        print('---------------load the model---------------')
        model.load_weights(best_model_filepath)

    pred = -1
    for inputs, _ in generator:
        pred = model.predict(inputs).argmax(axis=1)

    _COARSE_LABELS = {"DESC": 0, "ENTY": 1, "ABBR": 2, "HUM": 3, "NUM": 4, "LOC": 5}
    label = list(_COARSE_LABELS.keys())

    p, r, f = test()
    # Each metric is rounded to 3 decimals, shifted by its fixed module-level
    # offset, and formatted back to a 3-decimal string.
    adjusted = tuple(
        '%.3f' % (((float('%.3f' % score) * 10) - offset) / 10)
        for score, offset in ((p, basea), (r, baseb), (f, basec))
    )
    return (label[pred[0]],) + adjusted

def test():
    '''
    Evaluate the fine-tuned model on the TREC-6 test set.

    Loads the saved best weights, runs prediction over every test batch and
    scores the results against the gold labels.

    :return: (precision, recall, f1) — weighted-average floats
    '''
    trec = TREC6()
    # train split is not needed for evaluation
    _, test_data = trec.bert_load_data()
    test_generator = data_generator(test_data, batch_size)

    model = build_bert_model(config_path=config_path, checkpoint_path=chekpoint_path,
                             class_nums=class_nums, keep_words=keep_words)
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=Adam(5e-5),
        metrics=['accuracy']
    )
    # Inference only — the unused ModelCheckpoint callback (never passed to
    # any fit() call) was removed.
    model.load_weights(best_model_filepath)

    test_pred = []
    for x, _ in test_generator:
        test_pred.extend(model.predict(x).argmax(axis=1))
    # Gold labels: each test sample is (label, text).
    test_true = [sample[0] for sample in test_data]

    precision = precision_score(test_true, test_pred, average='weighted')
    recall = recall_score(test_true, test_pred, average='weighted')
    f1 = f1_score(test_true, test_pred, average='weighted')
    return precision, recall, f1




if __name__ == '__main__':
    # Demo: classify a sample question and show the adjusted metrics.
    result = predict("What person 's head is on a dime ?")
    print(result)

