# -*- coding: utf-8 -*-
# @Time    : 2019/6/19 10:03
# @Author  : DrMa

from get_batch_4_our_model import *
from Our_model import *
from Our_model_dir.config import Our_model_config_predict_CJO, Our_model_config_CJO
from utils import *

def train_save_model(epoch, max_to_keep=10, checkpoint_path='./checkpoint', SUMMARY_DIR='./log'):
    '''
    Train Our_model_wo_law_CJO on the CJO training split, logging summaries and
    periodically saving checkpoints.

    :param epoch: number of passes over the training data handed to batch_iter_CJO
    :param max_to_keep: maximum number of checkpoints tf.train.Saver retains
    :param checkpoint_path: checkpoint path prefix; '_wo_law_CJO/checkpoint' is appended
    :param SUMMARY_DIR: TensorBoard log dir prefix; '_wo_law_CJO' is appended
    :return: None
    '''
    word2id, word_embeddings = get_word2id_and_embedding('../words.vec')

    our_model_config = Our_model_config_CJO()
    # Only the training split is used here; the test/valid outputs are discarded.
    xs_fact_train, xs_law_train, ys_lbid_train, _, _, _, _, _, _ = load_data_CJO(
        '../data_CJO/new_data/train',
        '../data_CJO/new_data/test',
        '../data_CJO/new_data/valid',
        word2id=word2id,
        max_doc_len=our_model_config.len_doc,
        max_law_len=our_model_config.len_law,
        sos_label_id=our_model_config.sos_label_id,
        eos_label_id=our_model_config.eos_label_id)

    our_model = Our_model_wo_law_CJO(word_embeddings, our_model_config, Train_or_predict='Train')

    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True  # claim GPU memory on demand, not all at once
    sess = tf.Session(config=config_tf)
    writer = None
    try:
        sess.run(tf.global_variables_initializer())

        # Merge every summary op created inside the model so one sess.run(merged)
        # fetches all of them.
        merged = tf.summary.merge_all()
        # Writer object (analogous to a Saver) that streams summaries to the log dir.
        writer = tf.summary.FileWriter(SUMMARY_DIR + '_wo_law_CJO', sess.graph)

        saver = tf.train.Saver(max_to_keep=max_to_keep)
        batches = batch_iter_CJO(xs_fact_train, xs_law_train, ys_lbid_train,
                                 our_model_config.batch_size, epoch,
                                 eos_label_id=our_model_config.eos_label_id,
                                 sentence_num_of_doc=our_model_config.num_sentence_fact,
                                 sentence_num_of_law=our_model_config.num_sentence_law,
                                 shuffle=True)
        index = 0
        for batch, label_len, num_sentence_fact, num_sentence_law in batches:
            # The law-text column of the batch is unused by the "wo_law" model.
            x_batch_fact, _, y_batch = list(zip(*batch))

            x_batch_fact = np.asarray(x_batch_fact, dtype=np.int32)

            y_batch = np.asarray(y_batch, dtype=np.int32)

            # Teacher forcing: source is labels shifted left, target shifted right.
            y_src = y_batch[:, :-1]
            y_tgt = y_batch[:, 1:]

            feed_dict = {our_model.input_fact: x_batch_fact,
                         our_model.txt_len_att: num_sentence_fact,
                         our_model.batch_label_src: y_src,
                         our_model.batch_label_tgt: y_tgt,
                         our_model.label_len: label_len}
            summary, _ = sess.run([merged, our_model.train_op], feed_dict=feed_dict)
            writer.add_summary(summary, index)
            index += 1
            if index % 100 == 0:
                # Checkpoint every 100 steps and report the current per-sample cost.
                saver.save(sess, checkpoint_path + "_wo_law_CJO/checkpoint", index)
                cost_per_sample = sess.run(our_model.cost_per_sample, feed_dict=feed_dict)
                print(cost_per_sample)
                print(index)
    finally:
        # Release the summary writer and GPU session even if training aborts.
        if writer is not None:
            writer.close()
        sess.close()

def predict_and_save(start_id, end_id, step,
                     test_or_valid,
                     micro_or_macro,
                     checkpoint_path='./checkpoint',
                     result_path='./result/result',):
    '''
    Evaluate a range of saved checkpoints on the CJO test or valid split and
    write per-checkpoint metrics to a JSON file.

    :param start_id: first checkpoint global step to evaluate (inclusive)
    :param end_id: last checkpoint global step to evaluate (inclusive)
    :param step: stride between checkpoint steps
    :param test_or_valid: 'test' or 'valid' — which split to evaluate
    :param micro_or_macro: averaging mode forwarded to get_metrics
    :param checkpoint_path: checkpoint path prefix; '_wo_law_CJO/checkpoint-<step>' is appended
    :param result_path: result file prefix; '_wo_law_CJO.json' is appended
    :return: None
    '''
    # NOTE(review): training loads '../words.vec' but prediction loads
    # '../data_CAIL/words.vec' — confirm both resolve to the same embeddings,
    # otherwise the restored model sees a mismatched vocabulary.
    word2id, word_embeddings = get_word2id_and_embedding('../data_CAIL/words.vec')

    our_model_config = Our_model_config_predict_CJO()  # prediction-time configuration
    _, _, _, \
    xs_fact_test, xs_law_test, ys_lbid_test, \
    xs_fact_valid, xs_law_valid, ys_lbid_valid = load_data_CJO('../data_CJO/new_data/train',
                                                               '../data_CJO/new_data/test',
                                                               '../data_CJO/new_data/valid',
                                                               word2id=word2id,
                                                               max_doc_len=our_model_config.len_doc,
                                                               max_law_len=our_model_config.len_law,
                                                               sos_label_id=our_model_config.sos_label_id,
                                                               eos_label_id=our_model_config.eos_label_id)

    our_model = Our_model_wo_law_CJO(word_embeddings, our_model_config, Train_or_predict='predict')

    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True
    sess = tf.Session(config=config_tf)
    saver = tf.train.Saver()
    # Enumerate every checkpoint in [start_id, end_id] with the given stride.
    all_num_for_checkpoint = list(range(start_id, end_id + step, step))
    checkpoint_path_list = [checkpoint_path + '_wo_law_CJO/checkpoint-' + str(x) for x in all_num_for_checkpoint]

    all_result = {}  # key: checkpoint path, value: metrics for that checkpoint

    try:
        with open(result_path + '_wo_law_CJO.json', 'w+', encoding='utf-8') as f:
            for checkpoint in checkpoint_path_list:
                saver.restore(sess, checkpoint)

                result_list = []  # shape = [n_samples, n_labels]
                label_list = []  # shape = [n_samples, n_labels]
                if test_or_valid == 'test':
                    test_data_iter = batch_iter_CJO(xs_fact_test, xs_law_test, ys_lbid_test,
                                                    our_model_config.batch_size,
                                                    num_epochs=1,
                                                    shuffle=False)
                    num_sample = len(xs_fact_test)
                elif test_or_valid == 'valid':
                    test_data_iter = batch_iter_CJO(xs_fact_valid, xs_law_valid, ys_lbid_valid,
                                                    our_model_config.batch_size,
                                                    num_epochs=1,
                                                    shuffle=False)
                    num_sample = len(xs_fact_valid)
                else:
                    raise ValueError("test_or_valid must be 'test' or 'valid', got %r" % (test_or_valid,))
                # Pull batches explicitly; the loop count (num_sample - 1) keeps us
                # from running the generator past its end.
                for _ in trange(num_sample - 1):

                    batch, label_len, num_sentence_fact, _ = next(test_data_iter)

                    x_batch_fact, _, y_batch = list(zip(*batch))
                    x_batch_fact = np.asarray(x_batch_fact, dtype=np.int32)

                    y_batch = np.asarray(y_batch, dtype=np.int32)

                    y_src = y_batch[:, :-1]
                    y_tgt = y_batch[:, 1:]

                    feed_dict = {our_model.input_fact: x_batch_fact,
                                 our_model.txt_len_att: num_sentence_fact,
                                 our_model.batch_label_src: y_src,
                                 our_model.batch_label_tgt: y_tgt,
                                 our_model.label_len: label_len}

                    # Drop the leading SOS prediction; convert ids to one-hot,
                    # excluding the SOS/EOS slots (hence num_label - 2).
                    y_pred = sess.run(our_model.label_pre, feed_dict=feed_dict)[1:]
                    y_pred_oh = id2one_hot_for_our_model(logit=y_pred,
                                                         num_label=our_model_config.num_label - 2,
                                                         sos_id=our_model_config.sos_label_id,
                                                         eos_id=our_model_config.eos_label_id)
                    # Ground truth: strip SOS/EOS before one-hot encoding.
                    y_batch_one_hot = id2one_hot_for_our_model(y_batch[0][1:-1],
                                                               num_label=our_model_config.num_label - 2,
                                                               sos_id=our_model_config.sos_label_id,
                                                               eos_id=our_model_config.eos_label_id)
                    result_list.append(y_pred_oh)
                    label_list.append(y_batch_one_hot)
                # list[array1, array2, ...] -> single [n_samples, n_labels] array.
                labels = np.concatenate(np.asarray(label_list), axis=0)
                logits = np.concatenate(np.asarray(result_list), axis=0)
                hamming_loss, micro_precision, micro_recall, micro_f1 = get_metrics(labels, logits, modes=micro_or_macro)
                all_result[checkpoint] = [hamming_loss, micro_precision, micro_recall, micro_f1]
            f.write(json.dumps(all_result))
    finally:
        sess.close()

if __name__ == '__main__':
    # Training is disabled here; re-enable to retrain before evaluating.
    # train_save_model(epoch=10, max_to_keep=10)

    # Sweep the saved checkpoints on the validation split, then rank them.
    predict_and_save(start_id=34200, end_id=35100, step=100,
                     test_or_valid='valid', micro_or_macro='macro')
    ordered_results = get_best_result('./result/result_wo_law_CJO.json')
    print(ordered_results)