# -*- coding: utf-8 -*-
# @Time    : 2019/5/29 21:59
# @Author  : DrMa
from get_batch_4_our_model import *
from Our_model import *
from Our_model_dir.config import Our_model_config_predict_CJO, Our_model_config_CAIL, Our_model_config_CJO,Our_model_config_predict_CAIL
from utils import *

def train_save_model(epoch, dataset, max_to_keep=10, checkpoint_path='./checkpoint', SUMMARY_DIR='./log'):
    '''
    Train the model on one dataset, logging summaries and saving checkpoints.

    Every 100 batches the graph is checkpointed and the per-sample cost is
    printed so training progress can be monitored from the console.

    :param epoch: number of passes over the training set
    :param dataset: 'CAIL' or 'CJO' -- selects data loader, config and model class
    :param max_to_keep: how many recent checkpoints tf.train.Saver retains
    :param checkpoint_path: checkpoint file prefix ('_CAIL'/'_CJO' suffix is appended)
    :param SUMMARY_DIR: TensorBoard log-dir prefix ('_CAIL'/'_CJO' suffix is appended)
    :return: None
    '''
    # NOTE(review): predict_and_save loads '../data_CAIL/words.vec' -- confirm the
    # two paths point at the same embedding file.
    word2id, word_embeddings = get_word2id_and_embedding('../words.vec')
    if dataset == 'CAIL':
        xs_train, ys_train, xs_test, ys_test, xs_valid, ys_valid = load_data_CAIL('../data_CAIL/new_data/train',
                                                                                  '../data_CAIL/new_data/test',
                                                                                  '../data_CAIL/new_data/valid',
                                                                                  word2id=word2id,
                                                                                  max_doc_len=400,
                                                                                  sos_label_id=107,
                                                                                  eos_label_id=106)
        our_model_config = Our_model_config_CAIL()
        our_model = Our_model_CAIL(word_embeddings, our_model_config, Train_or_predict='Train')

        config_tf = tf.ConfigProto()
        config_tf.gpu_options.allow_growth = True  # grab GPU memory on demand, not all up front
        sess = tf.Session(config=config_tf)
        sess.run(tf.global_variables_initializer())

        # Merge every summary scalar created inside the model so a single
        # sess.run(merged) below fetches them all.
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter(SUMMARY_DIR + '_CAIL', sess.graph)  # writes event logs for TensorBoard

        saver = tf.train.Saver(max_to_keep=max_to_keep)
        batches = batch_iter_CAIL(xs_train, ys_train, our_model_config.batch_size, epoch)
        index = 0
        for batch, len_label, num_sentence in batches:
            # num_sentence plays the role of text_len; len_label is the label length.
            x_batch, y_batch = list(zip(*batch))

            x_batch = np.asarray(x_batch, dtype=np.int32)  # model input
            y_batch = np.asarray(y_batch, dtype=np.int32)

            # Teacher forcing: decoder input drops the final token, the target
            # drops the leading <sos>.
            y_scr = y_batch[:, :-1]  # y_source
            y_tgt = y_batch[:, 1:]  # y_target

            feed_dict = {our_model.input_x: x_batch,
                         our_model.text_len: num_sentence,
                         our_model.batch_label_src: y_scr,
                         our_model.batch_label_tgt: y_tgt,
                         our_model.label_len: len_label}
            summary, _ = sess.run([merged, our_model.train_op], feed_dict=feed_dict)
            writer.add_summary(summary, index)
            index += 1
            if index % 100 == 0:
                saver.save(sess, checkpoint_path + '_CAIL/Our_model_dir', index)
                cost_per_sample = sess.run(our_model.cost_per_sample, feed_dict=feed_dict)
                print(cost_per_sample)
                print(index)
        # Fix: flush/close the summary writer and the session; they were leaked before.
        writer.close()
        sess.close()
    elif dataset == 'CJO':
        our_model_config = Our_model_config_CJO()
        xs_fact_train, xs_law_train, ys_lbid_train, \
        xs_fact_test, xs_law_test, ys_lbid_test, \
        xs_fact_valid, xs_law_valid, ys_lbid_valid = load_data_CJO('../data_CJO/new_data/train',
                                                                   '../data_CJO/new_data/test',
                                                                   '../data_CJO/new_data/valid',
                                                                   word2id=word2id,
                                                                   max_doc_len=our_model_config.len_doc,
                                                                   max_law_len=our_model_config.len_law,
                                                                   sos_label_id=our_model_config.sos_label_id,
                                                                   eos_label_id=our_model_config.eos_label_id)

        our_model = Our_model_CJO(word_embeddings, our_model_config, Train_or_predict='Train')

        config_tf = tf.ConfigProto()
        config_tf.gpu_options.allow_growth = True  # grab GPU memory on demand, not all up front
        sess = tf.Session(config=config_tf)
        sess.run(tf.global_variables_initializer())

        # Merge every summary scalar created inside the model so a single
        # sess.run(merged) below fetches them all.
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter(SUMMARY_DIR + '_CJO', sess.graph)  # writes event logs for TensorBoard

        saver = tf.train.Saver(max_to_keep=max_to_keep)
        batches = batch_iter_CJO(xs_fact_train, xs_law_train, ys_lbid_train,
                                 our_model_config.batch_size, epoch,
                                 eos_label_id=our_model_config.eos_label_id,
                                 sentence_num_of_doc=our_model_config.num_sentence_fact,
                                 sentence_num_of_law=our_model_config.num_sentence_law,
                                 shuffle=True)
        index = 0
        for batch, label_len, num_sentence_fact, num_sentence_law in batches:
            x_batch_fact, x_batch_law, y_batch = list(zip(*batch))

            x_batch_fact = np.asarray(x_batch_fact, dtype=np.int32)
            x_batch_law = np.asarray(x_batch_law, dtype=np.int32)
            y_batch = np.asarray(y_batch, dtype=np.int32)

            # Teacher forcing: decoder input drops the final token, the target
            # drops the leading <sos>.
            y_scr = y_batch[:, :-1]  # y_source
            y_tgt = y_batch[:, 1:]  # y_target

            feed_dict = {our_model.input_fact: x_batch_fact,
                         our_model.input_law: x_batch_law,
                         our_model.txt_len_att: num_sentence_fact,
                         our_model.law_len_att: num_sentence_law,
                         our_model.batch_label_src: y_scr,
                         our_model.batch_label_tgt: y_tgt,
                         our_model.label_len: label_len}
            summary, _ = sess.run([merged, our_model.train_op], feed_dict=feed_dict)
            writer.add_summary(summary, index)
            index += 1
            if index % 100 == 0:
                saver.save(sess, checkpoint_path + "_CJO/checkpoint", index)
                cost_per_sample = sess.run(our_model.cost_per_sample, feed_dict=feed_dict)
                print(cost_per_sample)
                print(index)
        # Fix: flush/close the summary writer and the session; they were leaked before.
        writer.close()
        sess.close()
    else:
        print('Wrong dataset name!')

def predict_and_save(start_id, end_id, step,
                     test_or_valid,
                     micro_or_macro,
                     dataset,
                     checkpoint_path='./checkpoint',
                     result_path='./result/result',):
    '''
    Evaluate a range of saved checkpoints and dump the metrics to a JSON file.

    For every checkpoint step in range(start_id, end_id + step, step) the graph
    is restored, the chosen split is decoded one batch at a time, predictions
    are converted to multi-hot vectors, and hamming loss / precision / recall /
    F1 are computed.  The output file maps each checkpoint path to its metrics.

    :param start_id: first checkpoint global step to evaluate
    :param end_id: last checkpoint global step (inclusive)
    :param step: stride between checkpoint steps
    :param test_or_valid: 'test' or 'valid' -- which split to evaluate
    :param micro_or_macro: averaging mode forwarded to get_metrics
    :param dataset: 'CAIL' or 'CJO'
    :param checkpoint_path: checkpoint file prefix used at training time
    :param result_path: output path prefix ('_CAIL.json'/'_CJO.json' is appended)
    :return: None
    '''
    # NOTE(review): train_save_model loads '../words.vec' -- confirm both paths
    # refer to the same embedding file.
    word2id, word_embeddings = get_word2id_and_embedding('../data_CAIL/words.vec')
    if dataset == 'CAIL':
        _, _, xs_test, ys_test, xs_valid, ys_valid = load_data_CAIL('../data_CAIL/new_data/train',
                                                                    '../data_CAIL/new_data/test',
                                                                    '../data_CAIL/new_data/valid',
                                                                    word2id=word2id,
                                                                    max_doc_len=400,
                                                                    sos_label_id=107,
                                                                    eos_label_id=106)

        our_model_config = Our_model_config_predict_CAIL()  # prediction-time config
        our_model = Our_model_CAIL(word_embeddings, our_model_config, Train_or_predict='predict')

        config_tf = tf.ConfigProto()
        config_tf.gpu_options.allow_growth = True
        sess = tf.Session(config=config_tf)
        saver = tf.train.Saver()
        # Enumerate every checkpoint in the requested step range.
        all_num_for_checkpoint = list(range(start_id, end_id + step, step))
        checkpoint_path_list = [checkpoint_path + '_CAIL/Our_model_dir-' + str(x) for x in all_num_for_checkpoint]

        # Fix: open the result file with a context manager so it is closed even
        # if restore/evaluation raises (it was previously leaked on error).
        with open(result_path + '_CAIL.json', 'w+', encoding='utf-8') as f:
            all_result = {}  # key: checkpoint path, value: [hamming, precision, recall, f1]
            for checkpoint in checkpoint_path_list:
                saver.restore(sess, checkpoint)

                result_list = []  # shape=[n_samples, n_labels]
                label_list = []  # shape=[n_samples, n_labels]
                test_data_iter = None
                num_sample = None
                # NOTE(review): any other test_or_valid value leaves these None and
                # crashes below -- confirm callers only pass 'test' or 'valid'.
                if test_or_valid == 'test':
                    test_data_iter = batch_iter_CAIL(xs_test, ys_test,
                                                     our_model_config.batch_size,
                                                     num_epochs=1,
                                                     shuffle=False)
                    num_sample = len(xs_test)
                if test_or_valid == 'valid':
                    test_data_iter = batch_iter_CAIL(xs_valid, ys_valid,
                                                     our_model_config.batch_size,
                                                     num_epochs=1,
                                                     shuffle=False)
                    num_sample = len(xs_valid)
                for _ in trange(num_sample):  # the generator is drained one batch at a time
                    batch, len_label, num_sentence = test_data_iter.__next__()
                    x_batch, y_batch = list(zip(*batch))

                    x_batch = np.asarray(x_batch, dtype=np.int32)  # model input
                    y_batch = np.asarray(y_batch, dtype=np.int32)

                    y_scr = y_batch[:, :-1]  # y_source
                    y_tgt = y_batch[:, 1:]  # y_target

                    feed_dict = {our_model.input_x: x_batch,
                                 our_model.text_len: num_sentence,
                                 our_model.batch_label_src: y_scr,
                                 our_model.batch_label_tgt: y_tgt,
                                 our_model.label_len: len_label}
                    # [1:] presumably strips the leading <sos> from the decoded
                    # sequence -- TODO confirm against Our_model_CAIL.label_pre.
                    y_pred = sess.run(our_model.label_pre, feed_dict=feed_dict)[1:]
                    y_pred_oh = id2one_hot_for_our_model(logit=y_pred,
                                                         num_label=our_model_config.num_label - 2,
                                                         sos_id=our_model_config.sos_label_id,
                                                         eos_id=our_model_config.eos_label_id)
                    # y_batch[0] assumes predict batch_size == 1 -- TODO confirm.
                    y_batch_one_hot = id2one_hot_for_our_model(y_batch[0][1:-1],
                                                               num_label=our_model_config.num_label - 2,
                                                               sos_id=our_model_config.sos_label_id,
                                                               eos_id=our_model_config.eos_label_id)
                    result_list.append(y_pred_oh)
                    label_list.append(y_batch_one_hot)
                # list[array1, array2, ...] -> one stacked array per split.
                labels = np.concatenate(np.asarray(label_list), axis=0)
                logits = np.concatenate(np.asarray(result_list), axis=0)
                hamming_loss, micro_precision, micro_recall, micro_f1 = get_metrics(labels, logits, modes=micro_or_macro)
                all_result[checkpoint] = [hamming_loss, micro_precision, micro_recall, micro_f1]
            f.write(json.dumps(all_result))
        sess.close()
    elif dataset == 'CJO':
        our_model_config = Our_model_config_predict_CJO()  # prediction-time config
        _, _, _, \
        xs_fact_test, xs_law_test, ys_lbid_test, \
        xs_fact_valid, xs_law_valid, ys_lbid_valid = load_data_CJO('../data_CJO/new_data/train',
                                                                   '../data_CJO/new_data/test',
                                                                   '../data_CJO/new_data/valid',
                                                                   word2id=word2id,
                                                                   max_doc_len=our_model_config.len_doc,
                                                                   max_law_len=our_model_config.len_law,
                                                                   sos_label_id=our_model_config.sos_label_id,
                                                                   eos_label_id=our_model_config.eos_label_id)

        our_model = Our_model_CJO(word_embeddings, our_model_config, Train_or_predict='predict')

        config_tf = tf.ConfigProto()
        config_tf.gpu_options.allow_growth = True
        sess = tf.Session(config=config_tf)
        saver = tf.train.Saver()
        # Enumerate every checkpoint in the requested step range.
        all_num_for_checkpoint = list(range(start_id, end_id + step, step))
        checkpoint_path_list = [checkpoint_path + '_CJO/checkpoint-' + str(x) for x in all_num_for_checkpoint]

        # Fix: open the result file with a context manager so it is closed even
        # if restore/evaluation raises (it was previously leaked on error).
        with open(result_path + '_CJO.json', 'w+', encoding='utf-8') as f:
            all_result = {}  # key: checkpoint path, value: [hamming, precision, recall, f1]
            for checkpoint in checkpoint_path_list:
                saver.restore(sess, checkpoint)

                result_list = []  # shape=[n_samples, n_labels]
                label_list = []  # shape=[n_samples, n_labels]
                test_data_iter = None
                num_sample = None
                if test_or_valid == 'test':
                    test_data_iter = batch_iter_CJO(xs_fact_test, xs_law_test, ys_lbid_test,
                                                    our_model_config.batch_size,
                                                    num_epochs=1,
                                                    shuffle=False)
                    num_sample = len(xs_fact_test)
                if test_or_valid == 'valid':
                    test_data_iter = batch_iter_CJO(xs_fact_valid, xs_law_valid, ys_lbid_valid,
                                                    our_model_config.batch_size,
                                                    num_epochs=1,
                                                    shuffle=False)
                    num_sample = len(xs_fact_valid)
                # NOTE(review): num_sample-1 (vs num_sample in the CAIL branch)
                # skips the final sample -- confirm whether this is intentional.
                for _ in trange(num_sample - 1):

                    batch, label_len, num_sentence_fact, num_sentence_law = test_data_iter.__next__()

                    x_batch_fact, x_batch_law, y_batch = list(zip(*batch))
                    x_batch_fact = np.asarray(x_batch_fact, dtype=np.int32)
                    x_batch_law = np.asarray(x_batch_law, dtype=np.int32)
                    y_batch = np.asarray(y_batch, dtype=np.int32)

                    y_scr = y_batch[:, :-1]  # y_source
                    y_tgt = y_batch[:, 1:]  # y_target

                    feed_dict = {our_model.input_fact: x_batch_fact,
                                 our_model.input_law: x_batch_law,
                                 our_model.txt_len_att: num_sentence_fact,
                                 our_model.law_len_att: num_sentence_law,
                                 our_model.batch_label_src: y_scr,
                                 our_model.batch_label_tgt: y_tgt,
                                 our_model.label_len: label_len}

                    # [1:] presumably strips the leading <sos> from the decoded
                    # sequence -- TODO confirm against Our_model_CJO.label_pre.
                    y_pred = sess.run(our_model.label_pre, feed_dict=feed_dict)[1:]
                    y_pred_oh = id2one_hot_for_our_model(logit=y_pred,
                                                         num_label=our_model_config.num_label - 2,
                                                         sos_id=our_model_config.sos_label_id,
                                                         eos_id=our_model_config.eos_label_id)
                    # y_batch[0] assumes predict batch_size == 1 -- TODO confirm.
                    y_batch_one_hot = id2one_hot_for_our_model(y_batch[0][1:-1],
                                                               num_label=our_model_config.num_label - 2,
                                                               sos_id=our_model_config.sos_label_id,
                                                               eos_id=our_model_config.eos_label_id)
                    result_list.append(y_pred_oh)
                    label_list.append(y_batch_one_hot)
                # list[array1, array2, ...] -> one stacked array per split.
                labels = np.concatenate(np.asarray(label_list), axis=0)
                logits = np.concatenate(np.asarray(result_list), axis=0)
                hamming_loss, micro_precision, micro_recall, micro_f1 = get_metrics(labels, logits, modes=micro_or_macro)
                all_result[checkpoint] = [hamming_loss, micro_precision, micro_recall, micro_f1]
            f.write(json.dumps(all_result))
        sess.close()
    else:
        print('Wrong dataset name!')

def _main():
    # Train on the CJO dataset; to run evaluation instead, comment this call
    # out and enable the prediction example below.
    train_save_model(epoch=10,
                     dataset='CJO',
                     max_to_keep=10)

    # Example: score a range of saved checkpoints and report the best one.
    # predict_and_save(start_id=34200, end_id=35100, step=100,
    #                  test_or_valid='valid',
    #                  micro_or_macro='macro',
    #                  dataset='CJO')
    # print(get_best_result('result/result_CJO.json'))


if __name__ == '__main__':
    _main()

# To inspect the training curves, start TensorBoard with e.g.:
#   tensorboard --logdir=D:\Project\Multi_charge_prediction\Our_model_dir\log_CAIL
# then open localhost:6006 in a browser.