#!/usr/bin/env python
# encoding: utf-8
from sklearn.metrics import f1_score
from ABSA.nn_layer import softmax_layer, bi_dynamic_rnn, reduce_mean_with_len, softmax_layer1
from ABSA.att_layer import bilinear_attention_layer_1
from ABSA.config import *
from ABSA.utils import load_w2v, batch_iter, load_inputs_3
import datetime
import os


def tower_absa(inputs, sen_len, target, sen_len_tr, keep_prob1, _id='all'):
    """Build the shared-private multi-task towers for TOWE and ABSA.

    A shared BiLSTM plus one private BiLSTM per task encode the sentence;
    each task concatenates shared and private states, attends over them with
    a bilinear attention conditioned on the mean-pooled target encoding, and
    classifies the attended vector with a softmax layer.

    Args:
        inputs: [batch, max_len, emb] sentence embeddings (word + position).
        sen_len: [batch] true sentence lengths.
        target: [batch, max_target_len, emb] target-phrase embeddings.
        sen_len_tr: [batch] true target lengths.
        keep_prob1: dropout keep probability (TF1 `keep_prob` semantics).
        _id: unused tag kept for interface compatibility.

    Returns:
        (towe_logits, absa_logits), each [batch, n_class].
    """
    rnn_cell = tf.contrib.rnn.LSTMCell

    # Input dropout on both the sentence and the target embeddings.
    sent_in = tf.nn.dropout(inputs, keep_prob=keep_prob1)
    target_in = tf.nn.dropout(target, keep_prob=keep_prob1)

    # Task-shared sentence encoder.
    with tf.variable_scope("share_towe_absa"):
        shared_states = bi_dynamic_rnn(rnn_cell, sent_in, FLAGS.n_hidden, sen_len, 'sen1')

    # TOWE-private encoder; concat with shared -> [batch, max_len, 4 * hidden]
    with tf.variable_scope("private_towe"):
        towe_states = bi_dynamic_rnn(rnn_cell, sent_in, FLAGS.n_hidden, sen_len, 'sen2')
    towe_features = tf.concat([shared_states, towe_states], 2)

    # ABSA-private encoder; concat with shared -> [batch, max_len, 4 * hidden]
    with tf.variable_scope("private_absa"):
        absa_states = bi_dynamic_rnn(rnn_cell, sent_in, FLAGS.n_hidden, sen_len, 'sen3')
    absa_features = tf.concat([shared_states, absa_states], 2)

    # Target encoder, mean-pooled over true length -> [batch, 2 * hidden]
    with tf.variable_scope("rnn2_target"):
        target_states = bi_dynamic_rnn(rnn_cell, target_in, FLAGS.n_hidden, sen_len_tr, 'sen4')
    target_vec = reduce_mean_with_len(target_states, sen_len_tr)

    # Target-conditioned bilinear attention over the ABSA features:
    # [batch, 1, max_len] x [batch, max_len, 4 * hidden] -> [batch, 4 * hidden]
    with tf.variable_scope('absa'):
        absa_att = bilinear_attention_layer_1(absa_features, target_vec, sen_len, 2 * FLAGS.n_hidden,
                                              4 * FLAGS.n_hidden, FLAGS.l2_reg, FLAGS.random_base, 'sen5')
        absa_vec = tf.squeeze(tf.matmul(absa_att, absa_features))
        absa_vec = tf.reshape(absa_vec, [-1, 4 * FLAGS.n_hidden])

    # Same attention mechanism over the TOWE features.
    with tf.variable_scope('towe'):
        towe_att = bilinear_attention_layer_1(towe_features, target_vec, sen_len, 2 * FLAGS.n_hidden,
                                              4 * FLAGS.n_hidden, FLAGS.l2_reg, FLAGS.random_base, 'sen6')
        towe_vec = tf.squeeze(tf.matmul(towe_att, towe_features))
        towe_vec = tf.reshape(towe_vec, [-1, 4 * FLAGS.n_hidden])

    # Per-task softmax classification heads.
    towe_logits = softmax_layer1(towe_vec, 4 * FLAGS.n_hidden, keep_prob1, FLAGS.l2_reg, FLAGS.n_class, 'sen7')
    absa_logits = softmax_layer1(absa_vec, 4 * FLAGS.n_hidden, keep_prob1, FLAGS.l2_reg, FLAGS.n_class, 'sen8')
    return towe_logits, absa_logits


def main(_):
    """Alternately train the TOWE tower (task 1) and the ABSA tower (task 2).

    Each outer iteration runs one epoch of TOWE training followed by an
    evaluation on the TOWE dev set, then one epoch of ABSA training followed
    by an evaluation on the ABSA test set, saving a checkpoint whenever the
    (shared) best accuracy improves.
    """
    # Pretrained word vectors; the embedding matrix is frozen (tf.constant).
    word_id_mapping, w2v = load_w2v(FLAGS.embedding_file_path, FLAGS.embedding_dim)
    word_embedding = tf.constant(w2v, name='word_embedding')

    # tower process data
    # Task-1 (TOWE) train/dev splits: sentence ids, lengths, target ids,
    # target lengths, one-hot labels, and position-index sequences.
    tr_x_train, tr_sen_len_train, tr_target_word_train, tr_tar_len_train, tr_y_train, tr_position_train = load_inputs_3(
        FLAGS.train_file_path_1,
        word_id_mapping,
        FLAGS.max_sentence_len,
        FLAGS.max_target_len
    )
    tr_x_dev, tr_sen_len_dev, tr_target_word_dev, tr_tar_len_dev, tr_y_dev, tr_position_dev = load_inputs_3(
        FLAGS.validate_file_path_1,
        word_id_mapping,
        FLAGS.max_sentence_len,
        FLAGS.max_target_len
    )

    # absa process data
    # Task-2 (ABSA) train/test splits, same field layout as above.
    tr_x, tr_sen_len, tr_target_word, tr_tar_len, tr_y, tr_position = load_inputs_3(
        FLAGS.train_file_path_2,
        word_id_mapping,
        FLAGS.max_sentence_len,
        FLAGS.max_target_len
    )
    te_x, te_sen_len, te_target_word, te_tar_len, te_y, te_position = load_inputs_3(
        FLAGS.validate_file_path_2,
        word_id_mapping,
        FLAGS.max_sentence_len,
        FLAGS.max_target_len
    )

    # Placeholders shared by both tasks: y feeds the TOWE head, y_ the ABSA head.
    keep_prob1 = tf.placeholder(tf.float32, name='input_keep_prob1')
    with tf.name_scope('inputs'):
        x = tf.placeholder(tf.int32, [None, FLAGS.max_sentence_len], name='input_x')
        y = tf.placeholder(tf.float32, [None, FLAGS.n_class], name='input_y')
        y_ = tf.placeholder(tf.float32, [None, FLAGS.n_class], name='input_y_2')
        sen_len = tf.placeholder(tf.int32, [None], name='input_sen_len')
        target_words = tf.placeholder(tf.int32, [None, FLAGS.max_target_len], name='input_target')
        tar_len = tf.placeholder(tf.int32, [None], name='input_tar_len')
        position = tf.placeholder(tf.int32, [None, FLAGS.max_sentence_len], name='position')

    inputs_s = tf.nn.embedding_lookup(word_embedding, x)

    # Learned, L2-regularized position embeddings (one row per sentence slot).
    position_embeddings = tf.get_variable(
        name='position_embedding',
        shape=[FLAGS.max_sentence_len, FLAGS.position_embedding_dim],
        initializer=tf.random_uniform_initializer(-FLAGS.random_base, FLAGS.random_base),
        regularizer=tf.contrib.layers.l2_regularizer(FLAGS.l2_reg)
    )

    # Concatenate word and position embeddings along the feature axis.
    input_position = tf.nn.embedding_lookup(position_embeddings, position)
    inputs_s = tf.concat([inputs_s, input_position], 2)

    target = tf.nn.embedding_lookup(word_embedding, target_words)

    # Build both task heads on the shared/private encoders.
    prob_tower, prob_absa = tower_absa(inputs_s, sen_len, target, tar_len, keep_prob1, FLAGS.t1)

    # loss_func / acc_func presumably come from `ABSA.config` via the
    # star import — TODO confirm.
    loss1 = loss_func(y, prob_tower)
    loss2 = loss_func(y_, prob_absa)

    acc_num_1, acc_prob_1 = acc_func(y, prob_tower)
    acc_num, acc_prob = acc_func(y_, prob_absa)

    # One optimizer and global step per task; both use the same learning rate.
    global_step1 = tf.Variable(0, name='tr_global_step_1', trainable=False)
    optimizer1 = tf.train.MomentumOptimizer(learning_rate=FLAGS.learning_rate_2, momentum=0.9).minimize(loss1,
                                                                                                        global_step=global_step1)

    global_step2 = tf.Variable(0, name='tr_global_step_2', trainable=False)
    optimizer2 = tf.train.MomentumOptimizer(learning_rate=FLAGS.learning_rate_2, momentum=0.9).minimize(loss2,
                                                                                                        global_step=global_step2)

    # Class-index tensors for evaluation (argmax over one-hot / softmax).
    true_y_1 = tf.argmax(y, 1, name='true_y_1')
    pred_y_1 = tf.argmax(prob_tower, 1, name='pred_y_1')

    true_y_2 = tf.argmax(y_, 1, name='true_y_2')
    pred_y_2 = tf.argmax(prob_absa, 1, name='pred_y_2')

    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        def train_step_1(i, x_f, sen_len_f, target, tl, yi, yi_2, x_poisition, kp1):
            # One optimizer step for the TOWE task (loss1). Both label
            # placeholders are fed, but only `y` affects loss1.
            feed_dict = {
                x: x_f,
                y: yi,
                y_: yi_2,
                sen_len: sen_len_f,
                target_words: target,
                tar_len: tl,
                position: x_poisition,
                keep_prob1: kp1
            }
            step, _, losses = sess.run([global_step1, optimizer1, loss1], feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: Iter {}, step {}, loss1 {:g}".format(time_str, i, step, losses))

        def train_step_2(i, x_f, sen_len_f, target, tl, yi, yi_2, x_poisition, kp1):
            # One optimizer step for the ABSA task (loss2); only `y_` matters.
            feed_dict = {
                x: x_f,
                y: yi,
                y_: yi_2,
                sen_len: sen_len_f,
                target_words: target,
                tar_len: tl,
                position: x_poisition,
                keep_prob1: kp1
            }
            step, _, losses = sess.run([global_step2, optimizer2, loss2], feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: Iter {}, step {}, loss2 {:g}".format(time_str, i, step, losses))

        # def dev_step_1(te_x_f, te_sen_len_f, te_target, te_tl, te_yi, te_yi_2, te_x_poisition):
        #     feed_dict = {
        #         x: te_x_f,
        #         y: te_yi,
        #         y_: te_yi_2,
        #         sen_len: te_sen_len_f,
        #         target_words: te_target,
        #         tar_len: te_tl,
        #         position: te_x_poisition,
        #         keep_prob1: 1.0
        #     }
        #
        #     tf_true, tf_pred, _loss = sess.run([true_y_1, pred_y_1, loss1], feed_dict)
        #     cost = 0
        #     pre_label, true_label = [], []
        #     for logit, position1, length in zip(tf_pred, tf_true, tr_sen_len_dev):
        #         logit = logit[:length]
        #         tr_position = position1[:length]
        #         cost += _loss * length
        #         pre_label.append(logit)
        #         true_label.append(tr_position)
        #     return pre_label, true_label, cost

        def dev_step_1(te_x_f, te_sen_len_f, te_target, te_tl, te_yi, te_yi_2, te_x_poisition):
            # Evaluate the TOWE head on one batch (dropout disabled).
            # Returns predicted labels, true labels, length-weighted loss,
            # correct-prediction count, and the batch size.
            feed_dict = {
                x: te_x_f,
                y: te_yi,
                y_: te_yi_2,
                sen_len: te_sen_len_f,
                target_words: te_target,
                tar_len: te_tl,
                position: te_x_poisition,
                keep_prob1: 1.0
            }

            tf_true, tf_pred, _loss, acc1 = sess.run([true_y_1, pred_y_1, loss1, acc_num_1], feed_dict)
            cost = 0
            acc = 0
            num = 0
            pre_label, true_label = [], []

            length = len(te_x_f)
            acc += acc1
            num += length
            cost += _loss * length
            pre_label += list(tf_pred)
            true_label += list(tf_true)

            return pre_label, true_label, cost, acc, num

        def dev_step_2(te_x_f, te_sen_len_f, te_target, te_tl, te_yi, te_yi_2, te_x_poisition):
            # Same as dev_step_1 but for the ABSA head (loss2 / acc_num).
            feed_dict = {
                x: te_x_f,
                y: te_yi,
                y_: te_yi_2,
                sen_len: te_sen_len_f,
                target_words: te_target,
                tar_len: te_tl,
                position: te_x_poisition,
                keep_prob1: 1.0
            }

            tf_true, tf_pred, _loss, acc1 = sess.run([true_y_2, pred_y_2, loss2, acc_num], feed_dict)
            cost = 0
            acc = 0
            num = 0
            pre_label, true_label = [], []

            length = len(te_x_f)
            acc += acc1
            num += length
            cost += _loss * length
            pre_label += list(tf_pred)
            true_label += list(tf_true)

            return pre_label, true_label, cost, acc, num

        # Separate checkpoint directories per task; both savers snapshot all
        # global variables (shared encoder included).
        checkpoint_dir_1 = os.path.abspath(FLAGS.saver_checkpoint_1)
        checkpoint_prefix_1 = os.path.join(checkpoint_dir_1, "model")
        if not os.path.exists(checkpoint_dir_1):
            os.makedirs(checkpoint_dir_1)
        saver_1 = tf.train.Saver(tf.global_variables(), max_to_keep=5)

        checkpoint_dir_2 = os.path.abspath(FLAGS.saver_checkpoint_2)
        checkpoint_prefix_2 = os.path.join(checkpoint_dir_2, "model")
        if not os.path.exists(checkpoint_dir_2):
            os.makedirs(checkpoint_dir_2)
        saver_2 = tf.train.Saver(tf.global_variables(), max_to_keep=5)

        # NOTE(review): max_f1/max_recall/max_precision/max_label are only
        # referenced by the commented-out BIO scoring above — currently dead.
        max_f1 = 0
        max_recall = 0
        max_precision = 0
        max_label = None

        # NOTE(review): max_acc is shared between the two tasks, so each
        # task only checkpoints when it beats the best accuracy of EITHER
        # task — confirm this is intended.
        max_acc = 0.
        max_ty, max_py = None, None

        for i in range(FLAGS.n_iter):
            # j == 0: one epoch of TOWE training + dev eval;
            # j == 1: one epoch of ABSA training + test eval.
            for j in range(2):
                if j == 0:
                    # NOTE(review): zips TOWE training data with the ABSA
                    # labels tr_y — assumes both training files have the same
                    # number of aligned rows; verify against the data files.
                    batches_train = batch_iter(
                        list(zip(tr_x_train, tr_sen_len_train, tr_target_word_train, tr_tar_len_train, tr_y_train, tr_y,
                                 tr_position_train)), FLAGS.batch_size, 1, True)
                    for batch in batches_train:
                        x_batch, sen_len_batch, target_batch, tar_len_batch, y_batch, y2_batch, position_batch = zip(
                            *batch)
                        train_step_1(i, x_batch, sen_len_batch, target_batch, tar_len_batch, y_batch, y2_batch,
                                     position_batch,
                                     FLAGS.keep_prob1)

                    # NOTE(review): dev data zipped with te_y (ABSA test
                    # labels) — same alignment assumption as above.
                    batches_test = batch_iter(
                        list(zip(tr_x_dev, tr_sen_len_dev, tr_target_word_dev, tr_tar_len_dev, tr_y_dev, te_y,
                                 tr_position_dev)), 2000, 1, False)

                    label_pp, label_tt = [], []
                    cost1 = 0
                    accuracy, number = 0, 0
                    for batch_ in batches_test:
                        te_x_batch, te_sen_len_batch, te_target_batch, te_tar_len_batch, te_y_batch, te_y_2_batch, te_position_batch = zip(
                            *batch_)
                        label_p, label_t, _loss, _acc, num = dev_step_1(te_x_batch, te_sen_len_batch, te_target_batch,
                                                             te_tar_len_batch, te_y_batch, te_y_2_batch,
                                                             te_position_batch)

                        label_pp += label_p
                        label_tt += label_t
                        cost1 += _loss
                        accuracy += _acc
                        number += num

                    # precision, recall, f1 = score_BIO(label_pp, label_tt)
                    # current_step_1 = tf.train.global_step(sess, global_step1)
                    # print("Iter {}: step {}, loss1 {}, precision {:g}, recall {:g}, f1 {:g}".format(
                    #     i, current_step_1, cost1, precision, recall, f1))
                    #
                    # if f1 > max_f1:
                    #     max_f1 = f1
                    #     max_precision = precision
                    #     max_recall = recall
                    #     max_label = label_pp
                    #     path = saver_1.save(sess, checkpoint_prefix_1, global_step=current_step_1)
                    #     print("Saved model checkpoint to {}\n".format(path))
                    # print("topf1 {:g}, precision {:g}, recall {:g}".format(max_f1, max_precision, max_recall))
                    # print("\n")
                    print('all samples={}, correct prediction={}'.format(number, accuracy))
                    acc = accuracy / number
                    cost = cost1 / number
                    current_step_1 = tf.train.global_step(sess, global_step1)
                    print("\nEvaluation1:")
                    print('Iter {}: step {}, loss={}, test acc={:.6f}'.format(i, current_step_1, cost, acc))
                    if acc > max_acc:
                        max_acc = acc
                        max_ty = label_tt
                        max_py = label_pp
                        path = saver_1.save(sess, checkpoint_prefix_1, global_step=current_step_1)
                        print("Saved model checkpoint to {}\n".format(path))
                    # Per-class F1 over the best-so-far predictions, plus a
                    # macro average (requires max_ty to be set, i.e. at least
                    # one improvement over the initial max_acc of 0).
                    F1 = f1_score(max_ty, max_py, average=None)
                    print('F1:', F1, 'avg=', sum(F1) / FLAGS.n_class)
                    print('Max acc={}\n'.format(max_acc))
                else:
                    # NOTE(review): ABSA training data zipped with
                    # tr_y_train (TOWE labels) for the unused y placeholder —
                    # same cross-task alignment assumption; confirm.
                    batches_train = batch_iter(
                        list(zip(tr_x, tr_sen_len, tr_target_word, tr_tar_len, tr_y_train, tr_y, tr_position)),
                        FLAGS.batch_size, 1,
                        True)
                    for batch in batches_train:
                        x_batch, sen_len_batch, target_batch, tar_len_batch, y_1_batch, y_batch, position_batch = zip(
                            *batch)
                        train_step_2(i, x_batch, sen_len_batch, target_batch, tar_len_batch, y_1_batch, y_batch,
                                     position_batch,
                                     FLAGS.keep_prob1)

                    # NOTE(review): ABSA test data zipped with tr_y_dev
                    # (TOWE dev labels) — confirm lengths match.
                    batches_test = batch_iter(
                        list(zip(te_x, te_sen_len, te_target_word, te_tar_len, tr_y_dev, te_y, te_position)), 2000, 1,
                        False)

                    label_pp, label_tt = [], []
                    cost1, accuracy, number = 0, 0, 0

                    for batch_ in batches_test:
                        te_x_batch, te_sen_len_batch, te_target_batch, te_tar_len_batch, te_y_1_batch, te_y_batch, te_position_batch = zip(
                            *batch_)
                        label_p, label_t, _loss, _acc, num = dev_step_2(te_x_batch, te_sen_len_batch, te_target_batch,
                                                                        te_tar_len_batch, te_y_1_batch, te_y_batch,
                                                                        te_position_batch)
                        label_pp += label_p
                        label_tt += label_t
                        cost1 += _loss
                        accuracy += _acc
                        number += num

                    print('all samples={}, correct prediction={}'.format(number, accuracy))
                    acc = accuracy / number
                    cost = cost1 / number
                    current_step_2 = tf.train.global_step(sess, global_step2)
                    print("\nEvaluation2:")
                    print('Iter {}: step {}, loss={}, test acc={:.6f}'.format(i, current_step_2, cost, acc))
                    if acc > max_acc:
                        max_acc = acc
                        max_ty = label_tt
                        max_py = label_pp
                        path = saver_2.save(sess, checkpoint_prefix_2, global_step=current_step_2)
                        print("Saved model checkpoint to {}\n".format(path))
                    F1 = f1_score(max_ty, max_py, average=None)
                    print('F1:', F1, 'avg=', sum(F1) / FLAGS.n_class)
                    print('Max acc={}\n'.format(max_acc))

        # fp = open(FLAGS.prob_file, 'w')
        # for ws in max_label:
        #     fp.write(' '.join([str(w) for w in ws]) + '\n')


if __name__ == '__main__':
    # tf.app.run parses flags and invokes main(_) (TF1-style entry point).
    tf.app.run()
