#!/usr/bin/env python
# coding: utf-8
import tensorflow as tf
from bert import modeling
from transformers import BertTokenizer
from TNews_Loader import *

class adict(dict):
    """Attribute dictionary: a dict whose entries can also be read and
    written as attributes (similar to types.SimpleNamespace, but still a
    real dict).

    Works by pointing the instance ``__dict__`` at the dict itself, so
    ``d.key`` and ``d["key"]`` are the same storage.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Alias attribute storage to the mapping itself: attribute access
        # and item access now read/write the same underlying dict.
        self.__dict__ = self

def create_model(model, is_training, labels, num_labels=14):
    """Creates a classification model.

    Adds a softmax classification head on top of a (already constructed)
    BERT model and builds the cross-entropy loss.

    Args:
        model: a `modeling.BertModel` instance (graph already built).
        is_training: Python bool; when True, dropout (rate 0.1) is applied
            to the pooled output before the classifier.
        labels: int32 tensor of shape [batch_size] with class indices.
        num_labels: number of output classes (default 14).

    Returns:
        Tuple `(loss, per_example_loss, logits, probabilities)` where
        `loss` is the scalar mean cross-entropy, `per_example_loss` has
        shape [batch_size], and `logits`/`probabilities` have shape
        [batch_size, num_labels].
    """

    # In the demo, we are doing a simple classification task on the entire
    # segment.
    #
    # If you want to use the token-level output, use model.get_sequence_output()
    # instead.
    output_layer = model.get_pooled_output()

    # TF1: `shape[-1]` is a Dimension object; `.value` extracts the int.
    hidden_size = output_layer.shape[-1].value

    # Classifier weights, initialized like BERT's own dense layers.
    output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

    output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

    # NOTE(review): only the dropout sits inside the "loss" variable scope;
    # the matmul/loss ops below are outside it (upstream BERT keeps them
    # inside). Cosmetic for op names only — no variables are created here.
    with tf.variable_scope("loss"):
        if is_training:
          # I.e., 0.1 dropout
          output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    # One-hot cross-entropy: -sum(one_hot * log_softmax) per example.
    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, logits, probabilities)

def TopicTrainGraph(max_sent_len=512):
    """Build the TF1 training graph for BERT topic classification.

    Loads the BERT config/checkpoint from ./publish/, creates input
    placeholders, the classification head (via `create_model`), an
    accuracy metric and an Adagrad train op.

    Args:
        max_sent_len: sequence length of the input placeholders
            (default 512, matching the data readers). Previously this was
            hard-coded, which made the `TopicTrainGraph(max_sent_len=...)`
            call site crash with a TypeError.

    Returns:
        An `adict` exposing the placeholders (`input_ids`, `input_mask`,
        `token_type_ids`, `label_ids`) and the tensors/ops needed for
        training (`predictions`, `probabilities`, `logits`, `total_loss`,
        `global_step`, `train_op`, `accuracy`).
    """
    bert_config = modeling.BertConfig.from_json_file("./publish/bert_config.json")
    # Placeholders: [batch, seq_len] token ids / mask / segment ids, and
    # [batch] integer class labels.
    input_ids = tf.placeholder(shape=[None, max_sent_len], dtype=tf.int32, name="input_ids")
    input_mask = tf.placeholder(shape=[None, max_sent_len], dtype=tf.int32, name="input_mask")
    segment_ids = tf.placeholder(shape=[None, max_sent_len], dtype=tf.int32, name="segment_ids")
    label_ids = tf.placeholder(shape=[None], dtype=tf.int32, name="labels")
    is_training = True  # graph is built for training only (dropout enabled)
    model = modeling.BertModel(
        config=bert_config,
        is_training=True,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=True)

    # Warm-start every matching variable from the pretrained checkpoint.
    init_checkpoint = "./publish/bert_model.ckpt"
    tvars = tf.trainable_variables()
    (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,
                                                                                               init_checkpoint)
    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
        init_string = ""
        if var.name in initialized_variable_names:
            init_string = ", *INIT_FROM_CKPT*"
        tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                        init_string)

    (total_loss, per_example_loss, logits, probabilities) = create_model(
        model, is_training, label_ids, num_labels=14)

    predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
    # Uniform per-example weights; tf.metrics.accuracy returns the
    # (value, update_op) pair — callers run it to accumulate accuracy.
    weights = tf.ones(tf.shape(label_ids), dtype=tf.float32)
    accuracy = tf.metrics.accuracy(labels=label_ids, predictions=predictions, weights=weights)

    global_step = tf.Variable(0, name="global_step", trainable=False)
    train_op = tf.train.AdagradOptimizer(0.00002).minimize(total_loss, global_step)
    return adict(
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        label_ids=label_ids,
        predictions=predictions,
        probabilities=probabilities,
        logits=logits,
        total_loss=total_loss,
        global_step=global_step,
        train_op=train_op,
        accuracy=accuracy
    )

def TrainTopicBert(tr_reader, val_reader, test_reader, max_sent_len):
    """Train the BERT topic classifier, validating each epoch.

    Runs `t_epochs` epochs over `tr_reader`; after each epoch evaluates on
    `val_reader`, and whenever validation accuracy improves, evaluates on
    `test_reader` and checkpoints the model to ./thu_model.ckpt.

    Args:
        tr_reader / val_reader / test_reader: batch readers whose `.iter()`
            yields (input_ids, input_mask, labels) numpy batches.
        max_sent_len: expected sequence length of the reader batches.
            NOTE(review): the graph placeholders are built with their
            default length (512); batches must match that — confirm the
            readers are configured accordingly.
    """
    # Steps per epoch (integer; previously float `/` was fed to a %d format).
    train_steps = len(tr_reader.label_ids) // tr_reader.batchsize
    t_epochs = 10
    best_acc = 0.0

    # BUG FIX: TopicTrainGraph() takes no positional data args; calling it
    # with max_sent_len as a keyword crashed before — rely on its default.
    graph = TopicTrainGraph()
    # tf.metrics.accuracy keeps running totals in LOCAL_VARIABLES; re-init
    # them before each evaluation pass so accuracy doesn't leak across sets.
    validation_metrics_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)
    acc_init = tf.variables_initializer(var_list=validation_metrics_vars)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(acc_init)
        for epoch in range(t_epochs):
            for x, m, y in tr_reader.iter():
                s = np.zeros_like(x)  # single-segment input: all token_type_ids are 0
                feed_dict = {graph.input_ids: x, graph.label_ids: y, graph.token_type_ids: s, graph.input_mask: m}
                acc, loss, _, step = sess.run([graph.accuracy, graph.total_loss, graph.train_op, graph.global_step], feed_dict=feed_dict)
                # acc is (value, update_op result): acc[0] lags by one batch, acc[1] is current running accuracy.
                print('[%5d/%5d] | %5d, loss/acc = %6.8f/%6.7f, %6.7f ' % (step, train_steps, epoch,
                                                                           loss, acc[0], acc[1])
                      )

            losses = []
            sess.run(acc_init)
            for x, m, y in val_reader.iter():
                s = np.zeros_like(x)
                feed_dict = {graph.input_ids: x, graph.label_ids: y, graph.token_type_ids: s, graph.input_mask: m}
                acc, loss = sess.run([graph.accuracy, graph.total_loss], feed_dict=feed_dict)
                losses.append(loss)

            val_acc = acc[-1]  # running accuracy after the last update
            # BUG FIX: average the collected per-batch losses, not just the
            # final batch's loss.
            val_loss = np.mean(losses)

            print("epoch:%5d,  valid accuracy/loss = %6.7f/%6.8f" % (epoch, val_acc, val_loss))
            if val_acc > best_acc:
                best_acc = val_acc
                losses = []
                sess.run(acc_init)
                for x, m, y in test_reader.iter():
                    s = np.zeros_like(x)
                    feed_dict = {graph.input_ids: x, graph.label_ids: y, graph.token_type_ids: s, graph.input_mask: m}
                    acc, loss = sess.run([graph.accuracy, graph.total_loss], feed_dict=feed_dict)
                    losses.append(loss)
                print("#####test reader##### test acc/test loss= %6.7f/%6.7f"%(acc[-1], np.mean(losses)))
                saver.save(sess, "./thu_model.ckpt", global_step=epoch)


# Script entry: load the tokenizer, build the THUCnews readers, and train.
tokenizer = BertTokenizer.from_pretrained("./publish/")
test_file = './THUCnews/cnews.test.txt'
val_file = './THUCnews/cnews.val.txt'
train_file = './THUCnews/cnews.train.txt'
# FIX: use the path variables defined above instead of re-typing the
# literals (they were previously defined but unused).
train_reader = THUReader(train_file, batchsize=20, max_seq_length=512, tokenizer=tokenizer)
val_reader = THUReader(val_file, batchsize=20, max_seq_length=512, tokenizer=tokenizer)
test_reader = THUReader(test_file, batchsize=20, max_seq_length=512, tokenizer=tokenizer)

TrainTopicBert(train_reader, val_reader, test_reader, max_sent_len=train_reader.max_seq_length)







