import tensorflow as tf
from bert import modeling
import os
import create_input
import tokenization
import numpy as np


def deal_with_data(tokenizer, x_train, max_seq_length):
    """Tokenize every sample in x_train into fixed-length BERT features.

    Each sample is converted via create_input.convert_single_example into a
    (token ids, attention mask, segment ids) triple of length max_seq_length.

    Returns three int32 numpy arrays (ids, masks, segment ids), one row per
    input sample; all three are empty arrays when x_train is empty.
    """
    features = [
        create_input.convert_single_example(max_seq_length, tokenizer, sample)
        for sample in x_train
    ]
    input_ids = np.asarray([f[0] for f in features], dtype=np.int32)
    input_masks = np.asarray([f[1] for f in features], dtype=np.int32)
    segment_ids = np.asarray([f[2] for f in features], dtype=np.int32)
    return input_ids, input_masks, segment_ids

class rebuild_bert(object):
    """Sentence classifier built on top of a pre-trained BERT encoder.

    The graph is built once in ``__init__`` (BERT + one dense softmax layer);
    ``train``/``test``/``predict`` run session loops over it. ``load_data``
    must be called first: it tokenizes raw text into the ``train_*``/``test_*``
    feature arrays that the loops index into.
    """

    def __init__(self, bert_config, batch_size, max_seq_length, is_training, categories, learning_rate):
        """Build the classification graph.

        Args:
            bert_config: ``modeling.BertConfig`` of the pre-trained model.
            batch_size: fixed batch size baked into every placeholder.
            max_seq_length: fixed token length of every example.
            is_training: when True, applies dropout to the pooled output.
            categories: number of target classes.
            learning_rate: Adam learning rate.
        """
        self.categories = categories
        self.batch_size = batch_size
        self.max_seq_length = max_seq_length
        # BERT inputs: token ids, attention mask, segment (token type) ids.
        self.input_ids = tf.placeholder(shape=[self.batch_size, self.max_seq_length], dtype=tf.int32, name="input_ids")
        self.input_mask = tf.placeholder(shape=[self.batch_size, self.max_seq_length], dtype=tf.int32, name="input_mask")
        self.segment_ids = tf.placeholder(shape=[self.batch_size, self.max_seq_length], dtype=tf.int32, name="segment_ids")
        self.is_training = is_training
        self.learning_rate = learning_rate
        self.bert_config = bert_config
        # BUG FIX: this placeholder was named "input_ids", colliding with the
        # token-ids placeholder above (TF silently uniquified it to a
        # misleading graph name).
        self.input_labels = tf.placeholder(shape=batch_size, dtype=tf.int32, name="input_labels")
        # Build the BERT model.
        self.model = modeling.BertModel(
            config=self.bert_config,
            is_training=self.is_training,
            input_ids=self.input_ids,
            input_mask=self.input_mask,
            token_type_ids=self.segment_ids,
            use_one_hot_embeddings=False  # True is faster on TPU; False is faster on CPU/GPU.
        )

        # get_sequence_output() ([batch, seq, hidden]) would be the choice for
        # seq2seq or NER; for whole-sentence classification use the pooled output.
        self.output_layer = self.model.get_pooled_output()
        self.hidden_size = self.output_layer.shape[-1].value  # embedding width
        # Single dense classification layer on top of BERT.
        self.output_weights = tf.get_variable(
            "output_weights", [categories, self.hidden_size],
            initializer=tf.truncated_normal_initializer(stddev=0.02))
        self.output_bias = tf.get_variable(
            "output_bias", [categories], initializer=tf.zeros_initializer())
        with tf.variable_scope("loss"):
            if self.is_training:
                # I.e., 0.1 dropout, training only.
                self.output_layer = tf.nn.dropout(self.output_layer, keep_prob=0.9)
            self.logits = tf.matmul(self.output_layer, self.output_weights, transpose_b=True)
            self.logits = tf.nn.bias_add(self.logits, self.output_bias)
            self.log_probs = tf.nn.log_softmax(self.logits, axis=-1)
            self.one_hot_labels = tf.one_hot(self.input_labels, depth=categories, dtype=tf.float32)
            # Cross-entropy between one-hot labels and log-probabilities.
            self.per_example_loss = -tf.reduce_sum(self.one_hot_labels * self.log_probs, axis=-1)
            self.loss = tf.reduce_mean(self.per_example_loss)
            # BUG FIX: this tensor used to be stored as ``self.predict``, which
            # shadowed the ``predict`` method on every instance and made it
            # uncallable.
            self.predictions = tf.argmax(tf.nn.softmax(self.logits), axis=1, name="predictions")
            self.acc = tf.reduce_mean(
                tf.cast(tf.equal(self.input_labels, tf.cast(self.predictions, dtype=tf.int32)), "float"),
                name="accuracy")

        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

    def _restore_from_checkpoint(self, init_checkpoint):
        """Map trainable variables onto a BERT checkpoint and log the mapping."""
        print("加载模型")
        # All trainable variables of the graph.
        tvars = tf.trainable_variables()
        # Load the BERT checkpoint.
        (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            # BUG FIX: the membership check and the log call were dedented out
            # of this loop, so only the last variable was ever reported.
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape, init_string)

    def train(self, x_train, y_train, x_test, y_test, init_checkpoint, iter_num, prior_iter_num, iter_per_valid=20):
        """Run ``iter_num`` random-batch training steps, validating every
        ``iter_per_valid`` steps.

        Requires ``load_data`` to have been called. Saves a checkpoint under
        ./saved_models whenever validation accuracy improves.
        ``prior_iter_num`` only offsets the iteration number in the
        checkpoint file name (for resumed runs).
        """
        self._restore_from_checkpoint(init_checkpoint)
        max_acc = 0
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(max_to_keep=3)
            count = 0
            total_loss = 0
            total_a = 0
            for i in range(iter_num):
                # Sample one random batch (no repeats within the batch).
                shuff_index = np.random.permutation(np.arange(len(x_train)))[:self.batch_size]
                batch_labels = y_train[shuff_index]
                l, a, _ = sess.run([self.loss, self.acc, self.train_op], feed_dict={
                    self.input_ids: self.train_input_idsList[shuff_index],
                    self.input_mask: self.train_input_masksList[shuff_index],
                    self.segment_ids: self.train_segment_idsList[shuff_index],
                    self.input_labels: batch_labels
                })
                total_loss += l
                total_a += a
                count += 1
                print("训练集 当前准确率:%.4f,当前损失函数:%.4f,平均准确率:%.4f,平均损失:%.4f" % (a, l, total_a / count, total_loss / count))
                if i % iter_per_valid == 0 and i != 0:
                    print("测试验证集")
                    # Running training averages restart after each validation.
                    total_loss = 0
                    total_a = 0
                    count = 0
                    # Trailing examples that do not fill a batch are dropped.
                    test_iter_num = len(x_test) // self.batch_size
                    # BUG FIX: avg_* were undefined (NameError) whenever x_test
                    # held fewer than batch_size rows.
                    avg_loss = 0.0
                    avg_acc = 0.0
                    l_index = 0
                    for _ in range(test_iter_num):
                        index = np.arange(l_index, l_index + self.batch_size, 1)
                        l_index += self.batch_size
                        batch_labels = y_test[index]
                        l, a = sess.run([self.loss, self.acc], feed_dict={
                            self.input_ids: self.test_input_idsList[index],
                            self.input_mask: self.test_input_masksList[index],
                            self.segment_ids: self.test_segment_idsList[index],
                            self.input_labels: batch_labels
                        })
                        total_loss += l
                        total_a += a
                        count += 1
                        avg_loss = total_loss / count
                        avg_acc = total_a / count
                    print("验证集 平均准确率:%.4f,平均损失:%.4f" % (avg_acc, avg_loss))
                    if max_acc < avg_acc:
                        max_acc = avg_acc
                        saver.save(sess=sess, save_path=os.path.join("./saved_models", 'model_iter' + str(
                                i + prior_iter_num) + 'ac=' + str(avg_acc) + ".ckpt"))
                        print("保存此模型")

    def test(self, x_test, y_test, init_checkpoint):
        """Evaluate on the test set, printing running average loss/accuracy.

        Requires ``load_data`` to have been called with the same ``x_test``
        (the loop indexes the precomputed ``test_*`` feature arrays).
        Trailing examples that do not fill a whole batch are dropped.
        """
        self._restore_from_checkpoint(init_checkpoint)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            count = 0
            total_loss = 0
            total_a = 0
            print("测试验证集")
            test_iter_num = len(x_test) // self.batch_size
            l_index = 0
            for _ in range(test_iter_num):
                index = np.arange(l_index, l_index + self.batch_size, 1)
                l_index += self.batch_size
                batch_labels = y_test[index]
                l, a = sess.run([self.loss, self.acc], feed_dict={
                    self.input_ids: self.test_input_idsList[index],
                    self.input_mask: self.test_input_masksList[index],
                    self.segment_ids: self.test_segment_idsList[index],
                    self.input_labels: batch_labels
                })
                total_loss += l
                total_a += a
                count += 1
                avg_loss = total_loss / count
                avg_acc = total_a / count
                print("验证集 平均准确率:%.4f,平均损失:%.4f" % (avg_acc, avg_loss))

    def predict(self, x, init_checkpoint):
        """Predict class indices batch by batch; returns them as an array of
        shape (num_batches, batch_size).

        NOTE(review): despite taking ``x``, this indexes the ``test_*``
        feature arrays prepared by ``load_data`` — ``x`` only determines how
        many batches run. Call ``load_data`` with ``x`` as ``x_test`` first;
        confirm whether ``x`` should instead be tokenized here.
        """
        rs = []
        self._restore_from_checkpoint(init_checkpoint)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            print("预测")
            test_iter_num = len(x) // self.batch_size
            l_index = 0
            for _ in range(test_iter_num):
                index = np.arange(l_index, l_index + self.batch_size, 1)
                l_index += self.batch_size
                # BUG FIX: sess.run([tensor]) returned a one-element list and
                # the old "%d" format raised TypeError on it; fetch the tensor
                # directly and print with %s.
                batch_predictions = sess.run(self.predictions, feed_dict={
                    self.input_ids: self.test_input_idsList[index],
                    self.input_mask: self.test_input_masksList[index],
                    self.segment_ids: self.test_segment_idsList[index],
                })
                print("predict_index %s" % (batch_predictions,))
                rs.append(batch_predictions)
        return np.array(rs)

    def load_data(self, x_train, x_test, vocab_file):
        """Tokenize raw train/test texts into the feature arrays used by the
        train/test/predict loops.

        ``vocab_file`` is the path to the BERT vocabulary file consumed by
        ``tokenization.FullTokenizer`` (splits text into tokens, maps to ids).
        """
        tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file)
        # Tokenize the training set.
        self.train_input_idsList, self.train_input_masksList, self.train_segment_idsList = deal_with_data(tokenizer, x_train, self.max_seq_length)
        # Tokenize the test set.
        self.test_input_idsList, self.test_input_masksList, self.test_segment_idsList = deal_with_data(tokenizer, x_test, self.max_seq_length)