import json
import numpy as np
# import pandas as pd
from keras_bert import Tokenizer, load_vocabulary, load_trained_model_from_checkpoint, get_checkpoint_paths
import tensorflow as tf
import keras
import keras.backend as K
from keras.preprocessing import sequence
from keras.layers import Dense, Input, Masking, Concatenate, Lambda, Multiply, Activation
from keras import Model
import jieba
import editdistance
import os
# import matplotlib.pyplot as plt


# 1 read data
# Paths to the TableQA-style training data and to the pretrained Chinese
# BERT (whole-word-masking, 12-layer) checkpoint files.
table_filename = '../data/train/train.tables.json'
data_filename = '../data/train/train.json'
get_data_filename = '../data/train_data.json'
dict_filename = '../data/chinese_wwm_ext_L-12_H-768_A-12/vocab.txt'
config_path = '../data/chinese_wwm_ext_L-12_H-768_A-12/bert_config.json'
model_path = '../data/chinese_wwm_ext_L-12_H-768_A-12/bert_model.ckpt'
# table_list = []
# data_list = []
# Peak learning rate for warmup and the floor the decay falls back to;
# both are consumed by the Evaluate callback defined further down.
learning_rate = 5e-5
min_learning_rate = 1e-5
max_len = 0

# with open(table_filename, encoding='utf-8') as f:
#     for line in f:
#         line = json.loads(line)
#         table_list.append(line)
#
# with open(data_filename, encoding='utf-8') as f:
#     for line in f.readlines():
#         line = json.loads(line)
#         for i in range(len(table_list)):
#             if line.get('table_id') == table_list[i].get('id'):
#                 line["header"] = table_list[i].get("header")
#                 line['types'] = table_list[i].get('types')
#         data_list.append(line)

# save data to a json file
# with open('train_data.json', 'w', encoding='utf-8') as f:
#     for i in range(len(data_list)):
#         f.write(json.dumps(data_list[i], ensure_ascii=False)+'\n')

# question + [real/text] header [seq]


# NOTE(review): get_data_id is neither defined nor imported in this file —
# presumably it lives in a sibling module; as written this line raises
# NameError. TODO confirm and restore the missing import.
data = get_data_id(get_data_filename)
# print("token_id_list", data[0][0])
# print("segment_id_list", data[1][0])
# print("header_id_list", data[2][0])
# print("header_mask_list", data[3][0])
# print("output_cond_conn_op_list", data[4][0])
# print("output_sel_agg_list", data[5][0])
# print("output_cond_cel_list", data[6][0])
# print("value_op_list", data[7][0])
# print("value_mask_list", data[8][0])

class Data_Sequece(keras.utils.Sequence):
    """Batched data feeder for ``model.fit``.

    Wraps the pre-tokenised training arrays (nine parallel lists produced
    by ``get_data_id``) and serves them one batch at a time.  Because the
    model attaches its loss via ``model.add_loss()``, the target arrays
    are fed in as *inputs* as well as returned as targets.
    """

    def __init__(self, data, batch_size):
        """
        Args:
            data: sequence of 9 parallel lists in this order — token ids,
                segment ids, header ids, header mask, cond_conn_op,
                sel_agg, cond_cel, value_op, value mask.
            batch_size: number of samples per batch.
        """
        # Fixed: call the base-class initializer (was missing).
        super().__init__()
        self.data = data
        self.batch_size = batch_size
        # Ceiling division: a trailing partial batch still counts as a step.
        full, remainder = divmod(len(self.data[0]), self.batch_size)
        self.steps = full + (1 if remainder else 0)

    def __len__(self):
        return self.steps

    def __getitem__(self, item):
        lo = self.batch_size * item
        hi = lo + self.batch_size
        # Slice all nine parallel lists with one expression instead of
        # nine copy-pasted slicing statements.
        (token_ids, segment_ids, header_ids, header_mask,
         cond_conn_op, sel_agg, cond_cel, value_op, value_mask) = (
            part[lo:hi] for part in self.data)
        # Targets are duplicated into the input list because the loss is
        # wired up with model.add_loss() rather than compile(loss=...).
        inputs = [token_ids, segment_ids, header_ids, header_mask, value_mask,
                  cond_conn_op, sel_agg, cond_cel, value_op]
        targets = [cond_conn_op, sel_agg, cond_cel, value_op]
        return inputs, targets

    def __iter__(self):
        """Create a generator that iterates over the Sequence."""
        # Fixed: removed the leftover debug print() that wrote every batch
        # to stdout whenever the sequence was iterated directly.
        for item in (self[i] for i in range(len(self))):
            yield item


data_seq = Data_Sequece(data, 4)  # batch size 4


# Output-space sizes for the three classification heads.
num_sel_agg = 7       # presumably SELECT-aggregation classes incl. "not selected" — TODO confirm
num_cond_op = 5       # presumably WHERE condition-operator classes — TODO confirm
num_cond_conn_op = 3  # presumably condition connectors (e.g. none/and/or) — TODO confirm

def seq_gather(x):
    """Gather vectors out of a sequence tensor at the given coordinates.

    `x` is a pair (seq, idxs): `seq` is the encoded sequence and `idxs`
    holds the integer coordinates consumed by ``tf.gather_nd``; they are
    cast to int32 because gather indices must be integral.
    """
    encoded, coords = x
    return tf.gather_nd(encoded, K.cast(coords, 'int32'))

# Load the pretrained Chinese BERT encoder and fine-tune every layer.
bert_model = load_trained_model_from_checkpoint(config_path, model_path, seq_len=None)
for l in bert_model.layers:
    l.trainable = True

# Model inputs.  The output_* / value_op_in tensors carry the training
# targets; they enter as inputs because the loss is attached with
# model.add_loss() further below.
inp_token_ids = Input(shape=(None,), name='input_token_ids', dtype='int32')
inp_segment_ids = Input(shape=(None,), name='input_segment_ids', dtype='int32')
inp_header_ids = Input(shape=(None, 2), name='input_header_ids', dtype='int32')
inp_header_mask = Input(shape=(None,), name='input_header_mask')
value_mask = Input(shape=(None,), name="value_mask")  # mask over the value/column token positions
output_cond_conn_op_in = Input(shape=(1,), name="output_cond_conn_op_in", dtype='int32')
output_sel_agg_in = Input(shape=(None, 1), name="output_sel_agg_in", dtype='int32')
output_cond_cel_in = Input(shape=(None,), name="output_cond_cel_in", dtype='int32')
value_op_in = Input(shape=(None,), name="value_op_in", dtype='int32')


x = bert_model([inp_token_ids, inp_segment_ids])  # (None, seq_len, 768)
value_masks = Lambda(lambda x: tf.expand_dims(x, axis=-1), name="reduce_value_mask")(value_mask) # (None, value, 1)
value_x = Multiply()([x, value_masks])  # zero out positions outside the value mask

# 1 predict cond_conn_op from the [CLS] vector
x_for_cond_conn_op = Lambda(lambda x: x[:, 0], name="recude_cls")(x)  # (None, 768)
p_cond_conn_op = Dense(num_cond_conn_op, activation='softmax', name='output_cond_conn_op')(x_for_cond_conn_op)

# 2 predict value_op
# x:(None, seq_len, 768)  inp_header_ids:(None, h_len, 1) to x_for_header (None, h_len, 768)
x_for_header = Lambda(seq_gather, name='header_seq_gather')([x, inp_header_ids])
header_mask = Lambda(lambda x: K.expand_dims(x, axis=-1), name="reduce_header_mask")(inp_header_mask)  # (None, h_len, 1)
x_for_header = Multiply()([x_for_header, header_mask])
# reduce_mean for columns
columns_mean = Lambda(lambda x: tf.reduce_mean(x, axis=1), name="reduce_clomns_mean")(x_for_header)
# value encoding + column-name encoding, mixed with fixed 0.2/0.8 weights
columns_mean_expand = Lambda(lambda x:tf.expand_dims(x, axis=1), name="reduce_columns_mean_expand")(columns_mean)
value_op = Lambda(lambda x: 0.2 * x[0] + 0.8 * x[1], name="reduce_value_op")([columns_mean_expand, value_x])
# value_op = Masking()(value_op)
# Push masked-out positions toward zero softmax probability.
# NOTE(review): the penalty here (and for p_sel_agg) is 10, while the
# analogous one for p_cond_cel below is 1e10 — confirm the difference
# is intentional.
value_op = Lambda(lambda x: x[0] - (1 - x[1]) * 10)([value_op, value_masks])
value_op = Dense(num_cond_op, activation="softmax", name="value_op")(value_op)

# 3 predict sel_agg (one class per header column)
# x_for_header = Masking()(x_for_header)
p_sel_agg = Lambda(lambda x: x[0]- (1 - x[1]) * 10)([x_for_header, header_mask])
p_sel_agg = Dense(num_sel_agg, activation='softmax', name='output_sel_agg')(p_sel_agg)

# 4 predict cond_cel: additive-attention-style score between every
# question token and every header vector, softmax-normalised over headers.
x = Lambda(lambda x: tf.expand_dims(x, 2))(x)
x_for_header = Lambda(lambda x: tf.expand_dims(x, 1))(x_for_header)
pcsel_1 = Dense(64)(x)
pcsel_2 = Dense(64)(x_for_header)
pcsel = Lambda(lambda x: 0.8 * x[0] + 0.2 * x[1])([pcsel_1, pcsel_2])
pcsel = Activation('tanh')(pcsel)
p_cond_cel = Dense(1)(pcsel)
p_cond_cel = Lambda(lambda x: x[0][..., 0] - (1 - x[1]) * 1e10)([p_cond_cel, header_mask])
p_cond_cel = Activation('softmax', name="output_cond_cel")(p_cond_cel)  # normalise

model = Model(
    [inp_token_ids, inp_segment_ids, inp_header_ids, inp_header_mask, value_mask,
     output_cond_conn_op_in, output_sel_agg_in, output_cond_cel_in, value_op_in],
    [p_cond_conn_op, p_sel_agg, p_cond_cel, value_op]
)

def my_sparse_categorical_crossentropy(y_true, y_pred):
    """Sparse categorical cross-entropy averaged twice along the last axis
    (per position, then per sample)."""
    per_position = keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
    per_sample = tf.reduce_mean(per_position, axis=-1)
    return tf.reduce_mean(per_sample, axis=-1)

def my_sparse_categorical_crossentropy_v2(y_true, y_pred):
    """Sparse categorical cross-entropy averaged once along the last axis."""
    ce = keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
    return tf.reduce_mean(ce, axis=-1)

# Per-head losses, combined manually and attached with model.add_loss().
output_cond_conn_loss = my_sparse_categorical_crossentropy_v2(output_cond_conn_op_in, p_cond_conn_op)
output_sel_agg_loss = my_sparse_categorical_crossentropy(output_sel_agg_in,p_sel_agg)
# Intended: average only over real (unmasked) header positions.
# NOTE(review): my_sparse_categorical_crossentropy already reduces over
# both trailing axes, so the value multiplied by the mask here looks like
# a scalar rather than per-position losses — confirm the masked average
# behaves as intended.
output_sel_agg_loss = tf.reduce_sum(output_sel_agg_loss * inp_header_mask)/tf.reduce_sum(inp_header_mask)
output_cond_cel_loss = my_sparse_categorical_crossentropy(output_cond_cel_in,p_cond_cel)
output_cond_cel_loss = tf.reduce_sum(output_cond_cel_loss * value_mask)/tf.reduce_sum(value_mask)
value_op_loss = my_sparse_categorical_crossentropy(value_op_in, value_op)
value_op_loss = tf.reduce_sum(value_op_loss * value_mask)/tf.reduce_sum(value_mask)
loss = output_cond_conn_loss + output_sel_agg_loss + output_cond_cel_loss + value_op_loss
model.add_loss(loss)
# keras.utils.plot_model(model, to_file="model.png", show_shapes=True)
# # https://blog.csdn.net/fanzy1234/article/details/89054080
#

# NOTE(review): compile() gets no `loss` (the loss comes from add_loss
# above); whether the per-output `metrics` dict is honoured in that setup
# depends on the Keras version — verify the metrics actually report.
model.compile(
    # loss={
    #     "output_cond_conn_op": my_sparse_categorical_crossentropy_v2,  # 3
    #     "output_sel_agg": my_sparse_categorical_crossentropy,  # 7
    #     "output_cond_cel": my_sparse_categorical_crossentropy,  # 1
    #     "value_op": my_sparse_categorical_crossentropy  # 5
    # },
    optimizer=keras.optimizers.Adam(lr=learning_rate),  # `lr` is the legacy kwarg; newer Keras spells it `learning_rate`
    metrics={
        "output_cond_conn_op": 'accuracy',
        "output_sel_agg": 'accuracy',
        "output_cond_cel": 'accuracy',
        "value_op": 'accuracy'
    },
    # loss_weights=[1, 1, 1, 1]
)
class Evaluate(keras.callbacks.Callback):
    """Learning-rate schedule callback: warm up over the first epoch,
    then decay linearly to ``min_learning_rate`` over the second epoch.
    Subsequent epochs keep the last learning rate.
    """

    def __init__(self):
        # Fixed: call the Callback base initializer (was missing) so the
        # attributes the framework relies on are properly initialised.
        super().__init__()
        self.accs = []
        self.best = 0.
        self.passed = 0   # number of batches seen so far, across epochs
        self.stage = 0

    def on_batch_begin(self, batch, logs=None):
        """First epoch warms the LR up; the second decays it to the minimum."""
        steps = self.params['steps']
        if self.passed < steps:
            # Linear warmup: 0 -> learning_rate over the first epoch.
            lr = (self.passed + 1.) / steps * learning_rate
            print(f"\t self.passed is {self.passed} self.params['steps'] is {self.params['steps']} lr is :{lr}")
            K.set_value(self.model.optimizer.lr, lr)
            self.passed += 1
        elif steps <= self.passed < steps * 2:
            # Linear decay: learning_rate -> min_learning_rate over epoch 2.
            lr = (2 - (self.passed + 1.) / steps) * (learning_rate - min_learning_rate)
            lr += min_learning_rate
            K.set_value(self.model.optimizer.lr, lr)
            self.passed += 1
#
# checkpoint_save_path = '../data/checkpoint/bert2sql.ckpt'
# if os.path.exists(checkpoint_save_path + '.index'):
#     print("----------load model------------------")
#     model.load_weights(checkpoint_save_path)
#
#
# # def scheduler(epoch, lr):
# #     return lr * 0.9 ** (epoch)
#
#
# cp_callback = keras.callbacks.ModelCheckpoint(
#     filepath=checkpoint_save_path,
#     save_best_only=True,save_weights_only=True
# )
# #early_stop = keras.callbacks.EarlyStopping(verbose=1, patience=2)
# #lrs_callback = keras.callbacks.LearningRateScheduler(scheduler)
# tb_callback = keras.callbacks.TensorBoard('./logs')

# Train for one epoch with the warmup/decay LR schedule attached.
evaluate = Evaluate()
EPOCHS = 1
model.summary()
history = model.fit(data_seq, epochs=EPOCHS, steps_per_epoch=len(data_seq), callbacks=[evaluate])
#
# metrics = history.history
# figs, axes = plt.subplots(1,2, figsize=(10, 12))
# axes[0].plot(history.epoch, metrics['loss'], metrics['output_cond_conn_op_loss'],
#              metrics['output_sel_agg_loss'], metrics['output_cond_cel_loss'], metrics['value_op_loss'])
# axes[1].plot(history.epoch, metrics['accuracy'], metrics['output_cond_conn_op_accuracy'],
#              metrics['output_sel_agg_accuracy'], metrics['output_cond_cel_accuracy'], metrics['value_op_accuracy'])
# axes[0].legend(['loss', 'output_cond_conn_op_loss','output_sel_agg_loss',
#                 'output_cond_cel_loss', 'output_cond_conn_op_accuracy'])
# axes[1].legend(['accuracy', 'output_cond_conn_op_accuracy', 'output_sel_agg_accuracy',
#                 'output_cond_cel_accuracy', 'value_op_accuracy'])
# plt.show()


# a, b, c, d = data_seq_iter[1]["output_cond_conn_op"], data_seq_iter[1]["output_sel_agg"], \
#              data_seq_iter[1]["output_cond_cel"], data_seq_iter[1]["value_op"]
# a1, b1, c1, d1 = model.predict([data_seq_iter[0]["input_token_ids"], data_seq_iter[0]["input_segment_ids"],
#                                 data_seq_iter[0]["input_header_ids"], data_seq_iter[0]["input_header_mask"],
#                                 data_seq_iter[0]["value_mask"]])
# a1 = tf.squeeze(a1, axis=0)
# b1 = tf.squeeze(b1, axis=0)
# c1 = tf.squeeze(c1, axis=0)
# d1 = tf.squeeze(d1, axis=0)
# print(a, b , c, d)
# print(a1, b1, c1, d1)
# print("cond_conn_op", tf.reduce_mean(keras.losses.sparse_categorical_crossentropy(a, a1), axis=-1))
# print("sel_agg", tf.reduce_mean(tf.reduce_mean(keras.losses.sparse_categorical_crossentropy(b, b1), axis=-1), axis=-1))
# print("cond_sel", tf.reduce_mean(tf.reduce_mean(keras.losses.sparse_categorical_crossentropy(c, c1), axis=-1), axis=-1))
# print("cond_sel_and_ap", tf.reduce_mean(tf.reduce_mean(keras.losses.sparse_categorical_crossentropy(d, d1), axis=-1), axis=-1))

# loss: 8.0099 - output_cond_conn_op_loss: 1.2864 - output_sel_agg_loss: 0.1036
# - output_cond_cel_loss: 5.0347 - value_op_loss: 1.5852
# - output_cond_conn_op_accuracy: 0.3909 - output_sel_agg_accuracy: 0.0465
# - output_cond_cel_accuracy: 0.9247 - value_op_accuracy: 0.5789


# 5.8
'''
Changelog:
1. Removed the Masking layer — fixes the issue where skipping zero-valued
   positions made the sel_agg head constantly predict 4 for those slots.
2. Stopped computing the loss through model.compile() and switched to
   registering a combined loss with model.add_loss().
3. Added the (padding) masks into the model's loss computation.
4. Added the softmax-based adjustment to the model's accuracy computation.
'''