import json
import numpy as np
# import pandas as pd
from keras_bert import Tokenizer, load_vocabulary, load_trained_model_from_checkpoint, get_checkpoint_paths
import tensorflow as tf
import keras
import keras.backend as K
from keras.preprocessing import sequence
from keras.layers import Dense, Input, Masking, Concatenate, Lambda, Multiply, Activation
from keras import Model
import jieba
import editdistance
import os
from keras.utils import multi_gpu_model
# Expose both GPUs to TensorFlow; must run before any CUDA context is created,
# hence its placement in the middle of the import block.
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1"
# import matplotlib.pyplot as plt
from utils.read_data import get_data_id

# 1 read data
# Paths to the NL2SQL training data and the pre-trained Chinese BERT
# (wwm-ext, 12-layer, 768-hidden) checkpoint files.
table_filename = '../data/train/train.tables.json'
data_filename = '../data/train/train.json'
get_data_filename = '../data/train_deal_data.json'  # pre-processed examples consumed below
dict_filename = '../data/chinese_wwm_ext_L-12_H-768_A-12/vocab.txt'
config_path = '../data/chinese_wwm_ext_L-12_H-768_A-12/bert_config.json'
model_path = '../data/chinese_wwm_ext_L-12_H-768_A-12/bert_model.ckpt'
# table_list = []
# data_list = []
# Training hyper-parameters. NOTE(review): min_learning_rate and max_len are
# defined but not referenced anywhere in this script — presumably leftovers
# from an earlier version; confirm before removing.
learning_rate = 5e-5
min_learning_rate = 1e-5
max_len = 160
batch_size = 24
# One-off preprocessing that joined questions with their table headers/types
# and dumped the result to train_data.json; kept for reference.
# with open(table_filename, encoding='utf-8') as f:
#     for line in f:
#         line = json.loads(line)
#         table_list.append(line)
#
# with open(data_filename, encoding='utf-8') as f:
#     for line in f.readlines():
#         line = json.loads(line)
#         for i in range(len(table_list)):
#             if line.get('table_id') == table_list[i].get('id'):
#                 line["header"] = table_list[i].get("header")
#                 line['types'] = table_list[i].get('types')
#         data_list.append(line)

# save data to a json file
# with open('train_data.json', 'w', encoding='utf-8') as f:
#     for i in range(len(data_list)):
#         f.write(json.dumps(data_list[i], ensure_ascii=False)+'\n')

# question + [real/text] header [seq]


# Load the tokenized training arrays; `data` is indexed positionally
# (0: token ids, 1: segment ids, 2: header ids, 3: header mask,
#  5: sel/agg labels) — see Data_Sequece below.
data = get_data_id(get_data_filename)


class Data_Sequece(keras.utils.Sequence):
    """Batch generator for the sel/agg head.

    `data` is an indexable collection of parallel arrays:
    index 0 holds token ids, 1 segment ids, 2 header ids, 3 the header
    mask, and 5 the sel/agg labels (index 4 is not used by this model).
    """

    def __init__(self, data, batch_size):
        self.data = data
        self.batch_size = batch_size
        # Ceiling division: a trailing partial batch still counts as a step.
        sample_count = len(self.data[0])
        self.steps = (sample_count + self.batch_size - 1) // self.batch_size

    def __len__(self):
        """Number of batches per epoch."""
        return self.steps

    def __getitem__(self, item):
        """Return ([tokens, segments, header_ids, header_mask], [labels]) for batch `item`."""
        start = self.batch_size * item
        end = start + self.batch_size
        inputs = [self.data[i][start:end] for i in range(4)]
        labels = self.data[5][start:end]
        # Trailing axis added so the labels match sparse_categorical_crossentropy.
        return inputs, [np.expand_dims(labels, axis=-1)]

    def __iter__(self):
        """Yield every batch in index order."""
        for batch_index in range(len(self)):
            yield self[batch_index]


# Batch generator wrapping the pre-processed arrays.
data_seq = Data_Sequece(data, batch_size)


# Class counts for the prediction heads. Only num_sel_agg is used in this
# script; the cond_op / cond_conn_op sizes are presumably kept for the
# fuller model referenced in the commented-out code below.
num_sel_agg = 7
num_cond_op = 5
num_cond_conn_op = 3


def seq_gather(x):
    """Gather per-header token vectors from the BERT sequence output.

    `x` is a pair (seq, idxs): `seq` is the encoder output and `idxs`
    holds index tuples (presumably [batch, position] pairs, matching the
    (None, None, 2) header-id input) for tf.gather_nd.
    """
    sequence_output, index_pairs = x
    return tf.gather_nd(sequence_output, K.cast(index_pairs, 'int32'))

# Load the pre-trained BERT encoder and fine-tune every layer.
bert_model = load_trained_model_from_checkpoint(config_path, model_path, seq_len=None)
for l in bert_model.layers:
    l.trainable = True

# Model inputs. Each header-id entry is a pair of indices (last dim 2),
# consumed by tf.gather_nd inside seq_gather.
inp_token_ids = Input(shape=(None,), name='input_token_ids', dtype='int32')
inp_segment_ids = Input(shape=(None,), name='input_segment_ids', dtype='int32')
inp_header_ids = Input(shape=(None, 2), name='input_header_ids', dtype='int32')
inp_header_mask = Input(shape=(None,), name='input_header_mask')

x = bert_model([inp_token_ids, inp_segment_ids])  # (None, seq_len, 768)

# predict sel_agg
# x:(None, seq_len, 768)  inp_header_ids:(None, h_len, 2) to x_for_header (None, h_len, 768)
x_for_header = Lambda(seq_gather, name='header_seq_gather')([x, inp_header_ids])
header_mask = Lambda(lambda x: K.expand_dims(x, axis=-1), name="reduce_header_mask")(inp_header_mask)  # (None, h_len, 1)
# Zero out padded header positions, then Masking() makes downstream layers
# skip the all-zero timesteps.
x_for_header = Multiply()([x_for_header, header_mask])
x_for_header = Masking()(x_for_header)
# Per-header softmax over the 7 select/aggregate classes.
p_sel_agg = Dense(num_sel_agg, activation='softmax', name='output_sel_agg')(x_for_header)


train_model = Model(
    [inp_token_ids, inp_segment_ids, inp_header_ids, inp_header_mask],
    [p_sel_agg]
)


# NOTE(review): `model` is structurally identical to `train_model` and shares
# its layers/weights. Saving weights through `model` (see bottom of file)
# avoids the multi_gpu_model wrapper in the checkpoint — presumably the
# standard multi-GPU save workaround; confirm before consolidating.
model = Model(
    [inp_token_ids, inp_segment_ids, inp_header_ids, inp_header_mask],
    [p_sel_agg]
)

# Replicate the graph across both visible GPUs for training.
train_model = multi_gpu_model(train_model, gpus=2)
train_model.compile(
    loss='sparse_categorical_crossentropy',
    metrics = ["accuracy"],
    optimizer=keras.optimizers.Adam(lr=learning_rate)
)

# Optional resume-from-checkpoint, currently disabled.
# if os.path.exists(checkpoint_save_path + '.index'):
#     print("----------load model------------------")
#     model.load_weights(checkpoint_save_path)

EPOCHS = 1
checkpoint_save_path = '../data/bert2sql_agg_cel_{}_epochs.h5'.format(EPOCHS)
train_model.summary()
# Train on the multi-GPU wrapper, then save through the single-GPU `model`,
# which shares the same underlying layers/weights.
train_model.fit_generator(data_seq, epochs=EPOCHS, steps_per_epoch=len(data_seq))
model.save_weights(checkpoint_save_path)

#
# metrics = history.history
# figs, axes = plt.subplots(1,2, figsize=(10, 12))
# axes[0].plot(history.epoch, metrics['loss'], metrics['output_cond_conn_op_loss'],
#              metrics['output_sel_agg_loss'], metrics['output_cond_cel_loss'], metrics['value_op_loss'])
# axes[1].plot(history.epoch, metrics['accuracy'], metrics['output_cond_conn_op_accuracy'],
#              metrics['output_sel_agg_accuracy'], metrics['output_cond_cel_accuracy'], metrics['value_op_accuracy'])
# axes[0].legend(['loss', 'output_cond_conn_op_loss','output_sel_agg_loss',
#                 'output_cond_cel_loss', 'output_cond_conn_op_accuracy'])
# axes[1].legend(['accuracy', 'output_cond_conn_op_accuracy', 'output_sel_agg_accuracy',
#                 'output_cond_cel_accuracy', 'value_op_accuracy'])
# plt.show()


#
# a, b, c, d = data[4][0:1], data[5][0:1], data[6][0:1], data[7][0:1]
# a1, b1, c1, d1 = model.predict([data[0][0:1], data[1][0:1], data[2][0:1], data[3][0:1], data[8][0:1]])
# a1 = tf.squeeze(a1, axis=0)
# b1 = tf.squeeze(b1, axis=0)
# c1 = tf.squeeze(c1, axis=0)
# d1 = tf.squeeze(d1, axis=0)
# print(a, b , c, d)
# print(a1, b1, c1, d1)
# print("cond_conn_op", tf.reduce_mean(keras.losses.sparse_categorical_crossentropy(a, a1), axis=-1))
# print("sel_agg", tf.reduce_mean(tf.reduce_mean(keras.losses.sparse_categorical_crossentropy(b, b1), axis=-1), axis=-1))
# print("cond_sel", tf.reduce_mean(tf.reduce_mean(keras.losses.binary_crossentropy(c, c1), axis=-1), axis=-1))
# print("cond_sel_and_ap", tf.reduce_mean(tf.reduce_mean(keras.losses.sparse_categorical_crossentropy(d, d1), axis=-1), axis=-1))


