import json
import numpy as np
import pandas as pd
from keras_bert import Tokenizer, load_vocabulary, load_trained_model_from_checkpoint, get_checkpoint_paths
import tensorflow as tf
import keras
import keras.backend as K
from keras.preprocessing import sequence
from keras.layers import Dense, Input, Masking, Concatenate, Lambda, Multiply, Activation
from keras import Model
import jieba
import editdistance
import os
import matplotlib.pyplot as plt

# Example input record: table id followed by a Chinese natural-language
# question ("What is the combined box-office share of the films Bumblebee
# and Escape Room in week four of 2019?"):
# 4d29d0513aaa11e9b911f40f24344a08 二零一九年第四周大黄蜂和密室逃生这两部影片的票房总占比是多少呀

# NL2SQL training data (one JSON object per line).
table_filename = '../../data/train/train.tables.json'
data_filename = '../../data/train/train.json'
# Pre-trained Chinese BERT (whole-word-masking, 12 layers, 768 hidden) assets.
dict_filename = '../../data/chinese_wwm_ext_L-12_H-768_A-12/vocab.txt'
config_path = '../../data/chinese_wwm_ext_L-12_H-768_A-12/bert_config.json'
model_path = '../../data/chinese_wwm_ext_L-12_H-768_A-12/bert_model.ckpt'
# Fine-tuned bert2sql weights loaded into the model built below.
bert2sql_model_path = '../../data/bert2sql_50_epochs.h5'
# Pre-processed training data consumed by generator_sql() for evaluation.
train_deal_data_path = '../../data/train_deal_data.json'


class MyTokenizer(Tokenizer):
    """Character-level tokenizer for Chinese BERT.

    Splits text into single characters so token positions stay aligned
    with the original question string (needed for span-based value
    extraction): whitespace maps to the reserved '[unused1]' token and
    out-of-vocabulary characters map to '[UNK]'.
    """

    def _tokenize(self, text):
        tokens = []
        for ch in text:
            if ch in self._token_dict:
                tokens.append(ch)
            elif self._is_space(ch):
                # BUG FIX: the original wrote `elif self._is_space:` — the
                # bound method object is always truthy, so every OOV
                # character was treated as whitespace and the unknown
                # branch below was unreachable.
                tokens.append('[unused1]')
            else:
                # BUG FIX: use the bracketed vocab entries; bare 'unused1'
                # and 'UNK' are not keys in the BERT vocabulary.
                tokens.append('[UNK]')
        return tokens


# Module-level tokenizer built from the BERT vocabulary file.
# NOTE: this runs at import time and requires the vocab file to exist.
token_dict = load_vocabulary(dict_filename)
tokenizer = MyTokenizer(token_dict)


def most_similar(w, wlist):
    """Return the candidate in ``wlist`` closest to ``w`` by edit distance.

    Falls back to ``w`` itself when ``wlist`` is empty; ties resolve to the
    earliest candidate.
    """
    if not wlist:
        return w
    return min(wlist, key=lambda cand: editdistance.eval(w, cand))


def most_similar_2(word, sentence):
    """Find the fragment of ``sentence`` most similar to ``word``.

    Segments the sentence with jieba and builds all 1- to 4-gram joins of
    consecutive segments as candidate fragments, then delegates to
    most_similar() to pick the closest one by edit distance.
    """
    segments = jieba.lcut(sentence)
    candidates = list(segments)
    for n in (2, 3, 4):
        grams = zip(*(segments[offset:] for offset in range(n)))
        candidates.extend(''.join(gram) for gram in grams)
    return most_similar(word, candidates)


# Per-example state shared between generator_sql() (writer) and
# get_sql() (reader).
question = ""
table_id = ""
sql = {}
# Output-head class counts. From the decoding code: sel/agg class 6 means
# "column not selected" and cond-op class 4 means "token is not a value";
# num_cond_conn_op covers the condition connectors; num_conds is the size
# of the condition-count head.
num_sel_agg = 7
num_cond_op = 5
num_cond_conn_op = 3
num_conds = 5

def seq_gather(x):
    """Lambda-layer helper: gather vectors out of a sequence tensor.

    ``x`` is a pair ``(seq, idxs)``; the indices are cast to int32 and used
    with tf.gather_nd to pull the addressed positions out of ``seq``.
    """
    seq_tensor, gather_idxs = x
    return tf.gather_nd(seq_tensor, K.cast(gather_idxs, 'int32'))


# Load the pre-trained BERT encoder and make every layer fine-tunable.
bert_model = load_trained_model_from_checkpoint(config_path, model_path, seq_len=None)
for l in bert_model.layers:
    l.trainable = True

# Model inputs: token/segment ids for the question+headers sequence,
# (batch, position) indices of the per-column marker tokens, a 0/1 mask
# over real header entries, and a 0/1 mask over the question's value tokens.
inp_token_ids = Input(shape=(None,), name='input_token_ids', dtype='int32')
inp_segment_ids = Input(shape=(None,), name='input_segment_ids', dtype='int32')
inp_header_ids = Input(shape=(None, 2), name='input_header_ids', dtype='int32')
inp_header_mask = Input(shape=(None,), name='input_header_mask')
value_mask = Input(shape=(None,), name="value_mask")  # mask over the value/column tokens
# zeros_dims = Input(tensor=K.variable(np.zeros(shape=(batch_size, max_len, 768))))

x = bert_model([inp_token_ids, inp_segment_ids])  # (None, seq_len, 768)
value_masks = Lambda(lambda x: tf.expand_dims(x, axis=-1), name="reduce_value_mask")(value_mask) # (None, value, 1)
value_x = Multiply()([x, value_masks])  # zero out non-value positions

# predict cond_conn_op (and the condition count) from the [CLS] vector.
# NOTE(review): the layer name "recude_cls" is a typo, kept as-is because
# renaming layers could break loading the saved weights.
x_for_cond_conn_op = Lambda(lambda x: x[:, 0], name="recude_cls")(x)  # (None, 768)
p_cond_conn_op = Dense(num_cond_conn_op, activation='softmax', name='output_cond_conn_op')(x_for_cond_conn_op)
p_cond_nums = Dense(num_conds, activation='softmax', name='output_cond_nums')(x_for_cond_conn_op)

# predict sel_agg
# x:(None, seq_len, 768)  inp_header_ids:(None, h_len, 1) to x_for_header (None, h_len, 768)
x_for_header = Lambda(seq_gather, name='header_seq_gather')([x, inp_header_ids])
header_mask = Lambda(lambda x: K.expand_dims(x, axis=-1), name="reduce_header_mask")(inp_header_mask)  # (None, h_len, 1)
# used below for the cond_cel head
header_cond_cel_mask = Lambda(lambda x: K.expand_dims(x, axis=1), name="reduce_header_cond_cel_mask")(inp_header_mask)  # (None, 1, h_len)
x_for_header = Multiply()([x_for_header, header_mask])
# x_for_header_dims = Lambda(lambda x: tf.expand_dims(x, 1))(x_for_header)

# Mean-pool the header (column) vectors.
columns_mean = Lambda(lambda x: tf.reduce_mean(x, axis=1), name="reduce_clomns_mean")(x_for_header)
# Per-token condition-operator head: blend the value-token encodings with
# the pooled column encoding (0.4 / 0.6 weights). Label patterns such as
# "4 4 4 2 2 4 4" arise because x_for_header also contributes.
# shape (None, 1, 768)
columns_mean_expand = Lambda(lambda x:tf.expand_dims(x, axis=1), name="reduce_columns_mean_expand")(columns_mean)
value_op = Lambda(lambda x: 0.4 * x[0] + 0.6 * x[1], name="reduce_value_op")([columns_mean_expand, value_x])
value_op = Masking()(value_op)
value_op = Dense(num_cond_op, activation="softmax", name="value_op")(value_op)

x_for_header = Masking()(x_for_header)
# trailing zeros in predictions (e.g. "6 5 6 6 0 0 0") come from the Masking op
p_sel_agg = Dense(num_sel_agg, activation='softmax', name='output_sel_agg')(x_for_header)
# Build a pairwise (question-token, header) interaction grid by expanding
# dims so the two 256-d projections broadcast against each other.
x = Lambda(lambda x: tf.expand_dims(x, 2))(x)
x_for_header = Lambda(lambda x: tf.expand_dims(x, 1))(x_for_header)
pcsel_1 = Dense(256)(x)
pcsel_2 = Dense(256)(x_for_header)
pcsel = Lambda(lambda x: 0.5 * x[0] + 0.5 * x[1])([pcsel_1, pcsel_2])
# value_masks_dims = Lambda(lambda x: tf.expand_dims(x, axis=-1), name="reduce_value_mask_dims")(value_masks) # (None, value, 1)
# value_masks_dims.shape (None, 160, 1, 1)
pcsel = Activation('tanh')(pcsel)
# similarity score per (question position, header)
p_cond_cel = Dense(1)(pcsel)
# (None, 160, 160, 1) -> (None, 160, 160)   header_cond_cel_mask.shape (None, 1, h_len)  p_cond_cel (None, question, header)
# Push padded-header scores to -1e10 so the softmax ignores them.
p_cond_cel = Lambda(lambda x: x[0][..., 0] - (1 - x[1]) * 1e10)([p_cond_cel, header_cond_cel_mask])
p_cond_cel = Multiply()([p_cond_cel, value_masks])  # (None, 160, 160) -> (None, 160, 160) axis=1
p_cond_cel = Masking()(p_cond_cel)
p_cond_cel = Activation('softmax', name="output_cond_cel")(p_cond_cel)

# Five-head model; weights come from the fine-tuned bert2sql checkpoint.
model = Model(
    [inp_token_ids, inp_segment_ids, inp_header_ids, inp_header_mask, value_mask],
    [p_cond_conn_op, p_sel_agg, p_cond_cel, value_op, p_cond_nums]
)
model.load_weights(bert2sql_model_path)


def get_sql(predict_data, cel_nums):
    """Decode the model's five output heads into a SQL-label dict.

    Args:
        predict_data: model.predict output for a single example (batch 1):
            [p_cond_conn_op, p_sel_agg, p_cond_cel, value_op, p_cond_nums].
        cel_nums: number of real (non-padded) header columns.

    Returns:
        dict with keys "agg", "cond_conn_op", "sel", "conds" mirroring the
        dataset's SQL label format.

    Side effects: reads module globals `question` and `table_id` (set by
    generator_sql) and re-reads the table file to match condition values
    against table cell text.
    """
    p_cond_conn_op = predict_data[0]  # 3
    p_sel_agg = predict_data[1]       # (None, sel_len, 7)
    p_cond_cel = predict_data[2]      # (None, 412, 412)
    value_op = predict_data[3]        # (None, 412 , 5)
    p_cond_nums = predict_data[4]
    #
    p_cond_conn_op = np.argmax(p_cond_conn_op, axis=-1)[0]
    p_cond_nums = np.argmax(p_cond_nums, axis=-1)[0]
    # print(f"p_cond_nums is {p_cond_nums}")

    # Collect (column, aggregate) pairs; class 6 means "column not selected".
    p_agg, p_sel = [],[]
    agg_nums = 0
    sel_agg_max = np.argmax(p_sel_agg, axis=-1)  # (1, 412, 1)
    print("sel_agg_max", sel_agg_max)
    for cel, agg in enumerate(sel_agg_max[0][:cel_nums]): # (412, 1)
        if agg != 6:  # inputs_data[5] headers
            agg_nums += 1
            p_agg.append(agg)
            p_sel.append(cel)
    if agg_nums == 0:
        # Fall back to selecting column 0 with no aggregate.
        p_agg.append(0)
        p_sel.append(0)

    # (predicted condition count p_cond_nums is computed above but unused)

    v_str_len = 0
    v_start = 0
    # Recover each value span's start index and length from the per-token
    # operator predictions (class 4 == "token is not part of a value").
    v_str_len_list, v_start_list, v_str_list = [], [], []
    ops = {}

    for index, values in enumerate(value_op[0].argmax(axis=-1)):
        if values != 4:
            if v_start == 0:
                v_start = index
                v_start_list.append(v_start)
            ops[index] = values
            v_str_len += 1
        else:
            v_start = 0
            v_str_len_list.append(v_str_len)
            v_str_len = 0
    # NOTE(review): a value run that reaches the final token is never flushed
    # into v_str_len_list — presumably padding makes that rare; confirm.
    for i in v_str_len_list:
        if i != 0:
            v_str_list.append(i)
    # print(v_start_list, v_str_list, ops)

    # Flatten every cell of the current table so extracted values can be
    # matched against real cell contents.
    table_list = []
    with open(table_filename, 'r', encoding='utf-8') as lines:
        for line in lines:
            text = json.loads(line)
            if table_id == text.get('id'):
                for table_column in text.get("rows"):
                    table_list += table_column

    conds = []
    for i in range(len(v_start_list)):
        v_start = v_start_list[i]
        v_end = v_start_list[i] + v_str_list[i]
        # Token index 0 is [CLS]; shift by one to index into the raw question.
        str1 = question[v_start - 1: v_end - 1]
        editdistance_list = [editdistance.eval(str1, str(t)) for t in table_list]
        # print(f"table list is {table_list}")
        # print(f"editdistance {[editdistance.eval(str1, str(t)) for t in table_list]}")
        # print(f"table str is {table_list[]}")
        op = ops[v_start]
        # print(f"p_cond_cel.shape is {p_cond_cel.shape}")
        # print(f"p_cond_cel[0][v_start: v_end][:].shape is {p_cond_cel[0][v_start: v_end][:].shape}")
        # Closest table cell by edit distance; used when the extracted span
        # is not a clean number.
        str2 = table_list[editdistance_list.index(min(editdistance_list))]
        # Average the span's column scores and pick the best header (the -1
        # presumably undoes a one-based header offset — TODO confirm).
        cel = np.mean(p_cond_cel[0][v_start: v_end][:], axis=0).argmax() - 1  # p_cond_cel[0][v_start: v_end][:].shape (4, 412)
        # print(f"cel is {np.mean(p_cond_cel[0][v_start: v_end][:], axis=0)}")
        if str1 in question and str1.isdigit():  # digit-only spans are kept verbatim as ints
            conds.append(tuple([cel, op, int(str1)]))
        else:
            conds.append(tuple([cel, op, str2]))
    return {"agg":p_agg,"cond_conn_op": p_cond_conn_op, "sel": p_sel, "conds":conds}


def is_equal(sql, pred_sql):
    """Compare a gold SQL dict with a predicted one, ignoring ordering.

    ``agg``, ``sel`` and ``conds`` are compared as sets (order and
    duplicates are ignored); ``cond_conn_op`` must match exactly. The
    ``conds`` entries must be hashable (tuples) on both sides.
    """
    if sql['cond_conn_op'] != pred_sql['cond_conn_op']:
        return False
    return all(set(sql[key]) == set(pred_sql[key])
               for key in ('agg', 'sel', 'conds'))

def generator_sql():
    """Evaluate the loaded model on the first 1000 pre-processed records.

    For each record it rebuilds the model inputs (token ids, segment ids,
    header-marker positions/mask, value-span mask), runs model.predict,
    decodes with get_sql(), and compares against the gold SQL via is_equal().

    Returns:
        (nums_true, nums_true / 41522).
        NOTE(review): the denominator 41522 looks like the full train-set
        size even though the loop stops after 1000 records — confirm intent.
    """
    nums_true = 0
    nums_all = 0
    with open(train_deal_data_path, 'r', encoding='utf-8') as lines:
        for line in lines:
            if nums_all == 1000:
                break
            max_len = 160
            nums_all += 1
            text = json.loads(line)
            # get_sql() reads these module globals.
            global question
            global table_id
            table_id = text.get("table_id")
            question = text.get("question")
            sql = text.get("sql")
            # Convert gold conds to tuples so is_equal() can put them in a set.
            for i in range(len(sql.get("conds"))):
                sql.get("conds")[i] = tuple(sql.get("conds")[i])
            questions_token_id, quesiton_segment_id = tokenizer.encode(question)
            # 1. Build header token/segment ids. Each header's leading [CLS]
            # slot is overwritten with a type marker: 11 for 'text', 12 otherwise.
            header_token_id_list, header_segment_id_list = [], []
            header_token_id, header_segment_id = [], []
            for i in range(len(text.get('header'))):
                # (disabled idea below: give headers segment id 1 to separate
                # question from header)
                header_token_id_, header_segment_id_ = tokenizer.encode(text.get('header')[i])
                if text.get('types')[i] == 'text':
                    header_token_id_[0] = 11
                else:
                    header_token_id_[0] = 12
                # flip the header segment ids from 0 to 1 (disabled)
                # for i in range(len(header_segment_id_)):
                #     header_segment_id_[i] = 1 - header_segment_id_[i]
                header_token_id.append(header_token_id_)
                header_segment_id.append(header_segment_id_)

            for i in range(len(header_token_id)):
                header_token_id_list += header_token_id[i]
                header_segment_id_list += header_segment_id[i]

            token_id = [questions_token_id + header_token_id_list]
            segment_id = [quesiton_segment_id + header_segment_id_list]
            # 2. Build the per-token value_op labels / value_mask.
            value_op = [4] * (len(token_id[0]))  # also spans the appended header tokens
            value = [text.get('sql')['conds'][i][2] for i in range(len(text.get('sql')['conds']))]
            for i in range(len(value)):
                # If the gold value appears verbatim in the question, label
                # its span directly.
                if value[i] in text.get('question'):
                    # find the value index from question
                    index = text.get('question').index(value[i])
                    # assign the op label over the span (+1 offsets past [CLS])
                    value_op[index+1: index+1+len(value[i])] = [text.get('sql')['conds'][i][1]] * len(value[i])
                # Otherwise find the most similar question fragment first and
                # label the matched fragment's position instead.
                else:
                    temp = most_similar_2(value[i], text.get('question'))
                    index = text.get('question').index(temp)
                    value_op[index + 1: index + 1 + len(value[i])] = [text.get('sql')['conds'][i][1]] * len(value[i])
            value_mask = [[0 if value_op[i] == 4 else 1 for i in range(len(value_op))]]

            # 3. Header marker positions and header mask.

            batch_index = 0
            header_id = []
            for i in range(len(token_id[0])):
                if token_id[0][i] == 11 or token_id[0][i] == 12:
                    header_id.append([batch_index, i])
            # Skip records whose header markers fall beyond max_len.
            is_ok = True
            for i in range(len(header_id)):
                if header_id[i][1] > max_len:
                    print("continue")
                    is_ok = False
            if is_ok:
                header_id = [header_id]
                # # header_mask_list
                header_mask = [[1] * len(header_id[0])]
                token_id = np.array(sequence.pad_sequences(token_id, maxlen=max_len, padding='post', truncating='post'))
                segment_id = np.array(sequence.pad_sequences(segment_id, maxlen=max_len, padding='post', truncating='post', value=1))
                value_mask = np.array(sequence.pad_sequences(value_mask, maxlen=max_len, padding='post', truncating='post'))
                header_mask = np.array(sequence.pad_sequences(header_mask, maxlen=max_len, padding='post', truncating='post'))
                header_id = np.array(sequence.pad_sequences(header_id, maxlen=max_len, padding='post', truncating='post'))
                inputs_data =  [token_id, segment_id, header_id, header_mask, value_mask]
                # Count the real (unpadded) header columns for get_sql().
                cel_nums = 0
                for i in range(len(inputs_data[3][0])):
                    if inputs_data[3][0][i] == 1:
                        cel_nums += 1
                predict_data = model.predict(inputs_data)
                pred_sql = get_sql(predict_data, cel_nums)
                if is_equal(sql, pred_sql):
                    nums_true += 1
                print(f"nums_true is {nums_true}")
                print(f"nums_all is {nums_all}")
                print(f"nums_true/nums_all is {nums_true/nums_all}")
                print(f"sql is {sql}")
                print(f"pred_sql is {pred_sql}")
    return nums_true, nums_true / 41522

print(generator_sql())