import json
import numpy as np
import pandas as pd
from keras_bert import Tokenizer, load_vocabulary, load_trained_model_from_checkpoint, get_checkpoint_paths
import tensorflow as tf
import keras
import keras.backend as K
from keras.preprocessing import sequence
from keras.layers import Dense, Input, Masking, Concatenate, Lambda, Multiply, Activation
from keras import Model
import jieba
import editdistance
import os
import matplotlib.pyplot as plt

# Example input pair used for manual testing of this script:
#   table id: 4d29d0513aaa11e9b911f40f24344a08
#   question: 二零一九年第四周大黄蜂和密室逃生这两部影片的票房总占比是多少呀

# Paths: NL2SQL training data, pretrained Chinese BERT (wwm ext), the
# fine-tuned condition-column model weights, and the preprocessed data file.
table_filename = '../../data/train/train.tables.json'
data_filename = '../../data/train/train.json'
dict_filename = '../../data/chinese_wwm_ext_L-12_H-768_A-12/vocab.txt'
config_path = '../../data/chinese_wwm_ext_L-12_H-768_A-12/bert_config.json'
model_path = '../../data/chinese_wwm_ext_L-12_H-768_A-12/bert_model.ckpt'
bert2sql_model_path = '../../data/bert2sql_agg_cel_1_epochs.h5'
train_deal_data_path = '../../data/train_deal_data.json'
# table_list = []
# data_list = []
# NOTE(review): the learning rates below are not referenced anywhere in this
# file — presumably left over from the training script.
learning_rate = 5e-5
min_learning_rate = 1e-5


class MyTokenizer(Tokenizer):
    """Character-level tokenizer for Chinese BERT.

    Splits text into single characters instead of WordPiece so question
    character positions line up one-to-one with token positions (the span
    labelling in get_inputs relies on this alignment).
    """

    def _tokenize(self, text):
        """Return one token per character of *text*.

        Fixes two defects in the original: ``self._is_space`` was referenced
        without being called (a bound method is always truthy, so every
        out-of-vocab character took the space branch), and the fallback
        tokens lacked brackets — bare ``unused1``/``UNK`` are not vocabulary
        entries, so both would have been mapped to the UNK id anyway.
        """
        tokens = []
        for ch in text:
            if ch in self._token_dict:
                tokens.append(ch)
            elif self._is_space(ch):
                # Map whitespace to the reserved [unused1] slot so spaces
                # survive encoding instead of collapsing into [UNK].
                tokens.append('[unused1]')
            else:
                tokens.append('[UNK]')
        return tokens


# Shared BERT vocabulary and the character-level tokenizer built on it.
token_dict = load_vocabulary(dict_filename)
tokenizer = MyTokenizer(token_dict)


def most_similar(w, wlist):
    """Return the candidate in *wlist* with the smallest edit distance to *w*.

    Falls back to *w* itself when the candidate list is empty; ties are
    resolved in favour of the earliest candidate.
    """
    if not wlist:
        return w
    return min(wlist, key=lambda candidate: editdistance.eval(w, candidate))


def most_similar_2(word, sentence):
    """Find the span of *sentence* most similar to *word*.

    Segments the sentence with jieba, enumerates every concatenation of
    1 to 4 consecutive segments, and returns the candidate with the
    smallest edit distance to *word* (delegating to most_similar), which
    bounds the span roughly at word boundaries.
    """
    segs = jieba.lcut(sentence)
    candidates = []
    for n in range(1, 5):
        candidates.extend(
            ''.join(gram) for gram in zip(*(segs[i:] for i in range(n)))
        )
    return most_similar(word, candidates)


# Interactive entry point: ask the user for a question and the id of the
# table it should be answered against.
inputs = input("请输入需要查询的问题:\n")
table_id = input("请输入表id:\n")
# Module-level globals filled in as a side effect of get_inputs().
headers = ""
question = ""
def get_inputs(inputs, table_id):
    """Build the model's four input arrays for one (question, table) pair.

    Scans the preprocessed training file for the record whose table id and
    question text both match, then encodes question + column headers into
    padded arrays.  Also sets the module globals `headers` and `question`.

    Returns [token_id, segment_id, header_id, header_mask] as numpy arrays
    padded to length 160, or (implicitly) None when no record matches.
    """
    questions_token_id, quesiton_segment_id = tokenizer.encode(inputs)
    with open(train_deal_data_path, 'r', encoding='utf-8') as lines:
        for line in lines:
            max_len = 160
            text = json.loads(line)
            if table_id == text.get('table_id') and inputs==text.get('question'):
                print(text)
                global headers
                global question
                headers = text.get("header")
                question = inputs
                # 1  generate the header_token_id, header_segment_id
                header_token_id_list, header_segment_id_list = [], []
                header_token_id, header_segment_id = [], []
                for i in range(len(text.get('header'))):
                    # Encode each column header; its leading [CLS] id is
                    # overwritten with a sentinel (11 = text column,
                    # 12 = real/number column) so the column start
                    # positions can be located again below.
                    header_token_id_, header_segment_id_ = tokenizer.encode(text.get('header')[i])
                    if text.get('types')[i] == 'text':
                        header_token_id_[0] = 11
                    else:
                        header_token_id_[0] = 12
                    # (disabled) flip header segment ids from 0 to 1 to
                    # distinguish headers from the question
                    # for i in range(len(header_segment_id_)):
                    #     header_segment_id_[i] = 1 - header_segment_id_[i]
                    header_token_id.append(header_token_id_)
                    header_segment_id.append(header_segment_id_)

                for i in range(len(header_token_id)):
                    header_token_id_list += header_token_id[i]
                    header_segment_id_list += header_segment_id[i]

                token_id = [questions_token_id + header_token_id_list]
                segment_id = [quesiton_segment_id + header_segment_id_list]
                # 2 value_mask: per-position condition-op labels;
                # 4 = "no condition op" (length also spans the appended
                # header tokens).
                value_op = [4] * (len(token_id[0]))
                value = [text.get('sql')['conds'][i][2] for i in range(len(text.get('sql')['conds']))]
                for i in range(len(value)):
                    # If the sql value appears verbatim in the question,
                    # label its character span directly.
                    if value[i] in text.get('question'):
                        # find the value index from question
                        index = text.get('question').index(value[i])
                        # assign op value (+1 offset skips the [CLS] token)
                        value_op[index+1: index+1+len(value[i])] = [text.get('sql')['conds'][i][1]] * len(value[i])
                    # Otherwise match the sql value to the closest question
                    # substring by edit distance and label those characters.
                    else:
                        temp = most_similar_2(value[i], text.get('question'))
                        index = text.get('question').index(temp)
                        value_op[index + 1: index + 1 + len(value[i])] = [text.get('sql')['conds'][i][1]] * len(value[i])
                value_mask = [[0 if value_op[i] == 4 else 1 for i in range(len(value_op))]]

                # 3 header_id and header_mask: [batch, position] pairs of
                # the sentinel ids planted above, plus a validity mask.
                batch_index = 0
                header_id = []
                for i in range(len(token_id[0])):
                    if token_id[0][i] == 11 or token_id[0][i] == 12:
                        header_id.append([batch_index, i])
                header_id = [header_id]

                # # header_mask_list
                header_mask = [[1] * len(header_id[0])]
                print("token_id", token_id)
                print("segment_id", segment_id)
                print("header_id", header_id)
                print("header_mask", header_mask)
                print("value_mask", value_mask)
                # Pad everything to max_len; padded segment ids are 1.
                token_id = np.array(sequence.pad_sequences(token_id, maxlen=max_len, padding='post'))
                segment_id = np.array(sequence.pad_sequences(segment_id, maxlen=max_len, padding='post', value=1))
                header_mask = np.array(sequence.pad_sequences(header_mask, maxlen=max_len, padding='post'))
                header_id = np.array(sequence.pad_sequences(header_id, maxlen=max_len, padding='post'))
                return [token_id, segment_id, header_id, header_mask]

# Label-space sizes: 6 aggregate functions + a "no column" class, 4 condition
# operators + a "no op" class, 3 connector choices ('', and, or) — compare the
# *_sql_dict decode tables further down.
num_sel_agg = 7
num_cond_op = 5
num_cond_conn_op = 3


def seq_gather(x):
    """Keras Lambda helper: gather vectors out of a sequence tensor.

    *x* is a pair ``(seq, idxs)``: ``seq`` is the BERT sequence output and
    ``idxs`` holds integer [batch, position] index pairs (as produced by
    get_inputs); returns the gathered vectors via ``tf.gather_nd``.
    """
    seq_tensor, positions = x
    return tf.gather_nd(seq_tensor, K.cast(positions, 'int32'))


# Build the condition-column pointer model on top of pretrained BERT.
bert_model = load_trained_model_from_checkpoint(config_path, model_path, seq_len=None)
# Make every BERT layer trainable (weights are restored from the fine-tuned
# .h5 file below, so this only matters if the model is trained further).
for l in bert_model.layers:
    l.trainable = True

inp_token_ids = Input(shape=(None,), name='input_token_ids', dtype='int32')
inp_segment_ids = Input(shape=(None,), name='input_segment_ids', dtype='int32')
# [batch, position] pairs pointing at each header's sentinel token.
inp_header_ids = Input(shape=(None, 2), name='input_header_ids', dtype='int32')
inp_header_mask = Input(shape=(None,), name='input_header_mask')

x = bert_model([inp_token_ids, inp_segment_ids])  # (None, seq_len, 768)
# Pick out the BERT vector at each header position, zero the padded slots,
# and mark them for downstream masking.
x_for_header = Lambda(seq_gather, name='header_seq_gather')([x, inp_header_ids])
header_mask = Lambda(lambda x: K.expand_dims(x, axis=-1), name="reduce_header_mask")(inp_header_mask)  # (None, h_len, 1)
x_for_header = Multiply()([x_for_header, header_mask])
x_for_header = Masking()(x_for_header)


# Pairwise (token, header) interaction via broadcasting, then a weighted
# blend of the two projections squashed with tanh.
x = Lambda(lambda x: tf.expand_dims(x, 2))(x)
x_for_header = Lambda(lambda x: tf.expand_dims(x, 1))(x_for_header)
pcsel_1 = Dense(256)(x)
pcsel_2 = Dense(256)(x_for_header)
pcsel = Lambda(lambda x: 0.8 * x[0] + 0.2 * x[1])([pcsel_1, pcsel_2])
pcsel = Activation('tanh')(pcsel)
p_cond_cel = Dense(1)(pcsel)
# Suppress padded header slots with a large negative logit before softmax.
# NOTE(review): header_mask is (None, h_len, 1) while the logits appear to
# be (None, seq_len, h_len) after the [..., 0] squeeze — confirm the
# broadcast does what is intended here.
p_cond_cel = Lambda(lambda x: x[0][..., 0] - (1 - x[1]) * 1e10)([p_cond_cel, header_mask])
p_cond_cel = Activation('softmax', name="output_cond_cel")(p_cond_cel)

model = Model(
    [inp_token_ids, inp_segment_ids, inp_header_ids, inp_header_mask],
    [p_cond_cel]
)
model.load_weights(bert2sql_model_path)
model.summary()
# NOTE(review): get_inputs returns None when no record matches the typed
# question/table id, which would crash the indexing below.
inputs_data = get_inputs(inputs, table_id)
print("inputs_data", inputs_data)
# Count the real (non-padded) header slots from the header mask.
cel_nums = 0
for i in range(len(inputs_data[3][0])):
    if inputs_data[3][0][i] == 1:
        cel_nums += 1
print(f"cel_nums is {cel_nums}")
predict_data = model.predict(inputs_data)

# Decode tables from class index to SQL text.  The "null" labels (op 4,
# agg 6 — see num_cond_op / num_sel_agg) have no entry here on purpose.
op_sql_dict = {0: ">", 1: "<", 2: "==", 3: "!="}
agg_sql_dict = {0: "", 1: "AVG", 2: "MAX", 3: "MIN", 4: "COUNT", 5: "SUM"}
conn_sql_dict = {0: "", 1: "and", 2: "or"}


# p_sel_agg_list = []
# sel_agg_max = np.argmax(predict_data[1], axis=-1)  # (1, 412, 1)
# print(sel_agg_max)
# for index, values in enumerate(sel_agg_max[0]): # (412, 1)
#     print(index, values)
#     if values != 6:  # inputs_data[5] headers
#         p_sel_agg_list.append((headers[index], values))
# print(p_sel_agg_list)

# Example console session:
#   question: 二零一九年第四周大黄蜂和密室逃生这两部影片的票房总占比是多少呀
#   table id prompt (请输入表id:) answered with:
#   4d29d0513aaa11e9b911f40f24344a08
# print(np.argmax(predict_data[0], axis=-1)[0])
# print(predict_data[3].argmax(axis=-1))


def get_sql(predict_data, cel_nums):
    """Decode the model prediction into an SQL-like dict.

    NOTE(review): most of the decoding (conn-op, sel/agg argmax, condition
    value extraction) is commented out; the sel/agg pair is hard-coded to
    [2, 5] and 'conds' is the placeholder 2, so the return value is debug
    output only.  Its shape (scalars) also differs from the gold format
    (lists) that is_equal() compares — confirm before relying on it.
    """
    #p_cond_conn_op = predict_data[0]  # 3
    # p_sel_agg = predict_data[0]       # (None, sel_len, 7)
    p_cond_cel = predict_data[0]      # (None, 412, 412)
    #value_op = predict_data[3]        # (None, 412 , 5)
    print(f"p_cond_cel.shape is {p_cond_cel.shape}")
    print(f"p_cond_cel[0][1: 5][:].shape is {p_cond_cel[0][1: 5][:].shape}")
    print(f"cel is {np.mean(p_cond_cel[0][1: 5][:], axis=0)}")
    #
   # p_cond_conn_op = np.argmax(p_cond_conn_op, axis=-1)[0]


    p_sel_agg_list = []
    # sel_agg_max = np.argmax(p_sel_agg, axis=-1)  # (1, 412, 1)
    # print("sel_agg_max", sel_agg_max)
    # for cel, agg in enumerate(sel_agg_max[0][:cel_nums]): # (412, 1)
        # print(cel, agg)
        # if agg != 6:  # inputs_data[5] headers
        #     p_sel_agg_list.append([cel, agg])
    p_sel_agg_list.append([2, 5])  # hard-coded debug stub: sel column 2, agg label 5
    if len(p_sel_agg_list) == 0:
        return None
    print(p_sel_agg_list)


    # Choose a predicted number of conditions for conds

    # v_str_len = 0
    # v_start = 0
    # # get the value's start and len
    # v_str_len_list, v_start_list, v_str_list = [], [], []
    # ops = {}
    #
    # for index, values in enumerate(value_op[0].argmax(axis=-1)):
    #     if values != 4:
    #         if v_start == 0:
    #             v_start = index
    #             v_start_list.append(v_start)
    #         ops[index] = values
    #         v_str_len += 1
    #     else:
    #         v_start = 0
    #         v_str_len_list.append(v_str_len)
    #         v_str_len = 0
    # for i in v_str_len_list:
    #     if i != 0:
    #         v_str_list.append(i)
    # print(v_start_list, v_str_list, ops)
    #
    # table_list = []
    # with open(table_filename, 'r', encoding='utf-8') as lines:
    #     for line in lines:
    #         text = json.loads(line)
    #         if table_id == text.get('id'):
    #             for table_column in text.get("rows"):
    #                 table_list += table_column
    #
    # conds = []
    # for i in range(len(v_start_list)):
    #     v_start = v_start_list[i]
    #     v_end = v_start_list[i] + v_str_list[i]
    #     str1 = question[v_start - 1: v_end - 1]
    #     editdistance_list = [editdistance.eval(str1, str(t)) for t in table_list]
    #     print(f"table list is {table_list}")
    #     print(f"editdistance {[editdistance.eval(str1, str(t)) for t in table_list]}")
    #     # print(f"table str is {table_list[]}")
    #     op = ops[v_start]
    #     print(f"p_cond_cel.shape is {p_cond_cel.shape}")
    #     print(f"p_cond_cel[0][v_start: v_end][:].shape is {p_cond_cel[0][v_start: v_end][:].shape}")
    #     str1 = table_list[editdistance_list.index(min(editdistance_list))]
    #     cel = np.mean(p_cond_cel[0][v_start: v_end][:], axis=0).argmax() - 1  # p_cond_cel[0][v_start: v_end][:].shape (4, 412)
    #     print(f"cel is {np.mean(p_cond_cel[0][v_start: v_end][:], axis=0)}")
    #     conds.append([cel, op, str1])
    return {"agg":p_sel_agg_list[0][1],"cond_conn_op": 1, "sel": p_sel_agg_list[0][0], "conds":2}


# Gold SQL for the example question, kept for eyeballing the prediction:
# ('or', [(0, 2, '大黄蜂'), (0, 2, '密室逃生')])
# "sql": {"agg": [5], "cond_conn_op": 2, "sel": [2], "conds": [[0, 2, "大黄蜂"], [0, 2, "密室逃生"]]}
sql = {"agg": [5], "cond_conn_op": 2, "sel": [2], "conds": [[0, 2, "大黄蜂"], [0, 2, "密室逃生"]]}


def is_equal(sql, pred_sql):
    """Return True when both SQL dicts agree on agg, cond_conn_op, sel and conds."""
    compared_keys = ('agg', 'cond_conn_op', 'sel', 'conds')
    return all(sql[key] == pred_sql[key] for key in compared_keys)


print(get_sql(predict_data, cel_nums))
