import json
import numpy as np
import jieba
import editdistance
from keras_bert import Tokenizer, load_vocabulary
from keras.preprocessing import sequence
# import matplotlib.pyplot as plt
batch_index = 0

dict_filename = '../data/chinese_wwm_ext_L-12_H-768_A-12/vocab.txt'


# 2 tokenization and label encoding
class MyTokenizer(Tokenizer):
    def _tokenize(self, text):
        R = []
        for token in text:
            if token in self._token_dict:
                R.append(token)
            elif self._is_space:
                R.append('unused1')
            else:
                R.append('UNK')
        return R


# Load the BERT vocabulary (token -> id mapping) from vocab.txt and build the
# character-level tokenizer used throughout this module (side effect at import).
token_dict = load_vocabulary(dict_filename)
# (debug) reverse mapping id -> token:
# token_dict_reserve = {}
# for keys, values in token_dict.items():
#     token_dict_reserve[values] = keys
# print(token_dict_reserve)
tokenizer = MyTokenizer(token_dict)

# Value extraction: 'real'-typed condition values are used as-is; 'text'-typed
# values are fuzzy-matched against the question text.



def most_similar(w, wlist):
    """Return the candidate in `wlist` closest to `w` by edit distance.

    Falls back to `w` itself when the candidate list is empty. Ties are
    resolved in favour of the earliest candidate (first minimum).
    """
    if not wlist:
        return w
    return min(wlist, key=lambda cand: editdistance.eval(w, cand))


def most_similar_2(word, sentence):
    """Find the fragment of `sentence` most similar to `word`.

    The sentence is segmented with jieba; the candidate pool is every
    individual segment plus every contiguous 2-, 3- and 4-gram of
    segments, so the returned fragment has word-aligned boundaries.
    """
    segments = jieba.lcut(sentence)
    candidates = list(segments)
    for n in (2, 3, 4):
        grams = zip(*(segments[k:] for k in range(n)))
        candidates.extend(''.join(g) for g in grams)
    return most_similar(word, candidates)


# token_id, segment_id, header_id, header_mask, cond_conn_op, sel_agg, cond_op
# output_sel_agg [None, h_len, 1]  output_cond_conn_op  [None, 1]   output_cond_op [None, h_len, 1]
def get_data_id(filename, methods, batch_size):
    """Read an NL2SQL jsonl file and build padded model inputs/labels.

    Each input line is a JSON dict with 'question', 'header', 'types' and
    'sql' keys (conds entries are [column, operator, value] triples).

    Args:
        filename: path to the jsonl data file.
        methods: 'train' returns inputs + labels; 'evaluate' returns
            inputs + the raw question strings; any other value returns None.
        batch_size: cycle length for the row index stored in header_id
            (pairs [row_in_batch, token_position], presumably for a
            batched gather in the model — TODO confirm against the model code).

    Returns:
        'train': list of 10 numpy arrays (inputs then labels).
        'evaluate': ([5 input arrays], question_list).
    """
    global max_len
    max_len = 160  # every sequence is padded/truncated to this length
    token_id_list, segment_id_list, header_id_list, header_mask_list = [], [], [], []
    output_sel_agg_list, output_cond_conn_op_list, output_cond_cel_list, output_cond_nums_list = [], [], [], []
    value_op_list, value_mask_list = [], []
    question_list = []
    with open(filename, 'r', encoding='utf-8') as f:
        for line in f:
            text = json.loads(line)
            # condition values of the SQL: conds = [[col, op, value], ...]
            value = [text.get('sql')['conds'][i][2] for i in range(len(text.get('sql')['conds']))]
            question = text.get('question')
            question_list.append(question)
            # question tokens use segment id 0
            questions_token_id, quesiton_segment_id = tokenizer.encode(text.get('question'))
            # per-header token_id / segment_id
            header_token_id, header_segment_id = [], []
            # flattened header token_id / segment_id
            header_token_id_list, header_segment_id_list = [], []
            # replace each header's [CLS] token id: 11 marks a 'text' column,
            # 12 marks a 'real' column (these markers are located again below)
            for i in range(len(text.get('header'))):
                header_token_id_, header_segment_id_ = tokenizer.encode(text.get('header')[i])
                if text.get('types')[i] == 'text':
                    header_token_id_[0] = 11
                else:
                    header_token_id_[0] = 12
                # NOTE(review): the segment-id flip to 1 for headers is
                # commented out, yet padding below pads segment ids with 1 —
                # confirm this mismatch is intentional.
                # for i in range(len(header_segment_id_)):
                #     header_segment_id_[i] = 1 - header_segment_id_[i]
                header_token_id.append(header_token_id_)
                header_segment_id.append(header_segment_id_)

            # concatenate all headers after the question
            for i in range(len(header_token_id)):
                header_token_id_list += header_token_id[i]
                header_segment_id_list += header_segment_id[i]

            token_id = questions_token_id + header_token_id_list
            segment_id = quesiton_segment_id + header_segment_id_list

            # value_op: per-token condition operator (4 = no condition);
            # output_cond_cel: per-token condition column + 1 (0 = none).
            # The +1 offset in the slices below accounts for the leading [CLS].
            value_op = [4] * (len(token_id))
            output_cond_cel = [0] * (len(token_id))
            for i in range(len(value)):
                # if the SQL value occurs verbatim in the question, label it directly
                if value[i] in text.get('question'):
                    # find the value index in the question
                    index = text.get('question').index(value[i])
                    # assign op value over the value's span
                    value_op[index+1: index+1+len(value[i])] = [text.get('sql')['conds'][i][1]] * len(value[i])
                    output_cond_cel[index + 1: index + 1 + len(value[i])] = [text.get('sql')['conds'][i][0] + 1] * len(value[i])
                # otherwise locate the most similar fragment of the question
                # and label that fragment instead
                else:
                    temp = most_similar_2(value[i], text.get('question'))
                    index = text.get('question').index(temp)
                    # BUGFIX: the labelled span must cover the matched fragment
                    # `temp`, not the original value — their lengths can differ,
                    # which previously over/under-labelled the span.
                    value_op[index + 1: index + 1 + len(temp)] = [text.get('sql')['conds'][i][1]] * len(temp)
                    output_cond_cel[index + 1: index + 1 + len(temp)] = [text.get('sql')['conds'][i][0] + 1] * len(temp)
            # mask: 1 wherever a condition operator was assigned
            value_mask = [0 if value_op[i] == 4 else 1 for i in range(len(value_op))]

            # header_id: [row_in_batch, position] of every header marker token
            # (11/12); the row index cycles through 0..batch_size-1
            global batch_index
            if batch_index == batch_size:
                batch_index = 0
            header_id = []
            for i, j in enumerate(token_id):
                if j == 11 or j == 12:
                    if i < max_len:
                        header_id.append([batch_index, i])
            batch_index += 1
            # one mask bit per surviving header marker
            header_mask = [1] * len(header_id)

            # e.g. "sql": {"agg": [5], "cond_conn_op": 2, "sel": [2],
            #              "conds": [[0, 2, "..."], [0, 2, "..."]]}
            sql = text.get("sql")

            output_cond_conn_op_list.append([sql.get("cond_conn_op")])
            output_cond_nums_list.append([len(sql.get("conds"))])

            # per-header aggregation label; 6 = column not selected
            output_sel_agg = [[6]] * len(header_id)
            for i in zip(sql.get("agg"), sql.get("sel")):
                for j in range(len(header_id)):
                    if i[1] == j:
                        output_sel_agg[j] = [i[0]]

            token_id_list.append(token_id)
            segment_id_list.append(segment_id)
            header_id_list.append(header_id)
            header_mask_list.append(header_mask)
            output_sel_agg_list.append(output_sel_agg)
            output_cond_cel_list.append(output_cond_cel)
            value_op_list.append(value_op)
            value_mask_list.append(value_mask)

    # pad everything to max_len (post-padding / post-truncation)
    token_id_list = np.array(sequence.pad_sequences(token_id_list, maxlen=max_len, padding='post', truncating='post'))
    segment_id_list = np.array(sequence.pad_sequences(segment_id_list, maxlen=max_len, padding='post', truncating='post', value=1))
    header_id_list = np.array(sequence.pad_sequences(header_id_list, maxlen=max_len, padding='post', truncating='post'))
    header_mask_list = np.array(sequence.pad_sequences(header_mask_list, maxlen=max_len, padding='post', truncating='post'))
    value_mask_list = np.array(sequence.pad_sequences(value_mask_list, maxlen=max_len, padding='post', value=0, truncating='post'))
    output_cond_conn_op_list = np.array(output_cond_conn_op_list)
    output_sel_agg_list = np.array(sequence.pad_sequences(output_sel_agg_list, maxlen=max_len, padding='post', value=6, truncating='post'))
    output_cond_cel_list = np.array(sequence.pad_sequences(output_cond_cel_list, maxlen=max_len, padding='post', value=0, truncating='post'))
    value_op_list = np.array(sequence.pad_sequences(value_op_list, maxlen=max_len, padding='post', value=4, truncating='post'))
    output_cond_nums_list = np.array(output_cond_nums_list)
    if methods == "train":
        return [token_id_list, segment_id_list, header_id_list, header_mask_list, value_mask_list,
            output_cond_conn_op_list, output_sel_agg_list, output_cond_cel_list, value_op_list, output_cond_nums_list]  # 10
    if methods == "evaluate":
        return [token_id_list, segment_id_list, header_id_list, header_mask_list, value_mask_list], question_list


# data = get_data_id(data_filename)
# sequence_len = {"-100":0, "100-160": 0 ,"160-200": 0, "200-300":0, "300+":0}
# print(len(data[0][0]))
# for i in range(len(data[0])):
#     if len(data[0][i]) <= 100:
#         sequence_len["-100"] += 1
#     elif (len(data[0][i]) > 100 and len(data[0][i]) <= 160):
#         sequence_len["100-160"] += 1
#     elif (len(data[0][i]) > 160 and len(data[0][i]) <= 200):
#         sequence_len["160-200"] += 1
#     elif (len(data[0][i]) > 200 and len(data[0][i]) <= 300):
#         sequence_len["200-300"] += 1
#     else:
#         sequence_len["300+"] += 1
#
# print(sequence_len)
# sequence_len = {"-160":0, "160_200": 0, "200+":0}
# sequence_len["160_200"] += 1
# print(sequence_len) {'-100': 32556, '100-160': 8245, '160-200': 499, '200-300': 202, '300+': 20}
# x = ['-100', '100-160', '160-200', '200-300', '300+']
# height = [round((32556/41522) * 100, 2), round((8245/41522) * 100, 2),
#           round((499/41522) * 100, 2) , round((202/41522)*100, 2),
#           round((20/41522)*100, 2)]
#
# plt.bar(x, height, width=0.5)
#
# for x, y in zip(x, height):
#     plt.text(x, y,  y, ha='center', va='bottom')
# plt.show()

