# -*- coding: utf-8 -*-
"""
@date: 2021/6/2 18:09
@file: dgcnn.py
@author: lilong
@desc: 
"""

import re
import os
import json
import tqdm
import codecs
import warnings
import numpy as np

import jieba_fast as jieba
from gensim.models import Word2Vec


import keras.backend as K
from keras.models import Model
from keras.callbacks import Callback
from keras.layers import Lambda, Input, Dropout, Dense

from dgcnnForReadingComprehension.dgcnn.optimizer_radam import RAdam
from dgcnnForReadingComprehension.dgcnn.process import tokenize, seq_padding, sent2vec, seq_and_vec, max_in_dict
from dgcnnForReadingComprehension.dgcnn.build import DilatedGatedConv1D, AttentionPooling1D, MixEmbedding, Evaluate


warnings.filterwarnings("ignore")  # silence the flood of warnings keras emits

# Initialize jieba's dictionary up front so the first tokenize call is fast
jieba.initialize()


# ------constants
TRAIN_FLAG = True  # True: train the model; False: load previously saved weights


def write_json(file_path, con):
    """Serialize `con` to `file_path` as human-readable UTF-8 JSON."""
    with open(file_path, 'w', encoding='utf-8') as fp:
        fp.write(json.dumps(con, ensure_ascii=False, indent=2))


class Config:
    """Basic configuration: hyper-parameters and data/resource paths."""

    mode = 3            # dev-split modulus: every `mode`-th shuffled sample goes to dev
    maxlen = 256        # maximum sentence length (characters)
    min_count = 16      # minimum character frequency kept in the vocabulary
    char_size = 128     # character-embedding dimension

    # Statistics / vocabulary artifacts
    raw_char_statis = '../raw_char_statis.json'
    filter_char_statis = '../filter_char_statis.json'
    random_order_path = "../random_order.json"

    # Training-set paths
    webQA_path = "../../data/dgcnn/datasets/WebQA.json"
    SogouQA_path = "../../data/dgcnn/datasets/SogouQA.json"

    # Test-set paths (small files used for pipeline debugging)
    webQA_path_test = "../../data/dgcnn/datasets/wb.json"
    SogouQA_path_test = "../../data/dgcnn/datasets/sg.json"

    # Word-vector paths
    word2vec_path = "../../data/dgcnn/word2vec_baike/word2vec_baike"
    word2vec_id2word = "../word2vec_baike/id2word.json"

    # jieba instance updated with the embedding vocabulary (set by DataBuild)
    jieba_new = None


class DataGenerator:
    """Shuffling batch generator over (question, passage, answer-span) samples.

    Yields keras-style ``([Q1, Q2, P1, P2, A1, A2], None)`` batches; the target
    is ``None`` because the loss is attached to the model via ``add_loss``.
    """

    def __init__(self, data, id2char, char2id, batch_size=128):
        """
        :param data: list of samples, each a dict with 'question' and 'passages'
        :param id2char: id -> char map (kept for reference, unused here)
        :param char2id: char -> id map; unknown characters fall back to id 1
        :param batch_size: number of samples per yielded batch
        """
        self.data = data
        self.id2char = id2char
        self.char2id = char2id
        self.batch_size = batch_size
        # Batches per epoch: ceiling division (last partial batch counts).
        self.steps = (len(self.data) + self.batch_size - 1) // self.batch_size

    def random_generate(self, s):
        """Randomly crop passage `s` to at most ``Config.maxlen`` characters.

        With probability 0.5 a random window of length in
        [ceil(maxlen/2), maxlen] is taken, otherwise the passage head.
        """
        maxlen = Config.maxlen
        l = maxlen // 2 + maxlen % 2  # minimum window length: ceil(maxlen / 2)
        if len(s) <= l:
            return s
        if np.random.random() > 0.5:
            i = np.random.randint(len(s) - l + 1)
            j = np.random.randint(l + i, min(len(s), i + maxlen) + 1)
            return s[i: j]
        return s[: maxlen]

    def __len__(self):
        return self.steps

    def __iter__(self):
        while True:
            # BUG FIX: np.random.shuffle mutates its argument in place, which
            # fails on an immutable `range` in Python 3 — materialize a list.
            idxs = list(range(len(self.data)))
            np.random.shuffle(idxs)
            Q1, Q2, P1, P2, A1, A2 = [], [], [], [], [], []
            for i in idxs:
                d = self.data[i]
                # Question: word tokens and char ids (1 = OOV).
                q_text_words = tokenize(d['question'])
                q_text = ''.join(q_text_words)
                qid = [self.char2id.get(c, 1) for c in q_text]
                # Passage: pick one at random, crop, tokenize.
                p = d['passages'][np.random.choice(len(d['passages']))]
                p_text_words = tokenize(self.random_generate(p['passage']))
                p_text = ''.join(p_text_words)
                pid = [self.char2id.get(c, 1) for c in p_text]
                # Answer labels: 1 at each occurrence's start (a1) / end (a2).
                a1, a2 = np.zeros(len(p_text)), np.zeros(len(p_text))
                if p['answer']:
                    for m in re.finditer(re.escape(p['answer']), p_text):
                        a1[m.start()] = 1
                        a2[m.end() - 1] = 1
                Q1.append(qid)
                Q2.append(q_text_words)
                P1.append(pid)
                P2.append(p_text_words)
                A1.append(a1)
                A2.append(a2)
                if len(Q1) == self.batch_size or i == idxs[-1]:
                    yield [seq_padding(Q1), sent2vec(Q2),
                           seq_padding(P1), sent2vec(P2),
                           seq_padding(A1), seq_padding(A2)], None
                    Q1, Q2, P1, P2, A1, A2 = [], [], [], [], [], []


class DataBuild:
    """Builds vocabularies, word embeddings and the train/dev split."""

    def __init__(self):
        # Word-vector lookup tables: id -> word and word -> id.
        # Ids start at 1 so that id 0 stays reserved for padding.
        word2vec = Word2Vec.load(Config.word2vec_path)
        self.id2word = {i + 1: j for i, j in enumerate(word2vec.wv.index2word)}
        self.word2id = {j: i for i, j in self.id2word.items()}
        if not os.path.exists(Config.word2vec_id2word):
            write_json(Config.word2vec_id2word, self.id2word)

        # Raw embedding matrix, e.g. shape (1056283, 256).
        word2vec = word2vec.wv.syn0
        self.word_size = word2vec.shape[1]  # embedding dimension

        # Prepend a zero row so id 0 (padding) maps to the all-zero vector.
        self.word2vec = np.concatenate([np.zeros((1, self.word_size)), word2vec])

        # Register every embedding word with jieba so tokenization agrees
        # with the embedding vocabulary.
        for w in self.word2id:
            if w not in jieba.dt.FREQ:
                jieba.add_word(w)
        Config.jieba_new = jieba  # expose the updated jieba instance

        # Small debug datasets; switch to Config.webQA_path / Config.SogouQA_path
        # for full training.
        self.webqa_data = json.load(open(Config.webQA_path_test, 'r', encoding='utf-8'))
        self.sogou_data = json.load(open(Config.SogouQA_path_test, 'r', encoding='utf-8'))

    def statis_char(self):
        """Character-frequency statistics and char<->id maps.

        Each sample couples one question with multiple passages and their
        corresponding answers.
        """
        if not os.path.exists(Config.filter_char_statis):
            # Count character frequencies over questions and passages.
            chars = {}
            for D in (self.webqa_data, self.sogou_data):
                # BUG FIX: `tqdm` is imported as a module, so the progress bar
                # is `tqdm.tqdm(...)` — calling the module raised TypeError.
                for d in tqdm.tqdm(D):
                    for c in d['question']:
                        chars[c] = chars.get(c, 0) + 1
                    for p in d['passages']:
                        for c in p['passage']:
                            chars[c] = chars.get(c, 0) + 1

            # Raw frequency info, for inspection.
            write_json(Config.raw_char_statis, chars)

            # Filter rare characters and build id maps.
            # Ids 0 and 1 are reserved (0: mask/padding, 1: OOV), so char ids
            # start at 2.
            chars = {i: j for i, j in chars.items() if j >= Config.min_count}
            self.id2char = {i + 2: j for i, j in enumerate(chars)}
            self.char2id = {j: i for i, j in self.id2char.items()}
            write_json(Config.filter_char_statis, [self.id2char, self.char2id])
        else:
            with open(Config.filter_char_statis, 'r', encoding='utf-8') as f:
                self.id2char, self.char2id = json.load(f)
            # CONSISTENCY FIX: JSON round-trip turns id2char's int keys into
            # strings; normalize so both code paths produce int keys.
            self.id2char = {int(i): c for i, c in self.id2char.items()}

        # Fixed random permutation of the Sogou data (persisted so the
        # train/dev split is reproducible across runs).
        if not os.path.exists(Config.random_order_path):
            self.random_order = list(range(len(self.sogou_data)))
            np.random.shuffle(self.random_order)
            write_json(Config.random_order_path, self.random_order)
        else:
            self.random_order = json.load(open(Config.random_order_path, 'r', encoding='utf-8'))

    def getTrainData(self):
        """Build the train/dev sets and the training batch generator."""

        # Split the (shuffled) Sogou data: every Config.mode-th sample -> dev.
        train_data = []
        dev_data = []
        for i, j in enumerate(self.random_order):
            if i % Config.mode != 0:
                train_data.append(self.sogou_data[j])
            else:
                dev_data.append(self.sogou_data[j])
        # Duplicate the Sogou portion, then add WebQA: mixes SogouQA and
        # WebQA at roughly a 2:1 ratio.
        train_data.extend(train_data)
        train_data.extend(self.webqa_data)
        self.train_data = train_data
        self.dev_data = dev_data

        # Training batch generator.
        self.train_iter = DataGenerator(train_data, self.id2char, self.char2id)
        # BUG FIX: the training script reads `train_D`, which was never set —
        # keep it as a backward-compatible alias of `train_iter`.
        self.train_D = self.train_iter


class ModelArc:
    """Model architecture: DGCNN reader scoring answer start/end positions."""

    def __init__(self, db):
        self.db = db  # DataBuild instance (provides char2id and word_size)

    def buildModel(self):
        """Build and return (forward model, training model with loss attached)."""

        # Input layers
        q1_in = Input(shape=(None,), name='question-char')  # question: char-id sequence
        q2_in = Input(shape=(None, self.db.word_size), name='question-word')  # question: word vectors, dim word_size

        p1_in = Input(shape=(None,), name='para-char')  # passage: char-id sequence
        p2_in = Input(shape=(None, self.db.word_size), name='para-word')  # passage: word vectors, dim word_size

        a1_in = Input(shape=(None,), name='answer-left')  # answer: start-position labels
        a2_in = Input(shape=(None,), name='answer-right')  # answer: end-position labels

        # Short aliases for the symbolic tensors
        q1, q2, p1, p2, a1, a2 = q1_in, q2_in, p1_in, p2_in, a1_in, a2_in

        # ------mask layers
        # A position is real iff its char id > 0 (0 is the padding id):
        # expand dims, compare against 0, cast to float for multiplication.
        q_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(q1)
        p_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(p1)

        # ------mixed char/word embedding layer
        # One embedding shared by question and passage; +2 accounts for the
        # reserved ids 0 (padding) and 1 (OOV).
        embeddings = MixEmbedding(len(self.db.char2id) + 2, Config.char_size)   # args: input dim, output dim
        q = embeddings([q1, q2])
        q = Dropout(0.1)(q)
        p = embeddings([p1, p2])
        p = Dropout(0.1)(p)

        # ------question encoder: stacked dilated gated convolutions,
        # each re-applying the question mask.
        q = DilatedGatedConv1D(rate=1, drop_gate=0.1)([q, q_mask])
        q = DilatedGatedConv1D(rate=2, drop_gate=0.1)([q, q_mask])
        q = DilatedGatedConv1D(rate=1, drop_gate=0.1)([q, q_mask])

        # -----question attention pooling: fixed-size question vector qv
        qv = AttentionPooling1D()([q, q_mask])

        # Passage encoding: inject the question vector into every position,
        # then project back to char_size.
        p = Lambda(seq_and_vec)([p, qv])  # concatenate qv onto the sequence
        p = Dense(Config.char_size, use_bias=False)(p)  # projection layer

        # Passage encoder: dilated gated convolutions with growing dilation
        # (1,2,4,8,16) to widen the receptive field, then rate 1 to smooth.
        p = DilatedGatedConv1D(rate=1, drop_gate=0.1)([p, p_mask])
        p = DilatedGatedConv1D(rate=2, drop_gate=0.1)([p, p_mask])
        p = DilatedGatedConv1D(rate=4, drop_gate=0.1)([p, p_mask])
        p = DilatedGatedConv1D(rate=8, drop_gate=0.1)([p, p_mask])
        p = DilatedGatedConv1D(rate=16, drop_gate=0.1)([p, p_mask])
        p = DilatedGatedConv1D(rate=1, drop_gate=0.1)([p, p_mask])

        # Combined question-passage encoding
        p = Lambda(seq_and_vec)([p, qv])

        # Pooled passage vector
        pv = AttentionPooling1D()([p, p_mask])

        pa = Dense(1, activation='sigmoid')(pv)  # passage-level answerability score
        pa1 = Dense(1, activation='sigmoid')(p)  # per-position start score
        pa2 = Dense(1, activation='sigmoid')(p)  # per-position end score
        pa1 = Lambda(lambda x: x[0] * x[1][..., 0])([pa, pa1])  # final start score = answerability * start
        pa2 = Lambda(lambda x: x[0] * x[1][..., 0])([pa, pa2])  # final end score = answerability * end

        # Forward (inference) model
        model = Model([q1_in, q2_in, p1_in, p2_in], [pa1, pa2])
        model.summary()

        # Training model: answer labels are extra inputs consumed by the loss
        train_model = Model([q1_in, q2_in, p1_in, p2_in, a1_in, a2_in], [pa1, pa2])
        lf = LossFun(a1_in, a2_in, pa1, pa2, p_mask)  # custom loss
        train_model.add_loss(lf.loss)
        train_model.compile(optimizer=RAdam(1e-3))

        return model, train_model


class LossFun:
    """Masked focal loss over answer start/end position scores."""

    def __init__(self, a1_in, a2_in, pa1, pa2, p_mask):
        """
        :param a1_in: start-position labels
        :param a2_in: end-position labels
        :param pa1: predicted start scores
        :param pa2: predicted end scores
        :param p_mask: passage mask (1 on real positions, 0 on padding)
        """
        mask_weights = p_mask[..., 0]
        mask_total = K.sum(p_mask)
        start_loss = K.sum(self.focal_loss(a1_in, pa1) * mask_weights) / mask_total
        end_loss = K.sum(self.focal_loss(a2_in, pa2) * mask_weights) / mask_total

        # Scaled by 100 purely for readability of logged values; Adam-style
        # optimizers are insensitive to a constant loss scale.
        self.loss = (start_loss + end_loss) * 100

    def focal_loss(self, y_true, y_pred):
        """Element-wise binary focal loss (alpha=0.25, gamma=2)."""
        alpha, gamma = 0.25, 2
        # Clip to avoid log(0).
        y_pred = K.clip(y_pred, 1e-8, 1 - 1e-8)
        positive_term = alpha * y_true * K.log(y_pred) * (1 - y_pred) ** gamma
        negative_term = (1 - alpha) * (1 - y_true) * K.log(1 - y_pred) * y_pred ** gamma
        return -positive_term - negative_term


class Predict:
    """Inference wrapper: extracts and scores answers for a question."""

    def __init__(self, db, model):
        self.db = db        # DataBuild instance (provides char2id)
        self.model = model  # forward model: [Q1, Q2, P1, P2] -> [A1, A2]

    def extract_answer(self, q_text, p_texts, maxlen=12, threshold=0.1):
        """Extract candidate answers for `q_text` from the passages `p_texts`.

        :param q_text: question string
        :param p_texts: list of passage strings
        :param maxlen: maximum answer length in characters
        :param threshold: minimum boundary probability for a candidate position
        :return: dict mapping candidate answer -> aggregated score
        """
        Q1, Q2, P1, P2 = [], [], [], []
        # Question features: word tokens and char ids (1 = OOV).
        q_text_words = tokenize(q_text)
        q_text = ''.join(q_text_words)
        qid = [self.db.char2id.get(c, 1) for c in q_text]
        for p_text in p_texts:
            # Passage features (question repeated once per passage).
            p_text_words = tokenize(p_text)
            p_text = ''.join(p_text_words)
            pid = [self.db.char2id.get(c, 1) for c in p_text]
            Q1.append(qid)
            Q2.append(q_text_words)
            P1.append(pid)
            P2.append(p_text_words)
        # Pad/vectorize and score all passages in one batch.
        Q1 = seq_padding(Q1)
        Q2 = sent2vec(Q2)
        P1 = seq_padding(P1)
        P2 = sent2vec(P2)
        A1, A2 = self.model.predict([Q1, Q2, P1, P2])
        # Collect per-passage candidate spans.
        # NOTE(review): the raw passage `p` is indexed with scores computed on
        # the tokenized-and-rejoined text — assumes tokenize() preserves the
        # character sequence; confirm against tokenize's implementation.
        Result = []
        for a1, a2, p in zip(A1, A2, p_texts):
            a1, a2 = a1[: len(p)], a2[: len(p)]
            l_idxs = np.where(a1 > threshold)[0]
            r_idxs = np.where(a2 > threshold)[0]
            result = {}
            for i in l_idxs:
                # Valid end positions: at/after the start, within maxlen chars.
                cond = (r_idxs >= i) & (r_idxs < i + maxlen)
                for j in r_idxs[cond]:
                    k = p[i: j + 1]
                    result[k] = max(result.get(k, 0), a1[i] * a2[j])
            if result:
                Result.append(result)
        # Merge candidates across passages; sum-of-squares / (sum + 1) favours
        # answers that score highly and appear in multiple passages.
        R = {}
        for result in Result:
            for k, v in result.items():
                R[k] = R.get(k, []) + [v]
        R = {
            k: (np.array(v) ** 2).sum() / (sum(v) + 1)
            for k, v in R.items()
        }
        return R

    def predict(self, data, filename, threshold=0.1):
        """Write one `id<TAB>answer` line per sample in `data` to `filename`."""
        with codecs.open(filename, 'w', encoding='utf-8') as f:
            # BUG FIX: `tqdm` is imported as a module, so the progress bar is
            # `tqdm.tqdm(...)` — calling the module itself raised TypeError.
            for d in tqdm.tqdm(data):
                q_text = d['question']
                p_texts = [p['passage'] for p in d['passages']]
                a = self.extract_answer(q_text, p_texts, threshold=threshold)
                a = max_in_dict(a)
                if a:
                    s = u'%s\t%s\n' % (d['id'], a)
                else:
                    s = u'%s\t\n' % (d['id'])
                f.write(s)


if __name__ == '__main__':

    # Build vocabularies / embeddings from the Baike word vectors
    data_build = DataBuild()
    data_build.statis_char()

    # Build the train/dev sets and the batch generator
    data_build.getTrainData()

    # Build the model graphs
    ma = ModelArc(data_build)
    model, train_model = ma.buildModel()

    # Evaluator callback wired to the dev set and the prediction routine
    pp = Predict(data_build, model)
    evaluator = Evaluate()
    evaluator.objTransport(train_model, data_build.dev_data, pp.predict)

    if TRAIN_FLAG:
        # BUG FIX: getTrainData() stores the generator as `train_iter`;
        # the attribute `train_D` referenced here was never set.
        train_model.fit_generator(data_build.train_iter.__iter__(),
                                  steps_per_epoch=len(data_build.train_iter),
                                  epochs=120,
                                  callbacks=[evaluator])
    else:
        train_model.load_weights('best_model.weights')
