#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: uestcwdh
# @Time: 2020/4/6 10:14
import json
import gc
import jsonlines
import streamlit as st
import tensorflow as tf
from transformers import BertTokenizer, BertConfig
from models import TFBertForNaturalQuestionAnswering

st.title("问答推理系统")
# path = r'F:\dataset_download\SQuaD2.0\dev-v2.0.json'
#
# tokenizer = BertTokenizer.from_pretrained(
#     r'A:\pycharm\codes\natural-question-answering\tokenizer_tf2_qa\bert_large_uncased_vocab.txt')
# config = BertConfig.from_json_file(r'F:\weight_files\nqa2_kaggle_competition\bert_large_uncased_config.json')
# model = TFBertForNaturalQuestionAnswering.from_pretrained(r'F:\weight_files\nqa2_kaggle_competition\weights.h5',
#                                                           config=config)
# Hard-coded local paths to the SQuAD 2.0 dev set, the BERT-large vocab,
# its config, and the fine-tuned QA weights.
# NOTE(review): consider moving these paths to a config file or environment
# variables so the app runs on machines other than this one.
path = r'D:\codes\natural-question-answering\SQuaD2.0\dev-v2.0.json'

tokenizer = BertTokenizer.from_pretrained(
    r'D:\codes\natural-question-answering\tokenizer_tf2_qa\bert_large_uncased_vocab.txt')
config = BertConfig.from_json_file(r'D:\codes\natural-question-answering\nqa2_kaggle_competition\bert_large_uncased_config.json')
model = TFBertForNaturalQuestionAnswering.from_pretrained(r'D:\codes\natural-question-answering\nqa2_kaggle_competition\weights.h5',
                                                          config=config)


@st.cache
def get_csqa_data_attr(csqa_path):
    """Load CommonsenseQA-format jsonl data.

    Args:
        csqa_path: path to a CommonsenseQA-style ``.jsonl`` file where each
            line has an ``id`` and a ``question`` with ``stem`` and ``choices``.

    Returns:
        A tuple ``(q2id_dict, choice_lst)``: ``q2id_dict`` maps each question
        stem to its question id; ``choice_lst`` holds one ``{label: text}``
        dict per question, in file order.
    """
    q2id_dict, choice_lst = {}, []
    with open(csqa_path, "r", encoding="utf-8") as f:
        for item in jsonlines.Reader(f):
            question = item['question']
            q2id_dict[question['stem']] = item['id']
            # Iterate the actual choice list instead of assuming exactly 5
            # options per question (the previous hard-coded range(5) broke
            # on questions with a different number of choices).
            choice_lst.append({c['label']: c['text'] for c in question['choices']})
    return q2id_dict, choice_lst


def get_pred_by_id(pred_file, qid):
    """Return the stored prediction for question id ``qid``.

    Args:
        pred_file: path to a CSV file whose first column is the question id
            and whose last column is the predicted answer.
        qid: question id to look up.

    Returns:
        The prediction string (without a trailing newline), or ``None`` when
        the id is not present in the file.
    """
    with open(pred_file, 'r', encoding='utf-8') as f:
        # Iterate the file lazily instead of loading it whole via readlines().
        for line in f:
            parts = line.rstrip('\n').split(',')
            if parts[0] == qid:
                # rstrip above drops the trailing newline that previously
                # leaked into the displayed prediction.
                return parts[-1]


def get_choices_idx_by_ques(csqa_data, question):
    """Return the 0-based line index of ``question`` in a CSQA jsonl file.

    Args:
        csqa_data: path to a CommonsenseQA-style ``.jsonl`` file (one JSON
            object per line).
        question: question stem to search for.

    Returns:
        The index of the first line whose ``question.stem`` equals
        ``question``, or ``None`` when no line matches.
    """
    with open(csqa_data, 'r', encoding='utf-8') as f:
        # Each line is a standalone JSON object, so the stdlib json module
        # (already imported at the top of the file) is sufficient here.
        for idx, line in enumerate(f):
            if json.loads(line)['question']['stem'] == question:
                return idx


# Load the CSQA test split once; questions map to ids, choices are per-row.
que2id, choices = get_csqa_data_attr('conceptnet/commonsenseQA/test_data.jsonl')

st.sidebar.subheader("常识问答")
# Iterating a dict yields its keys directly, in insertion (file) order.
questions = list(que2id)
ques = st.sidebar.selectbox("常识问题选择", questions)


# ---- CommonsenseQA section: dataset stats plus the selected question ----
st.header("常识问答")
st.subheader("CSQA数据集")
st.write("train/dev/test：9741/1221/1140")

st.subheader("验证集上准确率:")
st.write('80.1')

st.header("演示区")
st.subheader("问题")
st.write(ques)

st.subheader("选项")
# Find the selected question's line index so its answer choices can be shown.
q_index = get_choices_idx_by_ques('conceptnet/commonsenseQA/test_data.jsonl', ques)
# NOTE(review): q_index is None when the question is absent from the file and
# choices[None] would raise TypeError — presumably impossible since `ques`
# was read from the same file; confirm.
st.write(choices[q_index])


# Show the pre-computed model prediction for the selected question id.
pred = get_pred_by_id('commonsense_output/predictions.csv', que2id[ques])
st.subheader("预测结果")
st.write(pred)


@st.cache(persist=True)
def load_data(fname):
    """Load a SQuAD-format JSON file and return its top-level 'data' list.

    Args:
        fname: path to a SQuAD 2.0 style JSON file.

    Returns:
        The list of article dicts stored under the 'data' key.
    """
    # Open read-only: the previous 'r+' mode needlessly required write
    # permission on the dataset file.
    with open(fname, 'r', encoding='utf-8') as f:
        return json.load(f)['data']


@st.cache(persist=True)
def load_title(data):
    """Return the 'title' of every article in the SQuAD 'data' list.

    Args:
        data: list of SQuAD article dicts.

    Returns:
        List of title strings, in input order.
    """
    # Comprehension over the articles instead of an index-based append loop.
    return [article['title'] for article in data]


# Load the SQuAD dev set once (cached) and let the user pick an article topic.
data = load_data(path)
titles = load_title(data)
st.sidebar.subheader("领域问答")
title = st.sidebar.selectbox(
    "选择一个话题",
    titles
)


@st.cache(persist=True)
def load_context(data, topic):
    """Return every paragraph context belonging to article ``topic``.

    Args:
        data: list of SQuAD article dicts ('title', 'paragraphs').
        topic: article title to filter on.

    Returns:
        List of 'context' strings of all paragraphs under that title,
        in file order.
    """
    # Single comprehension replaces the nested index-based loops.
    return [
        para['context']
        for article in data
        if article['title'] == topic
        for para in article['paragraphs']
    ]


# Paragraph contexts under the chosen topic; the user picks the passage the
# model will answer over.
contexts = load_context(data, title)
context = st.sidebar.selectbox(
    "选择背景文章",
    contexts
)


@st.cache(persist=True)
def load_question(data, topic, context):
    """Return the questions attached to ``context`` under article ``topic``.

    Args:
        data: list of SQuAD article dicts.
        topic: article title to filter on.
        context: exact paragraph context string to match.

    Returns:
        List of question strings for all matching qa entries, in file order.
    """
    # Single comprehension replaces the triple index-based loop.
    return [
        qa['question']
        for article in data
        if article['title'] == topic
        for para in article['paragraphs']
        if para['context'] == context
        for qa in para['qas']
    ]


# Questions attached to the chosen (topic, context) pair.
questions = load_question(data, title, context)
question = st.sidebar.selectbox(
    "选择问题",
    questions
)


def true_answer(data, title, context, question):
    """Display the reference answer(s) for the selected SQuAD question.

    Walks the SQuAD 'data' list to the qa entries matching
    (title, context, question) and writes each reference answer to the
    Streamlit page; writes a placeholder message when a matching question
    has no answers (SQuAD 2.0 unanswerable questions).
    """
    for article in data:
        if article['title'] != title:
            continue
        for para in article['paragraphs']:
            if para['context'] != context:
                continue
            for qa in para['qas']:
                if qa['question'] != question:
                    continue
                answers = qa['answers']
                if len(answers) == 0:
                    st.write('没有参考答案')
                for n, answer in enumerate(answers):
                    st.subheader("问题的参考答案[{}]".format(n))
                    st.write(answer['text'])


# ---- SQuAD demo section: dataset stats, current selection, inference ----
st.header("领域问答")
st.subheader("Squad2.0数据集")
st.write("train/dev：130319/11873")
st.subheader("验证集上准确率")
st.write('72.9')
st.subheader("F1得分：")
st.write('76.2')
st.header("演示区")
st.subheader("选择的话题")
st.write(title)
st.subheader("选择的背景文章")
st.write(context)
st.subheader("选择的问题")
st.write(question)
# Wrap each '.'-separated sentence of the context in <p>…</p> tags and
# bracket the whole passage with ''' markers.
# NOTE(review): these markup tokens are fed straight into the tokenizer
# below, so the model input contains the tags as text — presumably this
# matches the preprocessing the model was trained with; confirm.
pre = "'''"
open_tag = "<p>"
close_tag = "</p>"
paragraphs = context.split('.')
paragraph = []
for index, item in enumerate(paragraphs):
    if index == 0:
        paragraph.append(pre)
    paragraph.append(open_tag + paragraphs[index] + '.' + close_tag)
    if index == len(paragraphs) - 1:
        paragraph.append(pre)
if paragraph:
    paragraph_tokens = tokenizer.tokenize(''.join(paragraph))
    if question:
        # Standard BERT QA input layout:
        # [CLS] question tokens [SEP] paragraph tokens [SEP]
        question_tokens = tokenizer.tokenize(question)
        tokens = ['[CLS]'] + question_tokens + ['[SEP]'] + paragraph_tokens + ['[SEP]']
        input_word_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_word_ids)
        # Segment ids: 0 for [CLS] + question + [SEP], 1 for paragraph + [SEP].
        input_type_ids = [0] * (1 + len(question_tokens) + 1) + [1] * (len(paragraph_tokens) + 1)
        # Add a leading batch dimension of 1 to each model input.
        input_word_ids, input_mask, input_type_ids = map(lambda t: tf.expand_dims(
            tf.convert_to_tensor(t, dtype=tf.int32), 0), (input_word_ids, input_mask, input_type_ids))
        outputs = model([input_word_ids, input_mask, input_type_ids])
        # using `[1:]` will enforce an answer. `outputs[:][0][0]` is the ignored '[CLS]' token logit.
        # The +1 restores the offset introduced by dropping the [CLS] logit.
        short_start = tf.argmax(outputs[0][0][1:]) + 1
        short_end = tf.argmax(outputs[1][0][1:]) + 1
        # Slice the predicted answer span out of the token sequence and
        # detokenize it for display.
        answer_tokens = tokens[short_start: short_end + 1]
        answer = tokenizer.convert_tokens_to_string(answer_tokens)
        st.subheader("模型预测结果")
        st.write(answer)
        # Also show the dataset's reference answers for comparison.
        true_answer(data, title, context, question)


# Drop the references to the large tokenizer/model objects at the end of the
# script run and force a collection to reduce memory pressure between
# Streamlit reruns.
del tokenizer, model
gc.collect()
