import json
import math

import jieba
import numpy as np

import treebuilder
from config import Config
from dataset.collator import ClassificationType
from dataset.data_preprocessor import clean_punctuations
from predict import Predictor
from treebuilder import get_deepest_data

from flask import Flask, render_template, request, Response, jsonify

# Module-level WSGI application object; the route below registers against it.
app = Flask(__name__)


@app.route('/nlp/get_structured_data', methods=['GET'])
def hello_world():
    """Placeholder GET endpoint for the structured-data service.

    Returns:
        A JSON response. BUG FIX: the original returned the ``jsonify``
        function object itself (the call was missing), which Flask cannot
        serialize into an HTTP response; it must be invoked.
    """
    return jsonify({'status': 'ok'})


if __name__ == '__main__':
    # Offline smoke test of the prediction pipeline: tokenize a sample
    # report, run the classifier in batches, and print the label tree.
    config_file = 'conf/train.json'

    config = Config(config_file=config_file)
    predictor = Predictor(config)
    batch_size = config.eval.batch_size

    # Build the prototype label tree from every known label id.
    id_to_label_map = predictor.dataset.id_to_label_map
    label_list = list(id_to_label_map.values())
    prime_json_tree = treebuilder.build_tree(label_list)

    input_texts = []

    origin_texts_id_list = [1, 2, 3]
    origin_text_list = [
        '两上肺野见斑点、片絮状密度增高影，右上肺拟见一结节影，余肺纹理增多。两侧肺门和纵隔影未见明显异常；心脏大小、形态在正常范围之内；膈肌平滑，右侧肋膈角锐利，左肋膈角浅钝。']
    for index, origin_text in enumerate(origin_text_list):
        # Segment with jieba, strip punctuation, then collapse any run of
        # spaces down to a single separator.
        text_seg_list = jieba.cut(origin_text.strip())
        text_data = ' '.join(text_seg_list)
        text_data = clean_punctuations(text_data)
        text_data = ' '.join(text_data.split())
        sample = {
            'doc_label': ['TEST' + str(origin_texts_id_list[index])],
            'doc_token': text_data.split(' '),
            'doc_keyword': [],
            'doc_topic': [],
        }
        input_texts.append(json.dumps(sample, ensure_ascii=False))

    epoches = math.ceil(len(input_texts) / batch_size)
    for i in range(epoches):
        batch_texts = input_texts[i * batch_size:(i + 1) * batch_size]
        predict_probs = predictor.predict(batch_texts)
        # BUG FIX: this post-processing loop was previously OUTSIDE the
        # batch loop, so `predict_probs` was overwritten each iteration and
        # only the final batch was ever post-processed. It now runs per batch
        # so every input text gets its labels extracted and printed.
        for predict_prob in predict_probs:
            # Keep the top-k label ids whose probability clears the threshold.
            predict_label_ids = []
            predict_label_idx = np.argsort(-predict_prob)
            for j in range(config.eval.top_k):
                if predict_prob[predict_label_idx[j]] > config.eval.threshold:
                    predict_label_ids.append(predict_label_idx[j])
            predict_label_list = [predictor.dataset.id_to_label_map[label_id]
                                  for label_id in predict_label_ids]
            # Collapse to the deepest labels in the hierarchy only.
            predict_label_list = get_deepest_data(predict_label_list)
            print(predict_label_list)
            # Walk the prototype tree once per predicted label to build the
            # structured output.
            json_tree = prime_json_tree
            for predict_label in predict_label_list:
                json_tree = treebuilder.traverse_tree(json_tree, predict_label)
            print(json.dumps(json_tree))