import os, collections,sys
import time
import re,json
sys.path.append("../") # needed to import bert modules

from flask import Flask, render_template, request,redirect, jsonify, Response
import tokenization
import modeling
from distutils import util

from data_new import process_data_for_prediction

import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu.tpu_estimator import export_estimator_savedmodel
from util import first_occurence, join

print(tf.__version__)
# tf.enable_eager_execution()
from run_squad import model_fn_builder, \
    convert_examples_to_features,FeatureWriter, \
    input_fn_builder, write_predictions, SquadExample, \
    read_squad_examples, FLAGS, InputFeatures





# Shared module state, populated by main() before the server starts:
# tokenizer/estimator for featurization, topK as the default answer count,
# and predict_fn — the saved-model predictor used by the /predict route.
# (A `global` statement at module scope is a no-op and has been removed.)
session = None
tokenizer = None
estimator = None
topK = None
predict_fn = None
model = None  # referenced via `global model` in main(); define it here so the name exists

print("tf version:\t", tf.__version__)
app = Flask(__name__)


# Raw per-example model output: the feature's unique id plus the
# start/end position logits produced by the BERT QA head.
RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "start_logits", "end_logits"])

@app.route('/')
def index():
    """Serve the paragraph-submission form."""
    template_name = 'post_paragraph.html'
    return render_template(template_name)


@app.route('/', methods=['POST'])
def predict_post():
    """Echo the posted 'text' form field back, lower-cased."""
    submitted = request.form['text']
    return submitted.lower()



@app.route('/predict')
def predict(topk=10, _split_on_paragraphs=False):
    """Answer the pre-defined questions over the paragraph in the `p` query param.

    Query parameters:
        p: input text; may contain several paragraphs separated by blank lines.
            Required — a missing `p` yields a 400 instead of a server crash.
        splitParagraphs: boolean-ish string ("1"/"true"/...); when true the
            input is split on blank lines and each paragraph is predicted
            separately. Missing -> `_split_on_paragraphs`.
        topK: candidate answers to keep per question; -1 means "all"
            (capped at 1000). Missing -> `topk`.
        maxTokens: maximum token count of an accepted answer span.
            Missing -> no upper limit (previously this crashed with
            `int(None)`).
        dispPred: boolean-ish string; when true, return the raw n-best JSON
            of the last processed paragraph instead of the aggregated
            `answers` dict.

    Returns:
        A Flask JSON response (shape depends on `dispPred`, see above).
    """
    orig_paragraph = request.args.get('p')
    if orig_paragraph is None:
        # Nothing to predict on — fail fast with a client error.
        return jsonify({'error': "missing required query parameter 'p'"}), 400

    def _bool_arg(name, default=False):
        # distutils' strtobool raises on None, so a missing query parameter
        # falls back to `default` instead of producing a 500.
        raw = request.args.get(name)
        return default if raw is None else util.strtobool(raw) == 1

    split_on_paragraphs = _bool_arg('splitParagraphs', _split_on_paragraphs)
    dispPred = _bool_arg('dispPred')

    _topK_ = request.args.get('topK')
    topK = int(_topK_) if _topK_ is not None else topk
    if topK == -1:
        topK = 1000  # -1 is the "give me everything" convention

    # Hoisted out of the filter lambda below (loop-invariant); a missing
    # maxTokens means "no limit".
    _maxTokens_ = request.args.get('maxTokens')
    max_tokens = int(_maxTokens_) if _maxTokens_ is not None else sys.maxsize

    output_file_dir = "/tmp/predict/"
    tf.gfile.MakeDirs(output_file_dir)

    answers = {}
    all_nbest_json = {}  # last paragraph's n-best output; returned when dispPred
    start_time = time.time()
    # When not splitting, use a sentinel pattern that can never match so the
    # whole input stays one "paragraph".
    split_on = r"\n{2,}" if split_on_paragraphs else "35353535353353535"
    for paragraph in re.split(split_on, re.sub(r'\r+', '', orig_paragraph)):
        paragraph = re.sub(r'\s+', ' ', paragraph.strip())

        prediction_data = process_data_for_prediction(paragraph)

        orig_examples = read_squad_examples(
            input_file=None, is_training=False, data=prediction_data)

        orig_features = []

        def append_feature(feature):
            orig_features.append(feature)

        # Fills orig_features through the callback above.
        convert_examples_to_features(
            examples=orig_examples,
            tokenizer=tokenizer,
            max_seq_length=FLAGS.max_seq_length,
            doc_stride=FLAGS.doc_stride,
            max_query_length=FLAGS.max_query_length,
            is_training=False,
            output_fn=append_feature)

        # Serialize each feature as a tf.Example for the saved-model
        # predictor's `examples` placeholder (lazy generator — one example
        # is materialized at a time).
        eval_examples = (
            tf.train.Example(features=tf.train.Features(feature={
                "unique_ids": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[feature.unique_id])),
                "input_ids": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=feature.input_ids)),
                "input_mask": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=feature.input_mask)),
                "segment_ids": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=feature.segment_ids)),
            })).SerializeToString()
            for feature in orig_features)

        all_results = []
        for example in eval_examples:
            result = predict_fn({'examples': [example]})
            all_results.append(RawResult(
                unique_id=int(result["unique_ids"][0]),
                start_logits=[float(x) for x in result["start_logits"].flat],
                end_logits=[float(x) for x in result["end_logits"].flat]))
        stop_time = time.time()

        output_prediction_file = os.path.join(FLAGS.output_dir, "predictions.json")
        output_nbest_file = os.path.join(FLAGS.output_dir, "nbest_predictions.json")
        output_null_log_odds_file = os.path.join(FLAGS.output_dir, "null_odds.json")

        all_nbest_json = write_predictions(
            orig_examples, orig_features, all_results,
            FLAGS.n_best_size, FLAGS.max_answer_length,
            FLAGS.do_lower_case, output_prediction_file,
            output_nbest_file, output_null_log_odds_file, topK, False)

        all_nbest_json['text'] = paragraph
        all_nbest_json['prediction time'] = str(stop_time - start_time)

        # Iterate over a snapshot: the 'union' key is added below, and
        # mutating a dict while iterating its view raises on Python 3.
        for value in list(all_nbest_json.values()):
            if not isinstance(value, list):
                continue
            texts = [order_dict['text'] for order_dict in value]

            # Drop empty answers and those outside the 2..max_tokens token
            # range.  list() so len() works on Python 3, where filter is lazy.
            texts = list(filter(
                lambda t: t != 'empty' and t != ''
                and 2 <= len(t.split()) <= max_tokens, texts))
            if len(texts) == 0:
                continue
            union = re.split("\n", join(first_occurence(texts, paragraph), paragraph))
            value.append({'union': union})
            question = value[0]['question']
            if answers.get(question) is None:
                answers[question] = [union]
            else:
                answers[question].append(union)
            all_nbest_json['union'] = answers[question]
    stop_time = time.time()
    answers['prediction time'] = str(stop_time - start_time)
    answers['paragraph'] = orig_paragraph
    if dispPred:
        return jsonify(all_nbest_json)
    else:
        return jsonify(answers)


# TODO: this function is currently not being used
def compute_predictions(prediction_file, tokenizer, estimator,topK=3,data = None):
    """Run Estimator-based QA prediction over SQuAD-style `data`.

    NOTE(review): currently unused — the /predict route uses the exported
    saved-model predictor instead of this TFRecord + estimator.predict path.

    Args:
        prediction_file: unused here; examples are taken from `data`.
        tokenizer: tokenizer passed through to convert_examples_to_features
            (presumably a BERT FullTokenizer — see main()).
        estimator: the estimator whose .predict() is driven over the
            written eval TFRecord.
        topK: number of best answers to keep; -1 means "all" (capped at 1000).
        data: pre-processed prediction data forwarded to read_squad_examples.

    Returns:
        A Flask JSON response with the n-best predictions, so this must be
        called inside a Flask application context.
    """
    eval_examples = read_squad_examples(
        input_file=None, is_training=False,data=data)

    # Features are both kept in memory (for write_predictions) and streamed
    # to a TFRecord file (for the estimator's input_fn).
    eval_writer = FeatureWriter(
        filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
        is_training=False)
    eval_features = []

    def append_feature(feature):
        eval_features.append(feature)
        eval_writer.process_feature(feature)

    convert_examples_to_features(
        examples=eval_examples,
        tokenizer=tokenizer,
        max_seq_length=FLAGS.max_seq_length,
        doc_stride=FLAGS.doc_stride,
        max_query_length=FLAGS.max_query_length,
        is_training=False,
        output_fn=append_feature)
    eval_writer.close()

    tf.logging.info("***** Running predictions *****")
    tf.logging.info("  Num orig examples = %d", len(eval_examples))
    tf.logging.info("  Num split examples = %d", len(eval_features))
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

    all_results = []

    predict_input_fn = input_fn_builder(
        input_file=eval_writer.filename,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=False)

    # If running eval on the TPU, you will need to specify the number of
    # steps.
    all_results = []

    # Collect one RawResult (module-level namedtuple) per split feature.
    for result in estimator.predict(
            predict_input_fn, yield_single_examples=True):
        if len(all_results) % 1000 == 0:
            tf.logging.info("Processing example: %d" % (len(all_results)))
        unique_id = int(result["unique_ids"])
        start_logits = [float(x) for x in result["start_logits"].flat]
        end_logits = [float(x) for x in result["end_logits"].flat]
        all_results.append(
            RawResult(
                unique_id=unique_id,
                start_logits=start_logits,
                end_logits=end_logits))

    output_prediction_file = os.path.join(FLAGS.output_dir, "predictions.json")
    output_nbest_file = os.path.join(FLAGS.output_dir, "nbest_predictions.json")
    output_null_log_odds_file = os.path.join(FLAGS.output_dir, "null_odds.json")

    # -1 is the "give me everything" convention, capped at 1000.
    if topK == -1:
        topK = 1000
    all_nbest_json = write_predictions(eval_examples, eval_features, all_results,
                      FLAGS.n_best_size, FLAGS.max_answer_length,
                      FLAGS.do_lower_case, output_prediction_file,
                      output_nbest_file, output_null_log_odds_file, topK,False)


    return jsonify(all_nbest_json)

def main():
    """Build the BERT QA estimator, export it as a SavedModel, and load a predictor.

    Side effects: creates FLAGS.output_dir, forces FLAGS.do_train to False,
    exports the estimator under FLAGS.output_dir, and sets the module-level
    `predict_fn` used by the /predict route.

    Returns:
        (tokenizer, estimator, topK) — assigned to the module-level names of
        the same spelling by the __main__ block.
    """
    tf.logging.set_verbosity(tf.logging.INFO)

    bert_config =  modeling.BertConfig.from_json_file(FLAGS.bert_config_file)


    tf.gfile.MakeDirs(FLAGS.output_dir)

    FLAGS.do_train = False
    tokenizer = tokenization.FullTokenizer(
        vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

    # TPU resolver is only built when explicitly requested; otherwise the
    # TPUEstimator below falls back to CPU/GPU (use_tpu=False).
    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            num_shards=FLAGS.num_tpu_cores,
            per_host_input_for_training=is_per_host))

    # Prediction-only server: no training examples or schedules are needed.
    train_examples = None
    num_train_steps = None
    num_warmup_steps = None

    topK = int(FLAGS.topK)

    def serving_input_receiver_fn():
        """An input receiver that expects a serialized tf.Example."""
        serialized_tf_example = tf.placeholder(dtype=tf.string,
                                               shape=[None],      # there are 4 prediction features
                                               name='input_example_tensor')
        receiver_tensors = {'examples': serialized_tf_example}

        # Must mirror the features serialized by the /predict route.
        feature_spec = {
          "unique_ids": tf.FixedLenFeature([], tf.int64),
          "input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
          "input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
          "segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
  }

        features = tf.parse_example(serialized_tf_example, feature_spec)
        # features = feature_spec

        # return tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
        return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)

    model_fn = model_fn_builder(
        bert_config=bert_config,
        init_checkpoint=FLAGS.init_checkpoint,
        learning_rate=FLAGS.learning_rate,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_tpu=False,
        use_one_hot_embeddings=False)

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.

    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=False,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        predict_batch_size=FLAGS.predict_batch_size,
    warm_start_from=FLAGS.init_checkpoint)

    # Export to a SavedModel directory; its path feeds the predictor below.
    model_location = export_estimator_savedmodel(estimator,
                                FLAGS.output_dir,
                                serving_input_receiver_fn,
                                assets_extra=None,
                                as_text=False,
                                checkpoint_path=None,
                                strip_default_attrs=False)

    # model_location = "/Users/avinash.v/Projects/indix/qa/bert_model" \
    #                        "/bert_large_with_qdotp_more_qa/bert_large_with_qdotp_more_qa/1560188863"
    global model

    # with tf.Session(graph=tf.Graph()) as sess:
    #     model = tf.saved_model.loader.load(sess, ["serve"],export_dir= model_location)

    # The request path uses this predictor directly instead of the estimator.
    global predict_fn
    predict_fn = tf.contrib.predictor.from_saved_model(model_location)

    return tokenizer,estimator,topK

# Service-specific flag; the remaining flags come from run_squad's FLAGS.
tf.flags.DEFINE_integer(
    "port", 5000,
    "The default port to run the service")

if __name__ == '__main__':
    flags = tf.flags
    FLAGS = flags.FLAGS


    # These flags are declared in run_squad; make them mandatory here.
    flags.mark_flag_as_required("vocab_file")
    flags.mark_flag_as_required("bert_config_file")
    flags.mark_flag_as_required("output_dir")
    flags.mark_flag_as_required("port")
    port = FLAGS.port
    # Build/export the model before serving; also sets the global predict_fn.
    tokenizer, estimator,topK = main()

    app.run(host='0.0.0.0',port=port)