# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#     http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

"""
ModelService defines an API for service.
"""
import json
import logging
import os

import numpy as np
import tensorflow as tf
from bert4keras.backend import keras, set_gelu, K
from bert4keras.models import build_transformer_model
from bert4keras.snippets import sequence_padding
from bert4keras.tokenizers import Tokenizer
from keras.layers import Dropout, Dense
import keras.backend.tensorflow_backend as KTF

from model_handler import ModelHandler

set_gelu('tanh')  # switch bert4keras to the tanh-approximation gelu variant

class ModelService(ModelHandler):
    """
    ModelService defines the fundamental loading model and inference
    operations when serving model. This is a base class and needs to be
    inherited.
    """

    def __init__(self):
        super(ModelService, self).__init__()
        self.model = None        # Keras model, built in initialize()
        self.signature = None    # parsed signature.json contents
        self.tokenizer = None    # bert4keras Tokenizer, built in initialize()
        self.maxlen = 32         # max sequence length (not used in this base class)
        self.model_type = 'albert'
        # Paths below are relative to model_dir and resolved in initialize().
        self.best_model = 'model/albert_small.weights'
        self.config_path = 'model/albert_config_small_google.json'
        self.checkpoint_path = 'model/albert_model.ckpt'
        self.dict_path = 'model/vocab.txt'

    def initialize(self, context):
        """
        Initialize model. This will be called during model loading time.

        Resolves model artifact paths, loads signature.json, configures the
        TensorFlow session (GPU pinning / memory fraction when a GPU is
        assigned), then builds the tokenizer and classifier model.

        :param context: Initial context contains model server system properties.
        :return: None
        :raises RuntimeError: if signature.json is missing from model_dir.
        """
        super(ModelService, self).initialize(context)

        assert self._batch_size == 1, "Must have only 1 list data."

        properties = context.system_properties
        model_dir = properties.get("model_dir")
        gpu_id = properties.get("gpu_id")
        gpu_frac = properties.get("gpu_frac")

        # Lazy %-style args avoid string formatting when the level is disabled.
        logging.info("gpu_id %s", gpu_id)
        logging.info("gpu_frac %s", gpu_frac)

        # Resolve all artifact paths relative to the extracted model directory.
        self.best_model = os.path.join(model_dir, self.best_model)
        self.config_path = os.path.join(model_dir, self.config_path)
        self.checkpoint_path = os.path.join(model_dir, self.checkpoint_path)
        self.dict_path = os.path.join(model_dir, self.dict_path)

        signature_file_path = os.path.join(model_dir, "signature.json")
        if not os.path.isfile(signature_file_path):
            raise RuntimeError("Missing signature.json file.")

        with open(signature_file_path) as f:
            self.signature = json.load(f)

        if gpu_id is not None:
            # Pin TensorFlow to the assigned GPU (previously hard-coded to
            # '0', ignoring the gpu_id handed out by the model server) and
            # cap its memory so multiple workers can share one device.
            os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            # Honor the configured fraction; keep the previous 0.25 default.
            config.gpu_options.per_process_gpu_memory_fraction = (
                float(gpu_frac) if gpu_frac is not None else 0.25
            )
            sess = tf.Session(config=config)
            KTF.set_session(sess)

        # Build the tokenizer and model unconditionally. Previously this was
        # done only inside the GPU branch, leaving self.model / self.tokenizer
        # as None on CPU-only hosts and breaking inference there.
        self.tokenizer = Tokenizer(self.dict_path, do_lower_case=True)

        # Load the pretrained transformer backbone.
        bert = build_transformer_model(
            model=self.model_type,
            config_path=self.config_path,
            checkpoint_path=self.checkpoint_path,
            with_pool=True,
            return_keras_model=False,
        )

        # Binary softmax classification head on the pooled output.
        output = Dropout(rate=0.1)(bert.model.output)
        output = Dense(
            units=2, activation='softmax', kernel_initializer=bert.initializer
        )(output)

        self.model = keras.models.Model(bert.model.input, output)

        # Load the fine-tuned classifier weights.
        self.model.load_weights(self.best_model)

    def preprocess(self, batch):
        """
        Transform raw input into model input data.

        :param batch: list of raw requests, should match batch size
        :return: list of preprocessed model input data (one list of
            numpy arrays per request)
        """
        assert self._batch_size == len(batch), "Invalid input batch size: {}".format(len(batch))

        ret = []
        param_name = self.signature['inputs'][0]['data_name']

        for request in batch:
            # Accept the payload under the signature's data_name, falling
            # back to the generic "body" / "data" keys.
            data = request.get(param_name)
            if data is None:
                data = request.get("body")
            if data is None:
                data = request.get("data")

            # Materialize into a list of arrays. The original appended a lazy
            # `map` object (and `np` was never imported), which raised at
            # runtime and could only be consumed once downstream.
            ret.append([np.array(item) for item in data])

        return ret

    def inference(self, model_input):
        """
        Run forward computation and return output.

        NOTE(review): this is a pass-through placeholder -- it returns the
        preprocessed input unchanged instead of calling self.model; subclasses
        are expected to override it.

        :param model_input: list of preprocessed inputs
        :return: list of inference outputs, or None if an earlier stage
            recorded an error
        """
        if self.error is not None:
            return None

        # Check input shape
        # check_input_shape(model_input, self.signature)
        return model_input

    def postprocess(self, inference_output):
        """
        Convert inference output into a list of response strings.

        :param inference_output: list of outputs from inference()
        :return: list of stringified results, or the recorded error repeated
            once per request when an earlier stage failed
        """
        if self.error is not None:
            return [self.error] * self._batch_size

        # Assumes each element is array-like with a tolist() method -- TODO
        # confirm against the actual output of the overriding inference().
        return [str(d.tolist()) for d in inference_output]