# import the necessary packages

import redis
import time
import json
import os
import sys

sys.path.insert(0, os.path.dirname(os.getcwd()))

from Common.helpers import decode_predictions
from Common import settings
from Common import helpers
from Common import AlexnetModel

# connect to Redis server
# `db` is the edge/local Redis instance where classification results are
# written (keyed by image ID); `dbCloud` is the cloud Redis instance that
# holds the incoming AlexNet work queue. Connection parameters come from
# the shared Common.settings module. Note: StrictRedis is lazy -- no
# connection is made until the first command is issued.
db = redis.StrictRedis(host=settings.REDIS_HOST,
                       port=settings.REDIS_PORT, db=settings.REDIS_DB, password=settings.PASSWORD)
dbCloud = redis.StrictRedis(host=settings.Cloud_REDIS_HOST,
                       port=settings.Cloud_REDIS_PORT, db=settings.Cloud_REDIS_DB, password=settings.Cloud_PASSWORD)


def classify_process_cloud():
    """Run the cloud-side AlexNet inference worker loop.

    Continuously pulls serialized image records from the cloud Redis
    queue (``settings.AlexNet_CLOUD_QUEUE``), runs each through the
    split AlexNet model, and stores the decoded predictions plus timing
    bookkeeping in the edge Redis instance keyed by the record's image
    ID. Never returns; intended to be launched as a standalone process.
    """
    # load the pre-trained model once, up front -- loading is expensive
    print("* Loading model...")
    model = AlexnetModel.AlexnetModel()
    print("* Cloud Alex Model loaded")

    # continually poll for new images to classify
    while True:
        # grab up to two queued entries (lrange indices 0..1 inclusive)
        queue = dbCloud.lrange(settings.AlexNet_CLOUD_QUEUE, 0, 1)
        # redis-py lrange returns an empty list (never None) on an empty
        # queue -- the original `is None` guard could never fire
        if not queue:
            continue
        transendtime = int(time.time() * 1000)
        imageIDs = []

        # loop over the queue entries
        for q in queue:
            # deserialize the record and rebuild the input array
            q = json.loads(q.decode("utf-8"))
            if int(q["index"]) == 0:
                # first split point: raw image, shape carried as
                # individual height/width/channel fields
                image = helpers.base64_decode_image(
                    q["temp"], q["dtype"],
                    (1, int(q["height"]), int(q["width"]), int(q["chan"])))
            else:
                # later split point: intermediate activation, shape
                # shipped as-is in the record
                image = helpers.base64_decode_image(q["temp"], q["dtype"], q["shape"])
            # track processed IDs so we know how much to trim below
            imageIDs.append(q["id"])

            preds = model.splitpredict(image, int(q["index"]), settings.AlexNet_Layers)
            results = decode_predictions(preds)

            # store the predictions for THIS image under its own ID.
            # (The original zipped the growing imageIDs list against the
            # current item's results, which wrote each later item's
            # results under the FIRST image's ID and never stored the
            # later IDs at all.)
            for resultSet in results:
                output = {"result": []}
                for (imagenetID, label, prob) in resultSet:
                    output["result"].append(
                        {"label": label, "probability": float(prob)})
                # timing bookkeeping: transfer time, cumulative compute
                # time, and end timestamp (all in milliseconds)
                output["tt"] = transendtime - int(q["tt"])
                output["ct"] = int(time.time() * 1000) - transendtime + int(q["ect"])
                output["et"] = int(time.time() * 1000)
                db.set(q["id"], json.dumps(output))

        # remove the processed entries from the queue exactly once per
        # batch. (Originally this ran inside the per-item loop, trimming
        # a cumulative 1+2+...+n entries and discarding unprocessed
        # work still sitting in the queue.)
        dbCloud.ltrim(settings.AlexNet_CLOUD_QUEUE, len(imageIDs), -1)
        # time.sleep(settings.SERVER_SLEEP)


# if this is the main thread of execution start the model server
# process
if __name__ == "__main__":
    # entry point: runs the blocking worker loop (never returns)
    classify_process_cloud()
