# import the necessary packages

import redis
import time
import json
import os
import sys

sys.path.insert(0, os.path.dirname(os.getcwd()))

from Common.helpers import decode_predictions
from Common import settings
from Common import helpers
from Common import AlexnetModel

# connect to Redis server
# `db` is the local (edge) Redis instance: holds the inbound image queue
# and receives final classification results keyed by image ID.
db = redis.StrictRedis(host=settings.REDIS_HOST,
                       port=settings.REDIS_PORT, db=settings.REDIS_DB, password=settings.PASSWORD)
# `dbCloud` is the remote (cloud) Redis instance: receives intermediate
# activations when inference is split partway through the network.
# NOTE(review): both connections are created at import time — a connection
# failure here aborts the module load; confirm that is intended.
dbCloud = redis.StrictRedis(host=settings.Cloud_REDIS_HOST,
                            port=settings.Cloud_REDIS_PORT, db=settings.Cloud_REDIS_DB,
                            password=settings.Cloud_PASSWORD)


def classify_process_edge():
    """Run the edge half of a split AlexNet inference loop.

    Polls the local Redis image queue forever. For each queued item it
    runs `model.splitpredict` up to the layer index carried in the item;
    if that index is not the final layer, the intermediate activation is
    forwarded to the cloud queue, otherwise predictions are decoded and
    stored back in the local Redis keyed by image ID.

    Side effects: reads/trims `db`'s image queue, pushes to
    `dbCloud`'s cloud queue, and sets result keys in `db`.
    """
    # load the pre-trained model once, before entering the poll loop
    print("* Loading model...")
    model = AlexnetModel.AlexnetModel()
    print("* Edge Alex Model loaded")
    # continually poll for new images to classify
    while True:
        # grab up to two queued items (lrange end index is inclusive)
        queue = db.lrange(settings.IMAGE_QUEUE[1], 0, 1)
        # BUG FIX: lrange returns an empty list (never None) when the
        # queue is empty, so the original `queue is None` guard was dead
        # code — test truthiness instead.
        if not queue:
            continue
        imageIDs = []
        # loop over the batch of queued items
        for q in queue:
            # deserialize the item and recover the input image tensor
            q = json.loads(q.decode("utf-8"))
            image = helpers.base64_decode_image(q["image"], settings.AlexNet_IMAGE_DTYPE,
                                                (1, settings.AlexNet_IMAGE_HEIGHT,
                                                 settings.AlexNet_IMAGE_WIDTH, settings.AlexNet_IMAGE_CHANS))
            # update the list of image IDs
            imageIDs.append(q["id"])
            # run the edge partition of the model up to q["index"]
            temp = model.splitpredict(image, 0, int(q["index"]))
            if int(q["index"]) != settings.AlexNet_Layers:
                # not the final layer: ship the intermediate activation
                # to the cloud for the remaining layers
                image = helpers.base64_encode_image(temp)
                d = {"id": q["id"], "index": q["index"], "dtype": str(temp.dtype),
                     "shape": temp.shape, "temp": image, "tt": int(time.time() * 1000),
                     "ect": int(time.time() * 1000) - q["ect"]}
                dbCloud.rpush(settings.AlexNet_CLOUD_QUEUE, json.dumps(d))
            else:
                # final layer reached on the edge: decode and store the
                # predictions for each accumulated image ID
                results = decode_predictions(temp)
                for (imageID, resultSet) in zip(imageIDs, results):
                    # initialize the list of output predictions
                    output = {"result": []}
                    for (imagenetID, label, prob) in resultSet:
                        r = {"label": label, "probability": float(prob)}
                        output["result"].append(r)
                    # timing metadata: tt (transfer time) is zero since
                    # nothing left the edge
                    output["tt"] = 0
                    # BUG FIX: "et" must be assigned BEFORE "ct" is
                    # computed from it — the original order raised KeyError.
                    output["et"] = int(time.time() * 1000)
                    output["ct"] = output["et"] - q["ect"]
                    db.set(imageID, json.dumps(output))
        # BUG FIX: trim exactly the processed entries ONCE per batch.
        # The original ltrim ran inside the item loop with a growing
        # len(imageIDs), cumulatively dropping unprocessed queue entries.
        db.ltrim(settings.IMAGE_QUEUE[1], len(imageIDs), -1)
        # sleep for a small amount
        # time.sleep(settings.SERVER_SLEEP)


# if this is the main thread of execution, start the edge model server
# loop (blocks forever polling the Redis image queue)
if __name__ == "__main__":
    classify_process_edge()
