import time
import grpc
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
from concurrent import futures
import torchvision.models as models
import torchvision.transforms as transforms
from torchvision.models.detection import fasterrcnn_resnet50_fpn
from torchvision.models.segmentation import deeplabv3_resnet101
from flask import Flask, jsonify, request
import numpy as np
import base64
from PIL import Image
import torch
import io
import json
import cv2

# ImageNet channel statistics shared by all torchvision pretrained models.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

# Classification preprocessing: resize + center-crop to 224x224, then
# normalize with the ImageNet statistics the backbone was trained with.
val_preprocess = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])

# Segmentation preprocessing: keep the original resolution (the model is
# fully convolutional), only convert and normalize.
seg_preprocess = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])

app = Flask(__name__)

# Maps a stringified class index ("0".."999") to [wnid, human_readable_label].
# Use a context manager so the file handle is closed deterministically
# (the original `json.load(open(...))` leaked it), and pin the encoding.
with open('imagenet_class_index.json', encoding='utf-8') as _f:
    imagenet_class_index = json.load(_f)


class NetServicer:
    """Serves three pretrained torchvision models over Flask requests:
    ImageNet classification (ResNet-34), object detection
    (Faster R-CNN / ResNet-50 FPN), and semantic segmentation
    (DeepLabV3 / ResNet-101).

    Models are loaded, moved to the GPU when one is available, switched to
    eval mode, and warmed up once at construction time so the first real
    request does not pay the one-off CUDA / kernel-selection cost.
    """

    def __init__(self):
        self.classify_model = models.resnet34(pretrained=True, progress=True)
        self.detection_model = fasterrcnn_resnet50_fpn(pretrained=True)
        self.segmentation_model = deeplabv3_resnet101(pretrained=True)

        # Decide the device once instead of re-querying on every request.
        self.use_cuda = torch.cuda.is_available()

        # Dummy input used only to warm the models up.
        input_tensor = torch.rand(size=(1, 3, 224, 224))

        if self.use_cuda:
            self.classify_model.cuda()
            self.detection_model.cuda()
            self.segmentation_model.cuda()
            input_tensor = input_tensor.cuda()

        self.classify_model.eval()
        self.detection_model.eval()
        self.segmentation_model.eval()

        # Warm-up forward passes. no_grad keeps autograd graphs from being
        # built (the original held them alive until an explicit `del`).
        with torch.no_grad():
            self.classify_model(input_tensor)
            self.detection_model(input_tensor)
            self.segmentation_model(input_tensor)

    def decode_image(self, request):
        """Decode the base64 image carried in the request's JSON body.

        Parameters
        ----------
        request : flask.Request
            Must carry a JSON object with a base64-encoded image string
            under the "file" key.

        Returns
        -------
        (PIL.Image.Image, int, int)
            The decoded RGB image, its height, and its width.
        """
        # BUG FIX: the original subscripted the bound method itself
        # (`request.json.get['file']`) in both the try body and the bare
        # `except` fallback, so it always raised TypeError. Index the JSON
        # dict directly; a missing key now fails loudly with KeyError.
        image_bytes = request.json["file"]
        image = Image.open(io.BytesIO(base64.b64decode(image_bytes))).convert("RGB")
        width, height = image.size
        return image, height, width

    def ClassifyImage(self, request):
        """Run ImageNet classification on the request's image.

        Returns
        -------
        (list, bytes)
            `[class_id, class_name]` looked up in the ImageNet index, and
            the raw RGB pixel buffer of the source image, base64-encoded.
        """
        img, _height, _width = self.decode_image(request)

        input_tensor = val_preprocess(img).unsqueeze(dim=0)
        with torch.no_grad():
            print("start classification ...")
            start = time.time()
            if self.use_cuda:
                input_tensor = input_tensor.cuda()
            # .cpu() is a no-op when the tensor already lives on the CPU.
            output = self.classify_model(input_tensor).cpu()

            label_index = int(torch.argmax(output))
            labels = imagenet_class_index[str(label_index)]
            # The original measured run_time but never used it; log it.
            print("classification took %.3fs" % (time.time() - start))

        src_image = base64.b64encode(np.asarray(img).tobytes())
        return labels, src_image

    def Detection(self, request):
        """Run object detection on the request's image.

        Returns
        -------
        (bytes, bytes, bytes)
            Base64-encoded raw buffers of the boxes, scores, and labels
            numpy arrays, in that order.
        """
        start = time.time()
        img, _height, _width = self.decode_image(request)
        input_tensor = transforms.ToTensor()(img).unsqueeze(dim=0)

        with torch.no_grad():
            if self.use_cuda:
                print("start gpu detection....")
                input_tensor = input_tensor.cuda()
            else:
                print("start cpu detection....")

            # Only batch_size == 1 is supported here, so take element 0.
            output = self.detection_model(input_tensor)[0]
            boxes = output["boxes"].cpu().numpy()
            labels = output["labels"].cpu().numpy()
            scores = output["scores"].cpu().numpy()

            print("detection took %.3fs" % (time.time() - start))

            # b64encode reads the arrays directly via the buffer protocol.
            boxes_str = base64.b64encode(boxes)
            labels_str = base64.b64encode(labels)
            scores_str = base64.b64encode(scores)

        return boxes_str, scores_str, labels_str

    def Segmentation(self, request):
        """Run semantic segmentation on the request's image.

        Returns
        -------
        str
            The per-pixel argmax class map rendered with np.array2string.
        """
        img, _height, _width = self.decode_image(request)
        input_tensor = seg_preprocess(img).unsqueeze(dim=0)

        with torch.no_grad():
            print("start segmentation...")
            start = time.time()
            if self.use_cuda:
                input_tensor = input_tensor.cuda()
            output = self.segmentation_model(input_tensor)
            # Per-pixel class ids from the main head. The aux head output
            # was computed and discarded in the original, so it is skipped.
            out = output["out"].squeeze(dim=0).argmax(dim=0).cpu().numpy()

            print("segmentation took %.3fs" % (time.time() - start))

            # NOTE(review): np.array2string truncates large arrays with
            # "..."; base64-encoding (as Detection does) would be lossless.
            out_str = np.array2string(out)

        return out_str

# Instantiate the servicer once at import time so all three models are
# downloaded/loaded (and warmed up) before the first request arrives.
net = NetServicer()

@app.route('/predict', methods=['POST'])
def predict():
    """Classify the image posted in the request body.

    Expects a JSON payload with a base64-encoded image under "file" and
    responds with the predicted ImageNet class id/name plus the raw
    source-image bytes.
    """
    if request.method != 'POST':
        # Route only accepts POST, so this mirrors the original's implicit
        # None return for any other method.
        return None
    (class_id, class_name), src_image = net.ClassifyImage(request)
    # bytes are not JSON serializable, so decode the base64 payload to str.
    return jsonify({
        'class_id': class_id,
        'class_name': class_name,
        "src_image": src_image.decode("utf-8"),
    })


if __name__ == '__main__':
    # Bind to every interface so the service is reachable from other hosts.
    app.run(port=2501, host="0.0.0.0")
