import grpc
import calligraphy_pb2_grpc, calligraphy_pb2

import os
from PIL import Image, ImageDraw, ImageFont
import torchvision.transforms as transforms
import glob
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
import base64
from torchvision.models.detection import fasterrcnn_resnet50_fpn
import itertools
import cv2

# Standard ImageNet channel statistics shared by both pipelines.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

# Training pipeline: random crop + horizontal flip for augmentation,
# then tensor conversion and ImageNet normalization.
preprocessing_train = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])

# Validation pipeline: deterministic resize + center crop,
# then the same tensor conversion and normalization.
preprocessing_val = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])


def run(image, height, width):
    """Send an encoded image to the segmentation service and return its mask.

    Args:
        image: raw encoded image bytes (e.g. JPEG/PNG container bytes, as
            produced by ``cv2.imencode(...).tobytes()``).
        height: image height in pixels (echoed to the server; the caller
            reshapes the returned flat mask with it).
        width: image width in pixels.

    Returns:
        A flat 1-D numpy int64 array of per-pixel mask labels; the caller
        reshapes it to (height, width).
    """
    # Raise the default 4 MB gRPC message caps so large images fit.
    # NOTE: 'grpc.max_send_message_length' is the correct channel-argument
    # name for the outbound limit; the previously used
    # 'grpc.max_message_length' is deprecated in gRPC core.
    options = [('grpc.max_receive_message_length', 100 * 2048 * 2048),
               ('grpc.max_send_message_length', 100 * 2048 * 2048)]
    with grpc.insecure_channel('localhost:8080', options=options) as channel:
        stub = calligraphy_pb2_grpc.NetStub(channel=channel)
        request = calligraphy_pb2.ImageMatrix(image=image, height=height, width=width)

        start = time.time()
        response = stub.Segmentation(request)
        end = time.time()

        # The server base64-encodes the flattened int64 label mask.
        mask = np.frombuffer(base64.b64decode(response.mask), dtype=np.int64)

        run_time = response.run_time  # server-side forward (inference) time
        total_time = end - start
        # Everything that is not forward time is attributed to transfer.
        image_transfer_time = total_time - run_time

        print("total time = ", total_time,
              "  forward time = ", run_time,
              "  image_transfer_time = ", image_transfer_time)
        return mask


if __name__ == '__main__':

    # Stream every test image through the remote segmentation service and
    # display the source next to the predicted mask.
    for path in glob.glob("/media/retoo/RetooDisk/wanghui/Data/COCO/test2014/*.jpg"):
        name = os.path.basename(path).strip()
        _, ext = os.path.splitext(name)

        # Single decode via OpenCV (BGR), flipped to RGB for display/encoding.
        bgr = cv2.imread(path)
        if bgr is None:
            # Unreadable/corrupt file — skip rather than crash on None.
            print("skipping unreadable image:", path)
            continue
        img_data = bgr[:, :, ::-1]
        height, width = img_data.shape[:2]

        # Re-encode into the original container format and ship raw bytes.
        # NOTE(review): imencode assumes BGR input, so encoding the RGB view
        # stores channel-swapped data — preserved as-is since the server's
        # decoding convention is not visible here; confirm against server.py.
        ok, img_encode = cv2.imencode(ext, img_data)
        if not ok:
            print("skipping unencodable image:", path)
            continue
        str_encode = img_encode.tobytes()

        # Server returns a flat mask; reshape using the known image size.
        mask = run(str_encode, height, width)
        mask = np.reshape(mask, (height, width))

        fig, ax = plt.subplots(1, 2)
        ax[0].imshow(img_data)
        ax[1].imshow(mask)
        plt.show()

# docker build --pull -t resnet-libtorch-serving -f libtorch_cpu_Dockerfile .
# docker run -p 45.77.51.15:8080:8080 --name=pytorch_service -d -it dl-pytorch-serving:latest python3 ./server.py
# docker run -p 207.246.117.29:8080:8080 --name=pytorch_service -it dl-pytorch-serving:latest python3 ./server.py
