import time
import grpc
from concurrent import futures
import torchvision.models as models
import torchvision.transforms as transforms
from torchvision.models.detection import fasterrcnn_resnet50_fpn
from torchvision.models.segmentation import deeplabv3_resnet101
import calligraphy_pb2, calligraphy_pb2_grpc  # modules generated from the .proto definition
import numpy as np
import base64
from PIL import Image
import torch
# import cv2
import io

# ImageNet channel statistics expected by torchvision's pretrained models.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

# Classifier pipeline: resize + center-crop to 224x224, then normalize.
val_preprocess = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])

# Segmentation pipeline: keep the original resolution, only normalize.
seg_preprocess = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])



class NetServicer(calligraphy_pb2_grpc.NetServicer):
    """gRPC servicer exposing image classification, object detection and
    semantic segmentation over pretrained torchvision models.

    All three models are loaded once at construction time, moved to the GPU
    when one is available, switched to eval mode and warmed up with a dummy
    forward pass so the first real request does not pay lazy-init costs.
    """

    def __init__(self):
        # Resolve the device once; every RPC method reuses it instead of
        # re-querying torch.cuda.is_available() and duplicating branches.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.classify_model = models.resnet34(pretrained=True, progress=True).to(self.device)
        self.detection_model = fasterrcnn_resnet50_fpn(pretrained=True).to(self.device)
        self.segmentation_model = deeplabv3_resnet101(pretrained=True).to(self.device)

        self.classify_model.eval()
        self.detection_model.eval()
        self.segmentation_model.eval()

        # Warm-up forward passes. no_grad() keeps them from building
        # autograd graphs (the original warm-up allocated grad state).
        with torch.no_grad():
            dummy = torch.rand(size=(1, 3, 224, 224), device=self.device)
            self.classify_model(dummy)
            self.detection_model(dummy)
            self.segmentation_model(dummy)
        del dummy

    def decode_image(self, request):
        """Decode the base64 image payload of *request* into a PIL image.

        Returns:
            (img, height, width) where img is an RGB ``PIL.Image``.
        """
        try:
            raw = base64.b64decode(request.image)
        except TypeError:
            # Fallback for clients that send a file-like object instead of
            # bytes (the original code caught this with a bare except).
            raw = base64.b64decode(request.image.read())
        img = Image.open(io.BytesIO(raw)).convert("RGB")
        width, height = img.size
        return img, height, width

    def ClassifyImage(self, request, context):
        """Classify the request image with ResNet-34.

        Returns:
            ClassifyResult with the argmax class index and the inference
            wall-clock time in seconds (decode time excluded, as before).
        """
        img, _, _ = self.decode_image(request)
        input_tensor = val_preprocess(img).unsqueeze(dim=0).to(self.device)

        with torch.no_grad():
            print("start classification ...")
            start = time.time()
            output = self.classify_model(input_tensor).cpu()
            label_index = int(torch.argmax(output))
            run_time = time.time() - start

        return calligraphy_pb2.ClassifyResult(category=label_index, run_time=run_time)

    def Detection(self, request, context):
        """Run Faster R-CNN detection on the request image.

        Returns:
            DetectionResult with base64-encoded raw buffers of the boxes,
            labels and scores arrays, plus total runtime (this RPC times
            decode + inference, matching the original behavior).
        """
        start = time.time()
        img, _, _ = self.decode_image(request)
        input_tensor = transforms.ToTensor()(img).unsqueeze(dim=0).to(self.device)

        with torch.no_grad():
            print("start gpu detection...." if self.device.type == "cuda"
                  else "start cpu detection....")
            # Batch size is always 1 at inference time; take the sole entry.
            output = self.detection_model(input_tensor)[0]
            # .cpu() is a no-op on CPU tensors, so one code path covers both.
            boxes = output["boxes"].cpu().numpy()
            labels = output["labels"].cpu().numpy()
            scores = output["scores"].cpu().numpy()
            run_time = time.time() - start

        # numpy arrays expose the buffer protocol, so b64encode reads their
        # raw bytes directly.
        return calligraphy_pb2.DetectionResult(
            bboxes=base64.b64encode(boxes),
            scores=base64.b64encode(scores),
            labels=base64.b64encode(labels),
            run_time=run_time,
        )

    def Segmentation(self, request, context):
        """Run DeepLabV3 segmentation on the request image.

        Returns:
            SegmentationResult with the base64-encoded per-pixel argmax
            class mask and the inference runtime in seconds.
        """
        img, _, _ = self.decode_image(request)
        input_tensor = seg_preprocess(img).unsqueeze(dim=0).to(self.device)

        with torch.no_grad():
            print("start segmentation...")
            start = time.time()
            output = self.segmentation_model(input_tensor)
            # Only the main "out" head is returned to the client; the
            # auxiliary "aux" head was previously computed and discarded,
            # so that dead work is dropped here.
            mask = output["out"].squeeze(dim=0).argmax(dim=0).cpu().numpy()
            run_time = time.time() - start

        return calligraphy_pb2.SegmentationResult(
            mask=base64.b64encode(mask), run_time=run_time)


def serve(port=2500, max_workers=10):
    """Start the gRPC inference server and block until interrupted.

    Args:
        port: TCP port to bind on all interfaces (default 2500, the value
            previously hard-coded in two places).
        max_workers: size of the thread pool handling concurrent RPCs.
    """
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
    calligraphy_pb2_grpc.add_NetServicer_to_server(NetServicer(), server)
    print('Starting server. Listening on port {}'.format(port))
    server.add_insecure_port("[::]:{}".format(port))
    # Begin accepting and serving requests.
    server.start()
    try:
        # grpc.server handles RPCs on daemon threads; keep the main thread
        # alive until the user interrupts.
        while True:
            time.sleep(86400)
    except KeyboardInterrupt:
        print("\nStopping server!")
        server.stop(0)



# Script entry point: construct the servicer and run the blocking server.
if __name__ == '__main__':
    serve()
