import torch
from handpose.inference_api import inference, load_model
import numpy
import time
from contextlib import ContextDecorator
import matplotlib.pyplot as plot
import matplotlib
import torchvision.transforms.functional as F


class TimePrinter(ContextDecorator):
    """Context manager / decorator that prints the wall-clock duration of the
    wrapped block as ``<text>: N.NN seconds``.
    """

    def __init__(self, text):
        self.text = text  # label printed next to the elapsed time
        self.s = None     # start timestamp, set on __enter__

    def __enter__(self):
        # perf_counter is monotonic, so the measurement cannot go negative
        # if the system clock is adjusted while the block runs.
        self.s = time.perf_counter()
        return self  # allow `with TimePrinter(...) as t:`

    def __exit__(self, *exc):
        print(f"{self.text}: {round(time.perf_counter() - self.s, 2)} seconds")
        # Bug fix: the original returned True, which silently swallowed ANY
        # exception raised inside the block (it only printed it). Return
        # False so errors propagate to the caller with a full traceback.
        return False


class Finger(torch.nn.Module):
    """End-to-end hand keypoint pipeline: a YOLOv5 detector finds one hand box
    per image, then a ReXNetV1 regressor predicts 21 keypoints per crop.

    NOTE(review): references the `Detection` wrapper defined later in this
    file; that is fine at runtime because the name is resolved when
    __init__ executes, not at class definition time.
    """

    def __init__(self):
        super().__init__()
        # Local YOLOv5 hub checkout wrapping a TorchScript detector checkpoint.
        self.detect_model = torch.hub.load('./yolov5', 'custom',
                                           path='./resources/yolov5m.torchscript',
                                           source='local', force_reload=True)
        self.detect_model = Detection(self.detect_model)

        # Keypoint regressor: num_classes=42 -> 21 (x, y) landmarks,
        # fixed 256x256 input resolution.
        self.hand_pose_model = load_model(
            model_path='./resources/ReXNetV1-size-256-loss-wing_loss102-0.115-20211108.pth',
            num_classes=42,
            img_size=(256, 256),
            model='ReXNetV1'
        )
        # self.hand_pose_model = torch.jit.load('./ReXNetV1.pt')

    def forward(self, x):
        """Detect one hand per image and regress its 21 keypoints.

        Args:
            x: image batch; assumed [b, c, h, w] — TODO confirm with caller.

        Returns:
            Keypoints in full-image coordinates, shape [b, 21, 2].
        """
        img = x.float()
        # Detector output: assumes one row per input image laid out as
        # [x1, y1, x2, y2, confidence, label] — verify against the YOLO wrapper.
        r = self.detect_model(img).detach()
        position = torch.round(r[:, 0:4]).int()
        # confidence = r[4]
        # label = r[5]
        images = []
        shapes = []
        for i, p in enumerate(position):
            # Crop each detected box (top=y1, left=x1, height=y2-y1,
            # width=x2-x1) and resize to the pose model's 256x256 input.
            cropped_image = F.crop(img[i], p[1], p[0], p[3] - p[1], p[2] - p[0])
            h, w = cropped_image.shape[1], cropped_image.shape[2]
            resized_image = F.resize(cropped_image, [256, 256])
            shapes.append(torch.tensor([h, w], device=x.device))
            images.append(resized_image)
        # Repeat each crop's [h, w] once per keypoint -> shape [b, 21, 2].
        shapes = torch.stack(shapes).repeat(1, 21).view(-1, 21, 2)
        # Keypoints appear to come back normalized to the crop; rescale to
        # pixels. NOTE(review): this multiplies channel 0 (treated below as x)
        # by h and channel 1 (treated as y) by w — [h, w] ordering looks like
        # width/height may be swapped; confirm against `inference`'s output
        # convention before relying on the coordinates.
        points = inference(model_=self.hand_pose_model, img=torch.stack(images))
        points *= shapes
        # Shift crop-local coordinates back into full-image space.
        points[:, :, 0] += position[:, None, 0]
        points[:, :, 1] += position[:, None, 1]
        return points


class Detection(torch.nn.Module):
    """Thin nn.Module adapter that delegates the forward pass to a wrapped
    detector, so hub models can be traced/exported uniformly."""

    def __init__(self, model):
        super().__init__()
        # The wrapped detection network (e.g. a YOLOv5 hub model).
        self.model = model

    def forward(self, img):
        """Run the wrapped detector on an image batch of shape [b, c, h, w]."""
        output = self.model(img)
        return output


def show(points, image, save=False, filename=None):
    """Plot 2-D keypoints over an image.

    Args:
        points: array-like of shape (N, 2) holding (x, y) pixel coordinates.
        image: image array displayable by matplotlib.imshow.
        save: when True, write the figure to `filename` instead of showing it.
        filename: output path; required when save is True.

    Raises:
        ValueError: if save is True but no filename was given.
    """
    # Bug fix: unconditionally forcing the PyCharm-only 'backend_interagg'
    # backend crashes on any machine without it, even when only saving to a
    # file. Fall back to the default backend if it is unavailable.
    try:
        matplotlib.use('module://backend_interagg')
    except (ImportError, ValueError):
        pass

    plot.imshow(image)
    plot.plot(points[:, 0], points[:, 1], 'ob')
    if save:
        if filename is None:
            raise ValueError("filename is required when save=True")
        plot.savefig(filename)
        plot.close()
    else:
        plot.show()


import PIL.Image as Image


class Pose(torch.nn.Module):
    """nn.Module adapter around a hand-pose backbone so it can be traced and
    exported with torch.jit (see pose_export)."""

    def __init__(self, model):
        super().__init__()
        # Keypoint-regression backbone (e.g. ReXNetV1 from load_model).
        self.model = model

    def forward(self, x):
        """Cast the batch to float32 and run keypoint inference on it."""
        batch = x.float()
        return inference(self.model, batch)


import requests
import io


def get_points(host, filename, version=1.0):
    """Remote two-stage hand keypoint prediction over HTTP.

    Sends the image to a detection endpoint, crops the returned hand box,
    sends the crop to a pose endpoint, and maps the keypoints back into
    full-image coordinates.

    Args:
        host: "host:port" of the prediction server.
        filename: path to the input image file.
        version: endpoint version segment in the URL.

    Returns:
        Integer tensor of keypoints [N, 2] in full-image (x, y) coordinates.
    """
    # Bug fix: the original open/read/close leaked the file handle if read()
    # raised; `with` guarantees it is closed.
    with open(filename, 'rb') as f:
        b = f.read()
    res0 = requests.post(f'http://{host}/predictions/detect/{version}', data={'data': b})
    box = torch.tensor(res0.json()['box']).int()
    stream = io.BytesIO(b)
    arr = numpy.array(Image.open(stream))

    h, w = arr.shape[0:2]
    # Clamp the detector box to the image bounds before cropping.
    x1, y1 = max(0, min(box[0], w)), max(0, min(box[1], h))
    x2, y2 = max(0, min(box[2], w)), max(0, min(box[3], h))
    cropped_arr = arr[y1:y2, x1:x2, :]
    image = Image.fromarray(cropped_arr)
    output_stream = io.BytesIO()
    image.save(output_stream, 'jpeg')
    res1 = requests.post(f'http://{host}/predictions/pose/{version}', data={'data': output_stream.getvalue()})
    # Keypoints come back in crop-local coordinates; shift to image space.
    points = torch.tensor(res1.json())
    points[:, 0] += x1
    points[:, 1] += y1
    points = torch.round(points).int()
    return points


def pose_export():
    """Trace the hand-pose model with TorchScript and save it for CPU use."""
    backbone = load_model(
        model_path='./resources/checkpoints/ReXNetV1-size-256-loss-wing_loss102-0.115-20211108.pth',
        num_classes=42,
        img_size=(256, 256),
        model='ReXNetV1'
    )
    wrapper = Pose(backbone)
    # A single dummy 256x256 RGB batch drives the trace.
    example = torch.randn([1, 3, 256, 256])
    traced = torch.jit.trace(wrapper, example)
    traced.save('./pose.cpu.pt')


def detect_export():
    """Trace the YOLOv5 hand detector with TorchScript and save it for CPU use."""
    hub_model = torch.hub.load('./yolov5', 'custom',
                               path='./resources/checkpoints/yolov5m.cpu.pt',
                               source='local')
    wrapper = Detection(hub_model)
    # A single dummy 640x640 RGB batch drives the trace.
    example = torch.randn(1, 3, 640, 640)
    traced = torch.jit.trace(wrapper, example)
    traced.save('./resources/checkpoints/detect.cpu.pt')


if __name__ == '__main__':
    # results = get_points('10.11.153.13:60080', './resources/images/test1/WIN_20220513_14_07_28_Pro.jpg', version=1.0)
    # print(results)

    # Smoke-test the exported TorchScript detector on a dummy batch.
    model = torch.jit.load('./resources/checkpoints/best.torchscript',
                           map_location=torch.device('cpu'))
    dummy = torch.randn(2, 3, 640, 640)
    print(model(dummy)[0].shape)