


import os
import cv2
import torch
from skimage import io, transform
from torchvision import transforms

class FashionKpDetManager:
    """Fashion keypoint detector.

    Loads a serialized torch model (a full module, not a state_dict) and
    predicts garment keypoints on input images, mapping the network's
    normalized output back to original-image pixel coordinates.
    """

    def __init__(self, model_path, input_size=256):
        """Load the checkpoint and prepare the model for inference.

        Args:
            model_path: path to a serialized torch model file.
            input_size: square side length the network expects; it also
                fixes the coordinate de-normalization constants
                (256 reproduces the original 128.0 / 127.5 behavior).
        """
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.input_size = input_size
        # map_location makes a GPU-trained checkpoint loadable on CPU-only hosts.
        model = torch.load(model_path, map_location=self.device)
        self.model = torch.nn.DataParallel(model).to(self.device)
        self.model.eval()

    def preprocess_image(self, image):
        """Prepare an image for the network.

        Args:
            image: file path or BGR ndarray (as produced by cv2.imread).

        Returns:
            (tensor, w_scale, h_scale): a (1, C, H, W) float tensor on
            self.device, plus width/height factors that map resized
            coordinates back to the original image size.
        """
        if isinstance(image, str):
            image = cv2.imread(image)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        orig_h, orig_w = image.shape[:2]
        H = W = self.input_size
        image = transform.resize(image, (H, W))
        image = transforms.ToTensor()(image).float()
        image = image.unsqueeze(0).to(self.device)
        # Scale factors from network input size back to original pixels.
        w_scale = orig_w / W
        h_scale = orig_h / H

        return image, w_scale, h_scale

    def inference(self, image):
        """Run the network on a preprocessed batch; returns raw coords."""
        # no_grad avoids building an autograd graph during inference.
        with torch.no_grad():
            coords, _ = self.model(image)
        return coords

    def inference_wrapper(self, image):
        """Full pipeline: preprocess -> inference -> pixel coordinates.

        Args:
            image: file path or BGR ndarray.

        Returns:
            (K, 2) CPU tensor of keypoints in original-image pixels.
        """
        tensor, w_scale, h_scale = self.preprocess_image(image)
        coords = self.inference(tensor)[0]  # first (only) batch element
        return self.postprocess_output(coords, w_scale, h_scale)

    def postprocess_output(self, coords, w_scale, h_scale):
        """De-normalize coords from [-1, 1] to original-image pixels."""
        # detach() before cpu() so no autograd history is dragged along.
        coords = coords.detach().cpu()
        # Map [-1, 1] -> [-0.5, input_size - 0.5] pixel space
        # (half = 128.0, offset = 127.5 for the default input_size 256).
        half = self.input_size / 2.0
        coords = coords * half + (half - 0.5)
        # Rescale from network-input size to original image dimensions.
        coords[:, 0] = coords[:, 0] * w_scale
        coords[:, 1] = coords[:, 1] * h_scale
        return coords

    def visualize_result(self, image, coords, show_num = False, color = (0, 0, 255)):
        """Draw keypoints (and optional index labels) onto the image.

        Args:
            image: file path or BGR ndarray to draw on.
            coords: (K, 2) array-like of pixel coordinates.
            show_num: if True, label each point with its index.
            color: BGR circle/text color.

        Returns:
            The annotated BGR ndarray.
        """
        # Accept a path as well as an already-loaded image.
        if isinstance(image, str):
            image = cv2.imread(image)
        for i in range(coords.shape[0]):
            pt = (int(coords[i, 0]), int(coords[i, 1]))
            cv2.circle(image, pt, 3, color, -1)
            if show_num:
                cv2.putText(image, str(i), pt, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1, cv2.LINE_AA)
        return image

# Trained blouse keypoint checkpoint (epoch 90, loss 0.0477).
# NOTE(review): the original file reassigned model_path six times; only this
# final value ever took effect, so the dead assignments were removed.
model_path = "/mnt/e/code/FashionPosePrediction/keyPointsDetectionMethodWithTorch/blouse_kp/blouse_kp_unet_256_10_16/blouse_kp_90_0.04773353040218353.pth"
# Module-level singleton so importers share one loaded model.
FashionKpDetObj = FashionKpDetManager(model_path)

def main():
    """Demo: detect keypoints on a sample image, save and display the result."""
    # Input image to annotate. (A dead reassignment of this path was removed;
    # only this value was ever used.)
    image_path = "/mnt/e/code/FashionPosePrediction/image/36.jpg"

    # Predict keypoints in original-image pixel coordinates.
    coords = FashionKpDetObj.inference_wrapper(image_path)

    # Draw keypoints with their indices, then persist and display the result.
    image_coords = FashionKpDetObj.visualize_result(image_path, coords, show_num=True)
    cv2.imwrite("output.jpg", image_coords)
    cv2.imshow("output", image_coords)
    cv2.waitKey(0)

# Script entry point (commented-out legacy invocation removed).
if __name__ == "__main__":
    main()
    print("The end!")