import os

import torch
from torchvision.models import shufflenet_v2_x1_5, shufflenet_v2_x1_0, shufflenet_v2_x2_0

from .models.resnet import resnet18, resnet34, resnet50, resnet101
from .models.squeezenet import squeezenet1_1, squeezenet1_0
from .models.shufflenetv2 import ShuffleNetV2
from .models.shufflenet import ShuffleNet
from .models.mobilenetv2 import MobileNetV2
from .models.rexnetv1 import ReXNetV1

from .utils.common_utils import *


def load_model(model, num_classes, img_size, model_path):
    """Instantiate the named architecture and load its checkpoint weights.

    Args:
        model: architecture name, e.g. 'resnet_50', 'shufflenetv2', 'mobilenetv2'.
        num_classes: number of output classes / regression values.
        img_size: (width, height) sequence; only img_size[0] is passed to the
            resnet builders — presumably a square input is assumed (TODO confirm).
        model_path: filesystem path to a saved ``state_dict`` checkpoint.

    Returns:
        The model on GPU (if available) or CPU, switched to eval mode with the
        checkpoint loaded — or ``None`` when ``model_path`` does not exist.

    Raises:
        ValueError: if ``model`` does not name a supported architecture.
    """
    if model == 'resnet_50':
        model_ = resnet50(num_classes=num_classes, img_size=img_size[0])
    elif model == 'resnet_18':
        model_ = resnet18(num_classes=num_classes, img_size=img_size[0])
    elif model == 'resnet_34':
        model_ = resnet34(num_classes=num_classes, img_size=img_size[0])
    elif model == 'resnet_101':
        model_ = resnet101(num_classes=num_classes, img_size=img_size[0])
    elif model == "squeezenet1_0":
        model_ = squeezenet1_0(num_classes=num_classes)
    elif model == "squeezenet1_1":
        model_ = squeezenet1_1(num_classes=num_classes)
    elif model == "shufflenetv2":
        model_ = ShuffleNetV2(ratio=1., num_classes=num_classes)
    elif model == "shufflenet_v2_x1_5":
        model_ = shufflenet_v2_x1_5(pretrained=False, num_classes=num_classes)
    elif model == "shufflenet_v2_x1_0":
        model_ = shufflenet_v2_x1_0(pretrained=False, num_classes=num_classes)
    elif model == "shufflenet_v2_x2_0":
        model_ = shufflenet_v2_x2_0(pretrained=False, num_classes=num_classes)
    elif model == "shufflenet":
        model_ = ShuffleNet(num_blocks=[2, 4, 2], num_classes=num_classes, groups=3)
    elif model == "mobilenetv2":
        model_ = MobileNetV2(num_classes=num_classes)
    elif model == "ReXNetV1":
        model_ = ReXNetV1(num_classes=num_classes)
    else:
        # Previously an unknown name crashed later with UnboundLocalError;
        # fail fast with a clear message instead.
        raise ValueError("unsupported model name: {!r}".format(model))

    use_cuda = torch.cuda.is_available()

    device = torch.device("cuda:0" if use_cuda else "cpu")
    model_ = model_.to(device)
    model_.eval()  # switch to forward-inference mode
    if os.access(model_path, os.F_OK):  # checkpoint file exists
        # map_location keeps CPU-only hosts able to load GPU-saved checkpoints
        chkpt = torch.load(model_path, map_location=device)
        model_.load_state_dict(chkpt)
        return model_
    return None


import torchvision.transforms.functional as F


def inference(model_, img, num_points=21):
    """Run landmark inference on a batch of images.

    Args:
        model_: callable network whose flat output holds ``num_points * 2``
            values per sample.
        img: batched input tensor with shape ``(n, ...)``; pixel values are
            presumably in [0, 255] — the normalization below maps that range
            to roughly [-0.5, 0.5] (TODO confirm against the caller).
        num_points: number of (x, y) landmark points predicted per sample
            (default 21, matching the original hard-coded value).

    Returns:
        Detached tensor of shape ``(n, num_points, 2)``.
    """
    n = img.shape[0]
    with torch.no_grad():
        # normalize: subtract mid-gray and scale by the value range
        img_ = (img - 128.) / 256.
        pre_ = model_(img_)  # model forward pass
        output = pre_.detach()
        # reshape the flat prediction into per-point (x, y) pairs
        return output.view(n, num_points, 2)