import torchvision
import torch

# Square input resolution: images are resized to this and reshaped in predict().
INPUT_SIZE = 224


class ModelWrapper(torch.nn.Module):
    """A torchvision ResNet backbone with a replacement classification head.

    The original fully-connected layer is dropped from the backbone and a
    fresh ``Linear`` head sized for ``num_cls`` classes is applied after
    flattening the backbone features.
    """

    def __init__(self, name: str, num_cls: int) -> None:
        super().__init__()
        net, self.fc, self.feature_num = ModelWrapper.get_net(name, num_cls)
        # Everything except the original fc layer becomes the feature extractor.
        self.backbone = torch.nn.Sequential(*(list(net.children())[:-1]))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Extract features, flatten to (batch, feature_num), and classify."""
        x = self.backbone(x)
        x = torch.flatten(x, 1)
        return self.fc(x)

    # Build an optional model and a new fc layer for the task's class count.
    @staticmethod
    def get_net(name: str, num_cls: int):
        """Construct a torchvision ResNet plus a replacement fc head.

        Args:
            name: one of ``'resnet34'``, ``'resnet50'``, ``'resnet101'``.
            num_cls: number of output classes for the new fc layer.

        Returns:
            Tuple of ``(net, fc, feature_num)`` where ``feature_num`` is the
            input width of the original fc layer.

        Raises:
            ValueError: if ``name`` is not a supported model.
        """
        # Dispatch table replaces three copy-paste if/elif branches.
        builders = {
            'resnet34': torchvision.models.resnet34,
            'resnet50': torchvision.models.resnet50,
            'resnet101': torchvision.models.resnet101,
        }
        try:
            net = builders[name]()
        except KeyError:
            # ValueError is more precise than bare Exception and still
            # backward compatible with callers catching Exception.
            raise ValueError("wrong model name!") from None
        feature_num = net.fc.in_features
        fc = torch.nn.Linear(in_features=feature_num, out_features=num_cls)
        return net, fc, feature_num


class I_Net:
    """Inference helper: loads a trained ModelWrapper and classifies images."""

    def __init__(self, net_name: str, model_pth: str, num_cls: int) -> None:
        # Build the network and load trained weights onto the CPU.
        self.net = ModelWrapper(net_name, num_cls)
        self.net.load_state_dict(torch.load(
            model_pth, map_location=torch.device('cpu')))
        # Inference mode: freezes batch-norm running statistics and disables
        # dropout. Without this, ResNet batch-norm uses per-batch statistics
        # and predictions on single images are wrong.
        self.net.eval()
        self.backbone = self.net.backbone

        # Preprocessing: resize to the fixed input size, then convert to tensor.
        self.transform = torchvision.transforms.Compose([
            torchvision.transforms.Resize((INPUT_SIZE, INPUT_SIZE)),
            torchvision.transforms.ToTensor()
        ])
        # Backward-compatible alias for the original misspelled attribute name.
        self.transfrom = self.transform

    def predict(self, img):
        """Classify a single image (e.g. a PIL image).

        Returns:
            Tuple ``(index, prob)``: the predicted class index and its
            softmax probability.
        """
        img_tensor = self.transform(img).unsqueeze(0)  # add batch dimension
        with torch.no_grad():
            outs = self.net(img_tensor)
            index = torch.argmax(outs, dim=1)[0].item()
            prob = torch.softmax(outs[0], 0)[index].item()
            return index, prob

    def feature(self, img_tensor):
        """Return flattened backbone features for a batched image tensor."""
        x = self.backbone(img_tensor)
        return torch.flatten(x, 1)
