from ResNet18_PRO import ResNet18_PRO_Model, BasicBlock, SPP
import torchvision
from DTs import *
from SDT import SDT


def frozen_model_paramters(model, frozen_namelist):
    """Disable gradient updates for matching parameters and return the model.

    A parameter is frozen when any dotted component of its name (e.g. the
    "features" in "features.0.weight") appears in ``frozen_namelist``.
    """
    frozen = set(frozen_namelist)
    for name, param in model.named_parameters():
        if frozen.intersection(name.split('.')):
            param.requires_grad = False
    return model


####### SVM model ########
class LinearKernelSVM(nn.Module):
    """Linear (kernel-free) SVM scoring head: one affine map over flat input.

    Input of shape (N, ...) is flattened to (N, input_dim) and projected to
    (N, output_dim) class scores.
    """

    def __init__(self, input_dim, output_dim):
        super(LinearKernelSVM, self).__init__()
        self.input_dim = input_dim
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # Collapse all non-batch dimensions, then score with the linear layer.
        flat = x.flatten(start_dim=1)
        scores = self.linear(flat)
        return scores


class HingeLoss(nn.Module):
    """Multi-class hinge (SVM) loss, summed over classes and averaged over batch.

    loss = mean_i sum_{c != y_i} max(0, 1 + s_ic - s_iy_i)
    """

    def __init__(self):
        super().__init__()

    def forward(self, scores, truth):
        """Compute the hinge loss.

        :param scores: (N, C) float tensor of class scores.
        :param truth: (N,) integer tensor of ground-truth class indices.
        :return: scalar tensor, mean hinge loss over the batch.
        """
        # NOTE: the original comment claimed both inputs are shape (N);
        # the indexing below shows scores is (N, C).
        N = scores.shape[0]
        # torch.arange on the scores' device (np.arange indexing would
        # fail for CUDA tensors); identical result on CPU.
        idx = torch.arange(N, device=scores.device)
        correct_class_scores = scores[idx, truth].reshape(N, 1)
        margin = torch.clamp(1 + scores - correct_class_scores, min=0)
        # The correct class contributes a constant margin of 1 -- zero it out.
        margin[idx, truth] = 0
        return torch.sum(margin) / N


class LabelSmoothingSVM(nn.Module):
    """Multi-class hinge (SVM) loss weighted by label-smoothed targets.

    NOTE(review): the original docstring said "NLL loss" -- the math below is
    a hinge loss, not NLL. Each non-target class margin is weighted by the
    smoothed label mass ``ave_smooth``; the target class margin is zeroed, so
    with ``smoothing == 0`` the loss is identically zero.
    """

    def __init__(self, smoothing=0.0, num_class=3):
        """Constructor for the LabelSmoothingSVM module.

        :param smoothing: label smoothing factor (the epsilon parameter).
        :param num_class: number of classes C.
        """
        super(LabelSmoothingSVM, self).__init__()
        self.num_class = num_class
        # Probability mass placed on each non-target class.
        self.ave_smooth = smoothing / (num_class - 1)
        # Extra weight on the target class (only reaches the target entry of
        # label_smooth, whose margin is zeroed below).
        self.confidence = 1.0 - smoothing - self.ave_smooth

    def forward(self, scores, target):
        """Compute the smoothed hinge loss.

        :param scores: (N, C) float tensor of class scores.
        :param target: (N,) integer tensor of ground-truth class indices.
        :return: scalar tensor, mean loss over the batch.
        """
        N = scores.shape[0]
        # torch.arange on the scores' device (np.arange indexing would fail
        # for CUDA tensors); identical result on CPU.
        idx = torch.arange(N, device=scores.device)
        label_one_hot = torch.nn.functional.one_hot(target, self.num_class)
        label_smooth = label_one_hot * self.confidence + self.ave_smooth
        correct_class_scores = scores[idx, target].reshape(-1, 1)
        margin = torch.clamp(1 + scores - correct_class_scores, min=0)
        margin[idx, target] = 0
        loss = (label_smooth * margin).sum(dim=1)
        return loss.mean()


def SVM(device, num_class):
    """Build a linear SVM over flattened 416x416 RGB images.

    Fix: the output dimension was hard-coded to 3, silently ignoring the
    ``num_class`` argument; it now honors the caller's class count.

    :param device: torch device (or device string) to place the model on.
    :param num_class: number of output classes.
    :return: LinearKernelSVM moved to ``device``.
    """
    model = LinearKernelSVM(416 * 416 * 3, num_class)
    model = model.to(device)
    return model


####### SoftDecisionTree ########
def SDTModel(device, num_class=3):
    """Build a Soft Decision Tree classifier over flattened 416x416 RGB images.

    :param device: torch device (or device string) to place the model on.
    :param num_class: number of output classes.
    :return: SDT model moved to ``device``.
    """
    input_dim = 416 * 416 * 3   # flattened input size
    output_dim = num_class      # one output per class
    depth = 5                   # tree depth
    lamda = 1e-3                # coefficient of the regularization term
    use_cuda = torch.cuda.is_available()
    model = SDT(input_dim, output_dim, depth, lamda, use_cuda)
    return model.to(device)


####### SoftDecisionTree for transfer learning ########
def transfer_SDTModel(device, saved_pth, num_class=3):
    """Load a 2-class pretrained Soft Decision Tree and retarget it to num_class.

    The checkpoint's leaf layer is replaced with a fresh ``num_class``-output
    linear head for transfer learning.

    :param device: torch device (or device string) to place the model on.
    :param saved_pth: path to a state_dict saved from a 2-class SDT.
    :param num_class: number of output classes for the new head.
    :return: SDT model with a fresh leaf layer, moved to ``device``.
    """
    input_dim = 416 * 416 * 3  # flattened input size
    output_dim = 2             # the checkpoint was trained with 2 classes
    depth = 5                  # tree depth
    lamda = 1e-3               # coefficient of the regularization term
    use_cuda = torch.cuda.is_available()
    model = SDT(input_dim, output_dim, depth, lamda, use_cuda)
    # map_location keeps CUDA-saved checkpoints loadable on CPU-only hosts.
    pretrained_dict = torch.load(saved_pth, map_location=device)
    model.load_state_dict(pretrained_dict)
    # Fresh (randomly initialized) classification head for the new class count.
    model.leaf_nodes = nn.Linear(model.leaf_nodes.in_features, num_class, bias=False)
    model = model.to(device)
    return model


####### VGG13 ########

def VGG13(device, num_class=2, test_pth_path=None, Trans_Learning=False, Freeze=False):
    """Build a VGG13-BN classifier with a ``num_class``-way final layer.

    :param device: torch device (or device string) to place the model on.
    :param num_class: number of output classes.
    :param test_pth_path: optional state_dict path; takes priority over
        ImageNet weights when given.
    :param Trans_Learning: load torchvision's pretrained ImageNet weights.
    :param Freeze: freeze all convolutional ("features") parameters.
    :return: model moved to ``device``.
    """
    model = torchvision.models.vgg13_bn(pretrained=Trans_Learning)
    in_features = model.classifier[6].in_features
    model.classifier[6] = nn.Linear(in_features, num_class)
    model = model.to(device)
    if test_pth_path is not None:
        print(f"############# loading model : {test_pth_path}  ###########")
        # map_location keeps CUDA-saved checkpoints loadable on CPU-only hosts.
        model.load_state_dict(torch.load(test_pth_path, map_location=device))
    elif Trans_Learning:
        print("############# using model in pytorch ######################")
    if Freeze:
        print("############# Freezing the network ########################")
        frozen_namelist = ["features"]
        model = frozen_model_paramters(model, frozen_namelist)
    return model


####### ResNet18 ########
def ResNet18(device, num_class=2, test_pth_path=None, Trans_Learning=False, Freeze=False):
    """Build a ResNet-18 classifier with a ``num_class``-way final layer.

    :param device: torch device (or device string) to place the model on.
    :param num_class: number of output classes.
    :param test_pth_path: optional state_dict path; takes priority over
        ImageNet weights when given.
    :param Trans_Learning: load torchvision's pretrained ImageNet weights.
    :param Freeze: freeze the stem (conv1/bn1) and the first two stages.
    :return: model moved to ``device``.
    """
    model = torchvision.models.resnet18(pretrained=Trans_Learning)
    in_features = model.fc.in_features
    model.fc = nn.Linear(in_features, num_class)
    model = model.to(device)
    if test_pth_path is not None:
        print(f"############# loading model : {test_pth_path}  ###########")
        # map_location keeps CUDA-saved checkpoints loadable on CPU-only hosts.
        model.load_state_dict(torch.load(test_pth_path, map_location=device))
    elif Trans_Learning:
        print("############# using model in pytorch ######################")
    if Freeze:
        print("############# Freezing the network ########################")
        # conv1/bn1 were frozen by hand-written loops before; the helper
        # matches name components, so listing them here has the same effect.
        frozen_namelist = ["conv1", "bn1", "layer1", "layer2"]
        model = frozen_model_paramters(model, frozen_namelist)
    return model


####### GoogleNet ########
def GoogLeNet(device, num_class=2, test_pth_path=None, Trans_Learning=False, Freeze=False):
    """Build a GoogLeNet classifier with ``num_class``-way main and aux heads.

    NOTE(review): unlike the sibling builders, ``pretrained`` is hard-coded to
    False here, so ``Trans_Learning`` only affects the log message -- confirm
    whether that is intended (pretrained GoogLeNet strips its aux heads, which
    would break the aux replacements below).

    :param device: torch device (or device string) to place the model on.
    :param num_class: number of output classes.
    :param test_pth_path: optional state_dict path to load.
    :param Trans_Learning: see note above; currently print-only.
    :param Freeze: freeze the stem and all inception blocks up to 4e.
    :return: model moved to ``device``.
    """
    model = torchvision.models.googlenet(pretrained=False)
    model.fc = nn.Linear(model.fc.in_features, num_class)
    # Read each aux head's own in_features instead of borrowing model.fc's
    # (they happen to be equal in torchvision's GoogLeNet, but this is safer).
    model.aux1.fc2 = nn.Linear(model.aux1.fc2.in_features, num_class)
    model.aux2.fc2 = nn.Linear(model.aux2.fc2.in_features, num_class)
    model = model.to(device)
    if test_pth_path is not None:
        print(f"############# loading model : {test_pth_path}  ###########")
        # map_location keeps CUDA-saved checkpoints loadable on CPU-only hosts.
        model.load_state_dict(torch.load(test_pth_path, map_location=device))
    elif Trans_Learning:
        print("############# using model in pytorch ######################")
    if Freeze:
        print("############# Freezing the network ########################")
        frozen_namelist = ['conv1', 'conv2', 'conv3', 'inception3a', 'inception3b', 'inception4a',
                           'inception4b', 'inception4c', 'inception4d', 'inception4e']
        model = frozen_model_paramters(model, frozen_namelist)
    return model


##### ResNet18_PRO #####
def ResNet18_PRO(device, num_class=2, test_pth_path=None, Trans_Learning=False, Freeze=False):
    """Build the project's custom ResNet18_PRO model (ResNet-18 layout + SPP).

    NOTE(review): ``Trans_Learning`` and ``Freeze`` only print messages here;
    no pretrained weights exist for the custom architecture and no parameters
    are frozen -- confirm whether Freeze should do anything.

    :param device: torch device (or device string) to place the model on.
    :param num_class: number of output classes.
    :param test_pth_path: optional state_dict path to load.
    :return: model moved to ``device``.
    """
    model = ResNet18_PRO_Model(BasicBlock, [2, 2, 2, 2], num_class, SPP)
    model = model.to(device)
    if test_pth_path is not None:
        print(f"############# loading model : {test_pth_path}  ###########")
        # map_location keeps CUDA-saved checkpoints loadable on CPU-only hosts.
        model.load_state_dict(torch.load(test_pth_path, map_location=device))
    elif Trans_Learning:
        print("############# using model in pytorch ######################")
    if Freeze:
        print("############# Freezing the network ########################")
    return model


if __name__ == "__main__":
    # Smoke test: build a 3-class GoogLeNet and print its layout.
    # Fall back to CPU so the demo also runs on CUDA-less machines
    # (the previous hard-coded "cuda" crashed there).
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = GoogLeNet(device, 3)
    print(model)
