{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/nasnet.py","language":"python","identifier":"nasnetalarge","parameters":"(num_classes=1001, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"NASNetALarge model architecture from the\n `\"NASNet\" <https:\/\/arxiv.org\/abs\/1707.07012>`_ paper.","docstring_summary":"r\"\"\"NASNetALarge model architecture from the\n `\"NASNet\" <https:\/\/arxiv.org\/abs\/1707.07012>`_ paper.","docstring_tokens":["r","NASNetALarge","model","architecture","from","the","NASNet","<https",":","\/\/","arxiv",".","org","\/","abs","\/","1707",".","07012",">","_","paper","."],"function":"def nasnetalarge(num_classes=1001, pretrained='imagenet'):\n r\"\"\"NASNetALarge model architecture from the\n `\"NASNet\" <https:\/\/arxiv.org\/abs\/1707.07012>`_ paper.\n \"\"\"\n if pretrained:\n settings = pretrained_settings['nasnetalarge'][pretrained]\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n\n # both 'imagenet'&'imagenet+background' are loaded from same parameters\n model = NASNetALarge(num_classes=1001)\n model.load_state_dict(model_zoo.load_url(settings['url']))\n\n if pretrained == 'imagenet':\n new_last_linear = nn.Linear(model.last_linear.in_features, 1000)\n new_last_linear.weight.data = model.last_linear.weight.data[1:]\n new_last_linear.bias.data = model.last_linear.bias.data[1:]\n model.last_linear = new_last_linear\n\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n\n model.mean = settings['mean']\n model.std = settings['std']\n else:\n model = NASNetALarge(num_classes=num_classes)\n return model","function_tokens":["def","nasnetalarge","(","num_classes","=","1001",",","pretrained","=","'imagenet'",")",":","if","pretrained",":","settings","=","pretrained_settings","[","'nasnetalarge'","]","[","pretrained","]","assert","num_classes","==","settings","[","'num_classes'","]",",","\"num_classes should be {}, but is {}\"",".","format","(","settings","[","'num_classes'","]",",","num_classes",")","# both 'imagenet'&'imagenet+background' are loaded from same parameters","model","=","NASNetALarge","(","num_classes","=","1001",")","model",".","load_state_dict","(","model_zoo",".","load_url","(","settings","[","'url'","]",")",")","if","pretrained","==","'imagenet'",":","new_last_linear","=","nn",".","Linear","(","model",".","last_linear",".","in_features",",","1000",")","new_last_linear",".","weight",".","data","=","model",".","last_linear",".","weight",".","data","[","1",":","]","new_last_linear",".","bias",".","data","=","model",".","last_linear",".","bias",".","data","[","1",":","]","model",".","last_linear","=","new_last_linear","model",".","input_space","=","settings","[","'input_space'","]","model",".","input_size","=","settings","[","'input_size'","]","model",".","input_range","=","settings","[","'input_range'","]","model",".","mean","=","settings","[","'mean'","]","model",".","std","=","settings","[","'std'","]","else",":","model","=","NASNetALarge","(","num_classes","=","num_classes",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/nasnet.py#L608-L635"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/bninception.py","language":"python","identifier":"bninception","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"BNInception model architecture from <https:\/\/arxiv.org\/pdf\/1502.03167.pdf>`_ paper.","docstring_summary":"r\"\"\"BNInception model architecture from <https:\/\/arxiv.org\/pdf\/1502.03167.pdf>`_ paper.","docstring_tokens":["r","BNInception","model","architecture","from","<https",":","\/\/","arxiv",".","org","\/","pdf","\/","1502",".","03167",".","pdf",">","_","paper","."],"function":"def bninception(num_classes=1000, pretrained='imagenet'):\n r\"\"\"BNInception model architecture from <https:\/\/arxiv.org\/pdf\/1502.03167.pdf>`_ paper.\n \"\"\"\n model = BNInception(num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['bninception'][pretrained]\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n model.load_state_dict(model_zoo.load_url(settings['url']))\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model","function_tokens":["def","bninception","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","BNInception","(","num_classes","=","num_classes",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'bninception'","]","[","pretrained","]","assert","num_classes","==","settings","[","'num_classes'","]",",","\"num_classes should be {}, but is {}\"",".","format","(","settings","[","'num_classes'","]",",","num_classes",")","model",".","load_state_dict","(","model_zoo",".","load_url","(","settings","[","'url'","]",")",")","model",".","input_space","=","settings","[","'input_space'","]","model",".","input_size","=","settings","[","'input_size'","]","model",".","input_range","=","settings","[","'input_range'","]","model",".","mean","=","settings","[","'mean'","]","model",".","std","=","settings","[","'std'","]","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/bninception.py#L497-L511"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"alexnet","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"AlexNet model architecture from the\n `\"One weird trick...\" <https:\/\/arxiv.org\/abs\/1404.5997>`_ paper.","docstring_summary":"r\"\"\"AlexNet model architecture from the\n `\"One weird trick...\" <https:\/\/arxiv.org\/abs\/1404.5997>`_ paper.","docstring_tokens":["r","AlexNet","model","architecture","from","the","One","weird","trick","...","<https",":","\/\/","arxiv",".","org","\/","abs","\/","1404",".","5997",">","_","paper","."],"function":"def alexnet(num_classes=1000, pretrained='imagenet'):\n r\"\"\"AlexNet model architecture from the\n `\"One weird trick...\" <https:\/\/arxiv.org\/abs\/1404.5997>`_ paper.\n \"\"\"\n # https:\/\/github.com\/pytorch\/vision\/blob\/master\/torchvision\/models\/alexnet.py\n model = models.alexnet(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['alexnet'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_alexnet(model)\n return model","function_tokens":["def","alexnet","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","# https:\/\/github.com\/pytorch\/vision\/blob\/master\/torchvision\/models\/alexnet.py","model","=","models",".","alexnet","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'alexnet'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_alexnet","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L168-L178"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"densenet121","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"Densenet-121 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`","docstring_summary":"r\"\"\"Densenet-121 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`","docstring_tokens":["r","Densenet","-","121","model","from","Densely","Connected","Convolutional","Networks","<https",":","\/\/","arxiv",".","org","\/","pdf","\/","1608",".","06993",".","pdf",">"],"function":"def densenet121(num_classes=1000, pretrained='imagenet'):\n r\"\"\"Densenet-121 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`\n \"\"\"\n model = models.densenet121(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['densenet121'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_densenets(model)\n return model","function_tokens":["def","densenet121","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","densenet121","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'densenet121'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_densenets","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L205-L214"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"densenet169","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"Densenet-169 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`","docstring_summary":"r\"\"\"Densenet-169 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`","docstring_tokens":["r","Densenet","-","169","model","from","Densely","Connected","Convolutional","Networks","<https",":","\/\/","arxiv",".","org","\/","pdf","\/","1608",".","06993",".","pdf",">"],"function":"def densenet169(num_classes=1000, pretrained='imagenet'):\n r\"\"\"Densenet-169 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`\n \"\"\"\n model = models.densenet169(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['densenet169'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_densenets(model)\n return model","function_tokens":["def","densenet169","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","densenet169","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'densenet169'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_densenets","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L216-L225"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"densenet201","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"Densenet-201 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`","docstring_summary":"r\"\"\"Densenet-201 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`","docstring_tokens":["r","Densenet","-","201","model","from","Densely","Connected","Convolutional","Networks","<https",":","\/\/","arxiv",".","org","\/","pdf","\/","1608",".","06993",".","pdf",">"],"function":"def densenet201(num_classes=1000, pretrained='imagenet'):\n r\"\"\"Densenet-201 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`\n \"\"\"\n model = models.densenet201(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['densenet201'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_densenets(model)\n return model","function_tokens":["def","densenet201","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","densenet201","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'densenet201'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_densenets","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L227-L236"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"densenet161","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"Densenet-161 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`","docstring_summary":"r\"\"\"Densenet-161 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`","docstring_tokens":["r","Densenet","-","161","model","from","Densely","Connected","Convolutional","Networks","<https",":","\/\/","arxiv",".","org","\/","pdf","\/","1608",".","06993",".","pdf",">"],"function":"def densenet161(num_classes=1000, pretrained='imagenet'):\n r\"\"\"Densenet-161 model from\n `\"Densely Connected Convolutional Networks\" <https:\/\/arxiv.org\/pdf\/1608.06993.pdf>`\n \"\"\"\n model = models.densenet161(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['densenet161'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_densenets(model)\n return model","function_tokens":["def","densenet161","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","densenet161","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'densenet161'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_densenets","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L238-L247"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"inceptionv3","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"Inception v3 model architecture from\n `\"Rethinking the Inception Architecture for Computer Vision\" <http:\/\/arxiv.org\/abs\/1512.00567>`_.","docstring_summary":"r\"\"\"Inception v3 model architecture from\n `\"Rethinking the Inception Architecture for Computer Vision\" <http:\/\/arxiv.org\/abs\/1512.00567>`_.","docstring_tokens":["r","Inception","v3","model","architecture","from","Rethinking","the","Inception","Architecture","for","Computer","Vision","<http",":","\/\/","arxiv",".","org","\/","abs","\/","1512",".","00567",">","_","."],"function":"def inceptionv3(num_classes=1000, pretrained='imagenet'):\n r\"\"\"Inception v3 model architecture from\n `\"Rethinking the Inception Architecture for Computer Vision\" <http:\/\/arxiv.org\/abs\/1512.00567>`_.\n \"\"\"\n model = models.inception_v3(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['inceptionv3'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n\n # Modify attributs\n model.last_linear = model.fc\n del model.fc\n\n def features(self, input):\n # 299 x 299 x 3\n x = self.Conv2d_1a_3x3(input) # 149 x 149 x 32\n x = self.Conv2d_2a_3x3(x) # 147 x 147 x 32\n x = self.Conv2d_2b_3x3(x) # 147 x 147 x 64\n x = F.max_pool2d(x, kernel_size=3, stride=2) # 73 x 73 x 64\n x = self.Conv2d_3b_1x1(x) # 73 x 73 x 80\n x = self.Conv2d_4a_3x3(x) # 71 x 71 x 192\n x = F.max_pool2d(x, kernel_size=3, stride=2) # 35 x 35 x 192\n x = self.Mixed_5b(x) # 35 x 35 x 256\n x = self.Mixed_5c(x) # 35 x 35 x 288\n x = self.Mixed_5d(x) # 35 x 35 x 288\n x = self.Mixed_6a(x) # 17 x 17 x 768\n x = self.Mixed_6b(x) # 17 x 17 x 768\n x = self.Mixed_6c(x) # 17 x 17 x 768\n x = self.Mixed_6d(x) # 17 x 17 x 768\n x = self.Mixed_6e(x) # 17 x 17 x 768\n if self.training and self.aux_logits:\n self._out_aux = self.AuxLogits(x) # 17 x 17 x 768\n x = self.Mixed_7a(x) # 8 x 8 x 1280\n x = self.Mixed_7b(x) # 8 x 8 x 2048\n x = self.Mixed_7c(x) # 8 x 8 x 2048\n return x\n\n def logits(self, features):\n x = F.avg_pool2d(features, kernel_size=8) # 1 x 1 x 2048\n x = F.dropout(x, training=self.training) # 1 x 1 x 2048\n x = x.view(x.size(0), -1) # 2048\n x = self.last_linear(x) # 1000 (num_classes)\n if self.training and self.aux_logits:\n aux = self._out_aux\n self._out_aux = None\n return x, aux\n return x\n\n def forward(self, input):\n x = self.features(input)\n x = self.logits(x)\n return x\n\n # Modify methods\n model.features = types.MethodType(features, model)\n model.logits = types.MethodType(logits, model)\n model.forward = types.MethodType(forward, model)\n return model","function_tokens":["def","inceptionv3","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","inception_v3","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'inceptionv3'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","# Modify attributs","model",".","last_linear","=","model",".","fc","del","model",".","fc","def","features","(","self",",","input",")",":","# 299 x 299 x 3","x","=","self",".","Conv2d_1a_3x3","(","input",")","# 149 x 149 x 
32","x","=","self",".","Conv2d_2a_3x3","(","x",")","# 147 x 147 x 32","x","=","self",".","Conv2d_2b_3x3","(","x",")","# 147 x 147 x 64","x","=","F",".","max_pool2d","(","x",",","kernel_size","=","3",",","stride","=","2",")","# 73 x 73 x 64","x","=","self",".","Conv2d_3b_1x1","(","x",")","# 73 x 73 x 80","x","=","self",".","Conv2d_4a_3x3","(","x",")","# 71 x 71 x 192","x","=","F",".","max_pool2d","(","x",",","kernel_size","=","3",",","stride","=","2",")","# 35 x 35 x 192","x","=","self",".","Mixed_5b","(","x",")","# 35 x 35 x 256","x","=","self",".","Mixed_5c","(","x",")","# 35 x 35 x 288","x","=","self",".","Mixed_5d","(","x",")","# 35 x 35 x 288","x","=","self",".","Mixed_6a","(","x",")","# 17 x 17 x 768","x","=","self",".","Mixed_6b","(","x",")","# 17 x 17 x 768","x","=","self",".","Mixed_6c","(","x",")","# 17 x 17 x 768","x","=","self",".","Mixed_6d","(","x",")","# 17 x 17 x 768","x","=","self",".","Mixed_6e","(","x",")","# 17 x 17 x 768","if","self",".","training","and","self",".","aux_logits",":","self",".","_out_aux","=","self",".","AuxLogits","(","x",")","# 17 x 17 x 768","x","=","self",".","Mixed_7a","(","x",")","# 8 x 8 x 1280","x","=","self",".","Mixed_7b","(","x",")","# 8 x 8 x 2048","x","=","self",".","Mixed_7c","(","x",")","# 8 x 8 x 2048","return","x","def","logits","(","self",",","features",")",":","x","=","F",".","avg_pool2d","(","features",",","kernel_size","=","8",")","# 1 x 1 x 2048","x","=","F",".","dropout","(","x",",","training","=","self",".","training",")","# 1 x 1 x 2048","x","=","x",".","view","(","x",".","size","(","0",")",",","-","1",")","# 2048","x","=","self",".","last_linear","(","x",")","# 1000 (num_classes)","if","self",".","training","and","self",".","aux_logits",":","aux","=","self",".","_out_aux","self",".","_out_aux","=","None","return","x",",","aux","return","x","def","forward","(","self",",","input",")",":","x","=","self",".","features","(","input",")","x","=","self",".","logits","(","x",")","return","x","# Modify methods","model",".","features","=","types",".","MethodType","(","features",",","model",")","model",".","logits","=","types",".","MethodType","(","logits",",","model",")","model",".","forward","=","types",".","MethodType","(","forward",",","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L252-L309"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"resnet18","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-18 model.","docstring_summary":"Constructs a ResNet-18 model.","docstring_tokens":["Constructs","a","ResNet","-","18","model","."],"function":"def resnet18(num_classes=1000, pretrained='imagenet'):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = models.resnet18(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['resnet18'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_resnets(model)\n return model","function_tokens":["def","resnet18","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","resnet18","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'resnet18'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_resnets","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L348-L356"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"resnet34","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-34 model.","docstring_summary":"Constructs a ResNet-34 model.","docstring_tokens":["Constructs","a","ResNet","-","34","model","."],"function":"def resnet34(num_classes=1000, pretrained='imagenet'):\n \"\"\"Constructs a ResNet-34 model.\n \"\"\"\n model = models.resnet34(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['resnet34'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_resnets(model)\n return model","function_tokens":["def","resnet34","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","resnet34","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'resnet34'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_resnets","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L358-L366"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"resnet50","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-50 model.","docstring_summary":"Constructs a ResNet-50 model.","docstring_tokens":["Constructs","a","ResNet","-","50","model","."],"function":"def resnet50(num_classes=1000, pretrained='imagenet'):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = models.resnet50(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['resnet50'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_resnets(model)\n return model","function_tokens":["def","resnet50","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","resnet50","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'resnet50'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_resnets","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L368-L376"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"resnet101","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-101 model.","docstring_summary":"Constructs a ResNet-101 model.","docstring_tokens":["Constructs","a","ResNet","-","101","model","."],"function":"def resnet101(num_classes=1000, pretrained='imagenet'):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = models.resnet101(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['resnet101'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_resnets(model)\n return model","function_tokens":["def","resnet101","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","resnet101","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'resnet101'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_resnets","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L378-L386"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"resnet152","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-152 model.","docstring_summary":"Constructs a ResNet-152 model.","docstring_tokens":["Constructs","a","ResNet","-","152","model","."],"function":"def resnet152(num_classes=1000, pretrained='imagenet'):\n \"\"\"Constructs a ResNet-152 model.\n \"\"\"\n model = models.resnet152(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['resnet152'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_resnets(model)\n return model","function_tokens":["def","resnet152","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","resnet152","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'resnet152'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_resnets","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L388-L396"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"squeezenet1_0","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"SqueezeNet model architecture from the `\"SqueezeNet: AlexNet-level\n accuracy with 50x fewer parameters and <0.5MB model size\"\n <https:\/\/arxiv.org\/abs\/1602.07360>`_ paper.","docstring_summary":"r\"\"\"SqueezeNet model architecture from the `\"SqueezeNet: AlexNet-level\n accuracy with 50x fewer parameters and <0.5MB model size\"\n <https:\/\/arxiv.org\/abs\/1602.07360>`_ paper.","docstring_tokens":["r","SqueezeNet","model","architecture","from","the","SqueezeNet",":","AlexNet","-","level","accuracy","with","50x","fewer","parameters","and","<0",".","5MB","model","size","<https",":","\/\/","arxiv",".","org","\/","abs","\/","1602",".","07360",">","_","paper","."],"function":"def squeezenet1_0(num_classes=1000, pretrained='imagenet'):\n r\"\"\"SqueezeNet model architecture from the `\"SqueezeNet: AlexNet-level\n accuracy with 50x fewer parameters and <0.5MB model size\"\n <https:\/\/arxiv.org\/abs\/1602.07360>`_ paper.\n \"\"\"\n model = models.squeezenet1_0(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['squeezenet1_0'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_squeezenets(model)\n return model","function_tokens":["def","squeezenet1_0","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","squeezenet1_0","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'squeezenet1_0'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_squeezenets","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L428-L438"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"squeezenet1_1","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"SqueezeNet 1.1 model from the `official SqueezeNet repo\n <https:\/\/github.com\/DeepScale\/SqueezeNet\/tree\/master\/SqueezeNet_v1.1>`_.\n SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters\n than SqueezeNet 1.0, without sacrificing accuracy.","docstring_summary":"r\"\"\"SqueezeNet 1.1 model from the `official SqueezeNet repo\n <https:\/\/github.com\/DeepScale\/SqueezeNet\/tree\/master\/SqueezeNet_v1.1>`_.\n SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters\n than SqueezeNet 1.0, without sacrificing accuracy.","docstring_tokens":["r","SqueezeNet","1",".","1","model","from","the","official","SqueezeNet","repo","<https",":","\/\/","github",".","com","\/","DeepScale","\/","SqueezeNet","\/","tree","\/","master","\/","SqueezeNet_v1",".","1",">","_",".","SqueezeNet","1",".","1","has","2",".","4x","less","computation","and","slightly","fewer","parameters","than","SqueezeNet","1",".","0","without","sacrificing","accuracy","."],"function":"def squeezenet1_1(num_classes=1000, pretrained='imagenet'):\n r\"\"\"SqueezeNet 1.1 model from the `official SqueezeNet repo\n <https:\/\/github.com\/DeepScale\/SqueezeNet\/tree\/master\/SqueezeNet_v1.1>`_.\n SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters\n than SqueezeNet 1.0, without sacrificing accuracy.\n \"\"\"\n model = models.squeezenet1_1(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['squeezenet1_1'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_squeezenets(model)\n return model","function_tokens":["def","squeezenet1_1","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","squeezenet1_1","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'squeezenet1_1'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_squeezenets","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L440-L451"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"vgg11","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"VGG 11-layer model (configuration \"A\")","docstring_summary":"VGG 11-layer model (configuration \"A\")","docstring_tokens":["VGG","11","-","layer","model","(","configuration","A",")"],"function":"def vgg11(num_classes=1000, pretrained='imagenet'):\n \"\"\"VGG 11-layer model (configuration \"A\")\n \"\"\"\n model = models.vgg11(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['vgg11'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_vggs(model)\n return model","function_tokens":["def","vgg11","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","vgg11","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'vgg11'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_vggs","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L495-L503"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"vgg11_bn","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"VGG 11-layer model (configuration \"A\") with batch normalization","docstring_summary":"VGG 11-layer model (configuration \"A\") with batch normalization","docstring_tokens":["VGG","11","-","layer","model","(","configuration","A",")","with","batch","normalization"],"function":"def vgg11_bn(num_classes=1000, pretrained='imagenet'):\n \"\"\"VGG 11-layer model (configuration \"A\") with batch normalization\n \"\"\"\n model = models.vgg11_bn(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['vgg11_bn'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_vggs(model)\n return model","function_tokens":["def","vgg11_bn","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","vgg11_bn","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'vgg11_bn'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_vggs","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L505-L513"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"vgg13","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"VGG 13-layer model (configuration \"B\")","docstring_summary":"VGG 13-layer model (configuration \"B\")","docstring_tokens":["VGG","13","-","layer","model","(","configuration","B",")"],"function":"def vgg13(num_classes=1000, pretrained='imagenet'):\n \"\"\"VGG 13-layer model (configuration \"B\")\n \"\"\"\n model = models.vgg13(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['vgg13'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_vggs(model)\n return model","function_tokens":["def","vgg13","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","vgg13","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'vgg13'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_vggs","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L515-L523"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"vgg13_bn","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"VGG 13-layer model (configuration \"B\") with batch normalization","docstring_summary":"VGG 13-layer model (configuration \"B\") with batch normalization","docstring_tokens":["VGG","13","-","layer","model","(","configuration","B",")","with","batch","normalization"],"function":"def vgg13_bn(num_classes=1000, pretrained='imagenet'):\n \"\"\"VGG 13-layer model (configuration \"B\") with batch normalization\n \"\"\"\n model = models.vgg13_bn(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['vgg13_bn'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_vggs(model)\n return model","function_tokens":["def","vgg13_bn","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","vgg13_bn","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'vgg13_bn'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_vggs","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L525-L533"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"vgg16","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"VGG 16-layer model (configuration \"D\")","docstring_summary":"VGG 16-layer model (configuration \"D\")","docstring_tokens":["VGG","16","-","layer","model","(","configuration","D",")"],"function":"def vgg16(num_classes=1000, pretrained='imagenet'):\n \"\"\"VGG 16-layer model (configuration \"D\")\n \"\"\"\n model = models.vgg16(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['vgg16'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_vggs(model)\n return model","function_tokens":["def","vgg16","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","vgg16","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'vgg16'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_vggs","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L535-L543"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"vgg16_bn","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"VGG 16-layer model (configuration \"D\") with batch normalization","docstring_summary":"VGG 16-layer model (configuration \"D\") with batch normalization","docstring_tokens":["VGG","16","-","layer","model","(","configuration","D",")","with","batch","normalization"],"function":"def vgg16_bn(num_classes=1000, pretrained='imagenet'):\n \"\"\"VGG 16-layer model (configuration \"D\") with batch normalization\n \"\"\"\n model = models.vgg16_bn(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['vgg16_bn'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_vggs(model)\n return model","function_tokens":["def","vgg16_bn","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","vgg16_bn","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'vgg16_bn'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_vggs","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L545-L553"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"vgg19","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"VGG 19-layer model (configuration \"E\")","docstring_summary":"VGG 19-layer model (configuration \"E\")","docstring_tokens":["VGG","19","-","layer","model","(","configuration","E",")"],"function":"def vgg19(num_classes=1000, pretrained='imagenet'):\n \"\"\"VGG 19-layer model (configuration \"E\")\n \"\"\"\n model = models.vgg19(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['vgg19'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_vggs(model)\n return model","function_tokens":["def","vgg19","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","vgg19","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'vgg19'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_vggs","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L555-L563"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/torchvision_models.py","language":"python","identifier":"vgg19_bn","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"VGG 19-layer model (configuration 'E') with batch normalization","docstring_summary":"VGG 19-layer model (configuration 'E') with batch normalization","docstring_tokens":["VGG","19","-","layer","model","(","configuration","E",")","with","batch","normalization"],"function":"def vgg19_bn(num_classes=1000, pretrained='imagenet'):\n \"\"\"VGG 19-layer model (configuration 'E') with batch normalization\n \"\"\"\n model = models.vgg19_bn(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['vgg19_bn'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_vggs(model)\n return model","function_tokens":["def","vgg19_bn","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","models",".","vgg19_bn","(","pretrained","=","False",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'vgg19_bn'","]","[","pretrained","]","model","=","load_pretrained","(","model",",","num_classes",",","settings",")","model","=","modify_vggs","(","model",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/torchvision_models.py#L565-L573"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/senet.py","language":"python","identifier":"SENet.__init__","parameters":"(self, block, layers, groups, reduction, dropout_p=0.2,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000)","argument_list":"","return_statement":"","docstring":"Parameters\n ----------\n block (nn.Module): Bottleneck class.\n - For SENet154: SEBottleneck\n - For SE-ResNet models: SEResNetBottleneck\n - For SE-ResNeXt models: SEResNeXtBottleneck\n layers (list of ints): Number of residual blocks for 4 layers of the\n network (layer1...layer4).\n groups (int): Number of groups for the 3x3 convolution in each\n bottleneck block.\n - For SENet154: 64\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 32\n reduction (int): Reduction ratio for Squeeze-and-Excitation modules.\n - For all models: 16\n dropout_p (float or None): Drop probability for the Dropout layer.\n If `None` the Dropout layer is not used.\n - For SENet154: 0.2\n - For SE-ResNet models: None\n - For SE-ResNeXt models: None\n inplanes (int): Number of input channels for layer1.\n - For SENet154: 128\n - For SE-ResNet models: 64\n - For SE-ResNeXt models: 64\n input_3x3 (bool): If `True`, use three 3x3 convolutions instead of\n a single 7x7 convolution in layer0.\n - For SENet154: True\n - For SE-ResNet models: False\n - For SE-ResNeXt models: False\n downsample_kernel_size (int): Kernel size for downsampling convolutions\n in layer2, layer3 and layer4.\n - For SENet154: 3\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 1\n downsample_padding (int): Padding for downsampling convolutions in\n layer2, layer3 and layer4.\n - For SENet154: 1\n - For SE-ResNet models: 0\n - For SE-ResNeXt models: 0\n num_classes (int): Number of outputs in `last_linear` layer.\n - For all models: 1000","docstring_summary":"Parameters\n ----------\n block (nn.Module): Bottleneck class.\n - For SENet154: SEBottleneck\n - For SE-ResNet models: SEResNetBottleneck\n - For SE-ResNeXt models: SEResNeXtBottleneck\n layers (list of ints): Number of residual blocks for 4 layers of the\n network (layer1...layer4).\n groups (int): Number of groups for the 3x3 convolution in each\n bottleneck block.\n - For SENet154: 64\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 32\n reduction (int): Reduction ratio for Squeeze-and-Excitation modules.\n - For all models: 16\n dropout_p (float or None): Drop probability for the Dropout layer.\n If `None` the Dropout layer is not used.\n - For SENet154: 0.2\n - For SE-ResNet models: None\n - For SE-ResNeXt models: None\n inplanes (int): Number of input channels for layer1.\n - For SENet154: 128\n - For SE-ResNet models: 64\n - For SE-ResNeXt models: 64\n input_3x3 (bool): If `True`, use three 3x3 convolutions instead of\n a single 7x7 convolution in layer0.\n - For SENet154: True\n - For SE-ResNet models: False\n - For SE-ResNeXt models: False\n downsample_kernel_size (int): Kernel size for downsampling convolutions\n in layer2, layer3 and layer4.\n - For SENet154: 3\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 1\n downsample_padding (int): Padding for downsampling convolutions in\n layer2, layer3 and layer4.\n - For SENet154: 1\n - For SE-ResNet models: 0\n - For SE-ResNeXt models: 0\n num_classes (int): Number of outputs in `last_linear` layer.\n - For all models: 
1000","docstring_tokens":["Parameters","----------","block","(","nn",".","Module",")",":","Bottleneck","class",".","-","For","SENet154",":","SEBottleneck","-","For","SE","-","ResNet","models",":","SEResNetBottleneck","-","For","SE","-","ResNeXt","models",":","SEResNeXtBottleneck","layers","(","list","of","ints",")",":","Number","of","residual","blocks","for","4","layers","of","the","network","(","layer1","...","layer4",")",".","groups","(","int",")",":","Number","of","groups","for","the","3x3","convolution","in","each","bottleneck","block",".","-","For","SENet154",":","64","-","For","SE","-","ResNet","models",":","1","-","For","SE","-","ResNeXt","models",":","32","reduction","(","int",")",":","Reduction","ratio","for","Squeeze","-","and","-","Excitation","modules",".","-","For","all","models",":","16","dropout_p","(","float","or","None",")",":","Drop","probability","for","the","Dropout","layer",".","If","None","the","Dropout","layer","is","not","used",".","-","For","SENet154",":","0",".","2","-","For","SE","-","ResNet","models",":","None","-","For","SE","-","ResNeXt","models",":","None","inplanes","(","int",")",":","Number","of","input","channels","for","layer1",".","-","For","SENet154",":","128","-","For","SE","-","ResNet","models",":","64","-","For","SE","-","ResNeXt","models",":","64","input_3x3","(","bool",")",":","If","True","use","three","3x3","convolutions","instead","of","a","single","7x7","convolution","in","layer0",".","-","For","SENet154",":","True","-","For","SE","-","ResNet","models",":","False","-","For","SE","-","ResNeXt","models",":","False","downsample_kernel_size","(","int",")",":","Kernel","size","for","downsampling","convolutions","in","layer2","layer3","and","layer4",".","-","For","SENet154",":","3","-","For","SE","-","ResNet","models",":","1","-","For","SE","-","ResNeXt","models",":","1","downsample_padding","(","int",")",":","Padding","for","downsampling","convolutions","in","layer2","layer3","and","layer4",".","-","For","SENet154",":","1","-","For","SE","-","ResNet","models",":","0","-","For","SE","-","ResNeXt","models",":","0","num_classes","(","int",")",":","Number","of","outputs","in","last_linear","layer",".","-","For","all","models",":","1000"],"function":"def __init__(self, block, layers, groups, reduction, dropout_p=0.2,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000):\n \"\"\"\n Parameters\n ----------\n block (nn.Module): Bottleneck class.\n - For SENet154: SEBottleneck\n - For SE-ResNet models: SEResNetBottleneck\n - For SE-ResNeXt models: SEResNeXtBottleneck\n layers (list of ints): Number of residual blocks for 4 layers of the\n network (layer1...layer4).\n groups (int): Number of groups for the 3x3 convolution in each\n bottleneck block.\n - For SENet154: 64\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 32\n reduction (int): Reduction ratio for Squeeze-and-Excitation modules.\n - For all models: 16\n dropout_p (float or None): Drop probability for the Dropout layer.\n If `None` the Dropout layer is not used.\n - For SENet154: 0.2\n - For SE-ResNet models: None\n - For SE-ResNeXt models: None\n inplanes (int): Number of input channels for layer1.\n - For SENet154: 128\n - For SE-ResNet models: 64\n - For SE-ResNeXt models: 64\n input_3x3 (bool): If `True`, use three 3x3 convolutions instead of\n a single 7x7 convolution in layer0.\n - For SENet154: True\n - For SE-ResNet models: False\n - For SE-ResNeXt models: False\n downsample_kernel_size (int): Kernel size for downsampling convolutions\n in 
layer2, layer3 and layer4.\n - For SENet154: 3\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 1\n downsample_padding (int): Padding for downsampling convolutions in\n layer2, layer3 and layer4.\n - For SENet154: 1\n - For SE-ResNet models: 0\n - For SE-ResNeXt models: 0\n num_classes (int): Number of outputs in `last_linear` layer.\n - For all models: 1000\n \"\"\"\n super(SENet, self).__init__()\n self.inplanes = inplanes\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n # To preserve compatibility with Caffe weights `ceil_mode=True`\n # is used instead of `padding=1`.\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2,\n ceil_mode=True)))\n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None\n self.last_linear = nn.Linear(512 * block.expansion, 
num_classes)","function_tokens":["def","__init__","(","self",",","block",",","layers",",","groups",",","reduction",",","dropout_p","=","0.2",",","inplanes","=","128",",","input_3x3","=","True",",","downsample_kernel_size","=","3",",","downsample_padding","=","1",",","num_classes","=","1000",")",":","super","(","SENet",",","self",")",".","__init__","(",")","self",".","inplanes","=","inplanes","if","input_3x3",":","layer0_modules","=","[","(","'conv1'",",","nn",".","Conv2d","(","3",",","64",",","3",",","stride","=","2",",","padding","=","1",",","bias","=","False",")",")",",","(","'bn1'",",","nn",".","BatchNorm2d","(","64",")",")",",","(","'relu1'",",","nn",".","ReLU","(","inplace","=","True",")",")",",","(","'conv2'",",","nn",".","Conv2d","(","64",",","64",",","3",",","stride","=","1",",","padding","=","1",",","bias","=","False",")",")",",","(","'bn2'",",","nn",".","BatchNorm2d","(","64",")",")",",","(","'relu2'",",","nn",".","ReLU","(","inplace","=","True",")",")",",","(","'conv3'",",","nn",".","Conv2d","(","64",",","inplanes",",","3",",","stride","=","1",",","padding","=","1",",","bias","=","False",")",")",",","(","'bn3'",",","nn",".","BatchNorm2d","(","inplanes",")",")",",","(","'relu3'",",","nn",".","ReLU","(","inplace","=","True",")",")",",","]","else",":","layer0_modules","=","[","(","'conv1'",",","nn",".","Conv2d","(","3",",","inplanes",",","kernel_size","=","7",",","stride","=","2",",","padding","=","3",",","bias","=","False",")",")",",","(","'bn1'",",","nn",".","BatchNorm2d","(","inplanes",")",")",",","(","'relu1'",",","nn",".","ReLU","(","inplace","=","True",")",")",",","]","# To preserve compatibility with Caffe weights `ceil_mode=True`","# is used instead of `padding=1`.","layer0_modules",".","append","(","(","'pool'",",","nn",".","MaxPool2d","(","3",",","stride","=","2",",","ceil_mode","=","True",")",")",")","self",".","layer0","=","nn",".","Sequential","(","OrderedDict","(","layer0_modules",")",")","self",".","layer1","=","self",".","_make_layer","(","block",",","planes","=","64",",","blocks","=","layers","[","0","]",",","groups","=","groups",",","reduction","=","reduction",",","downsample_kernel_size","=","1",",","downsample_padding","=","0",")","self",".","layer2","=","self",".","_make_layer","(","block",",","planes","=","128",",","blocks","=","layers","[","1","]",",","stride","=","2",",","groups","=","groups",",","reduction","=","reduction",",","downsample_kernel_size","=","downsample_kernel_size",",","downsample_padding","=","downsample_padding",")","self",".","layer3","=","self",".","_make_layer","(","block",",","planes","=","256",",","blocks","=","layers","[","2","]",",","stride","=","2",",","groups","=","groups",",","reduction","=","reduction",",","downsample_kernel_size","=","downsample_kernel_size",",","downsample_padding","=","downsample_padding",")","self",".","layer4","=","self",".","_make_layer","(","block",",","planes","=","512",",","blocks","=","layers","[","3","]",",","stride","=","2",",","groups","=","groups",",","reduction","=","reduction",",","downsample_kernel_size","=","downsample_kernel_size",",","downsample_padding","=","downsample_padding",")","self",".","avg_pool","=","nn",".","AvgPool2d","(","7",",","stride","=","1",")","self",".","dropout","=","nn",".","Dropout","(","dropout_p",")","if","dropout_p","is","not","None","else","None","self",".","last_linear","=","nn",".","Linear","(","512","*","block",".","expansion",",","num_classes",")"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretra
inedmodels\/models\/senet.py#L209-L325"}
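The input_3x3 stem recorded above replaces the usual 7x7 convolution with three stacked 3x3 convolutions. A minimal sketch to check the resulting geometry, built only from the modules listed in this record; the default inplanes=128 and the 224x224 input size are assumptions taken from this repo's usual ImageNet settings:

import torch
import torch.nn as nn
from collections import OrderedDict

# Stem for input_3x3=True, as listed in SENet.__init__ above (inplanes=128).
layer0 = nn.Sequential(OrderedDict([
    ('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False)),
    ('bn1', nn.BatchNorm2d(64)),
    ('relu1', nn.ReLU(inplace=True)),
    ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)),
    ('bn2', nn.BatchNorm2d(64)),
    ('relu2', nn.ReLU(inplace=True)),
    ('conv3', nn.Conv2d(64, 128, 3, stride=1, padding=1, bias=False)),
    ('bn3', nn.BatchNorm2d(128)),
    ('relu3', nn.ReLU(inplace=True)),
    # ceil_mode=True keeps parity with the original Caffe weights.
    ('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)),
]))

x = torch.randn(1, 3, 224, 224)  # assumed ImageNet-sized input
print(layer0(x).shape)           # torch.Size([1, 128, 56, 56])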
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/cafferesnet.py","language":"python","identifier":"conv3x3","parameters":"(in_planes, out_planes, stride=1)","argument_list":"","return_statement":"return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)","docstring":"3x3 convolution with padding","docstring_summary":"3x3 convolution with padding","docstring_tokens":["3x3","convolution","with","padding"],"function":"def conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)","function_tokens":["def","conv3x3","(","in_planes",",","out_planes",",","stride","=","1",")",":","return","nn",".","Conv2d","(","in_planes",",","out_planes",",","kernel_size","=","3",",","stride","=","stride",",","padding","=","1",",","bias","=","False",")"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/cafferesnet.py#L23-L26"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/cafferesnet.py","language":"python","identifier":"cafferesnet101","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_summary":"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_tokens":["Constructs","a","ResNet","-","101","model",".","Args",":","pretrained","(","bool",")",":","If","True","returns","a","model","pre","-","trained","on","ImageNet"],"function":"def cafferesnet101(num_classes=1000, pretrained='imagenet'):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['cafferesnet101'][pretrained]\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n model.load_state_dict(model_zoo.load_url(settings['url']))\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model","function_tokens":["def","cafferesnet101","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","ResNet","(","Bottleneck",",","[","3",",","4",",","23",",","3","]",",","num_classes","=","num_classes",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'cafferesnet101'","]","[","pretrained","]","assert","num_classes","==","settings","[","'num_classes'","]",",","\"num_classes should be {}, but is {}\"",".","format","(","settings","[","'num_classes'","]",",","num_classes",")","model",".","load_state_dict","(","model_zoo",".","load_url","(","settings","[","'url'","]",")",")","model",".","input_space","=","settings","[","'input_space'","]","model",".","input_size","=","settings","[","'input_size'","]","model",".","input_range","=","settings","[","'input_range'","]","model",".","mean","=","settings","[","'mean'","]","model",".","std","=","settings","[","'std'","]","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/cafferesnet.py#L168-L184"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/nasnet_mobile.py","language":"python","identifier":"nasnetamobile","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"NASNetALarge model architecture from the\n `\"NASNet\" <https:\/\/arxiv.org\/abs\/1707.07012>`_ paper.","docstring_summary":"r\"\"\"NASNetALarge model architecture from the\n `\"NASNet\" <https:\/\/arxiv.org\/abs\/1707.07012>`_ paper.","docstring_tokens":["r","NASNetALarge","model","architecture","from","the","NASNet","<https",":","\/\/","arxiv",".","org","\/","abs","\/","1707",".","07012",">","_","paper","."],"function":"def nasnetamobile(num_classes=1000, pretrained='imagenet'):\n r\"\"\"NASNetALarge model architecture from the\n `\"NASNet\" <https:\/\/arxiv.org\/abs\/1707.07012>`_ paper.\n \"\"\"\n if pretrained:\n settings = pretrained_settings['nasnetamobile'][pretrained]\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n\n # both 'imagenet'&'imagenet+background' are loaded from same parameters\n model = NASNetAMobile(num_classes=num_classes)\n model.load_state_dict(model_zoo.load_url(settings['url'], map_location=None))\n\n # if pretrained == 'imagenet':\n # new_last_linear = nn.Linear(model.last_linear.in_features, 1000)\n # new_last_linear.weight.data = model.last_linear.weight.data[1:]\n # new_last_linear.bias.data = model.last_linear.bias.data[1:]\n # model.last_linear = new_last_linear\n\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n\n model.mean = settings['mean']\n model.std = settings['std']\n else:\n settings = pretrained_settings['nasnetamobile']['imagenet']\n model = NASNetAMobile(num_classes=num_classes)\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n\n model.mean = settings['mean']\n model.std = settings['std']\n return model","function_tokens":["def","nasnetamobile","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","if","pretrained",":","settings","=","pretrained_settings","[","'nasnetamobile'","]","[","pretrained","]","assert","num_classes","==","settings","[","'num_classes'","]",",","\"num_classes should be {}, but is {}\"",".","format","(","settings","[","'num_classes'","]",",","num_classes",")","# both 'imagenet'&'imagenet+background' are loaded from same parameters","model","=","NASNetAMobile","(","num_classes","=","num_classes",")","model",".","load_state_dict","(","model_zoo",".","load_url","(","settings","[","'url'","]",",","map_location","=","None",")",")","# if pretrained == 'imagenet':","# new_last_linear = nn.Linear(model.last_linear.in_features, 1000)","# new_last_linear.weight.data = model.last_linear.weight.data[1:]","# new_last_linear.bias.data = model.last_linear.bias.data[1:]","# model.last_linear = 
new_last_linear","model",".","input_space","=","settings","[","'input_space'","]","model",".","input_size","=","settings","[","'input_size'","]","model",".","input_range","=","settings","[","'input_range'","]","model",".","mean","=","settings","[","'mean'","]","model",".","std","=","settings","[","'std'","]","else",":","settings","=","pretrained_settings","[","'nasnetamobile'","]","[","'imagenet'","]","model","=","NASNetAMobile","(","num_classes","=","num_classes",")","model",".","input_space","=","settings","[","'input_space'","]","model",".","input_size","=","settings","[","'input_size'","]","model",".","input_range","=","settings","[","'input_range'","]","model",".","mean","=","settings","[","'mean'","]","model",".","std","=","settings","[","'std'","]","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/nasnet_mobile.py#L618-L652"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet.py","language":"python","identifier":"conv3x3","parameters":"(in_planes, out_planes, stride=1)","argument_list":"","return_statement":"return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=True)","docstring":"3x3 convolution with padding","docstring_summary":"3x3 convolution with padding","docstring_tokens":["3x3","convolution","with","padding"],"function":"def conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=True)","function_tokens":["def","conv3x3","(","in_planes",",","out_planes",",","stride","=","1",")",":","return","nn",".","Conv2d","(","in_planes",",","out_planes",",","kernel_size","=","3",",","stride","=","stride",",","padding","=","1",",","bias","=","True",")"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet.py#L27-L30"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet.py","language":"python","identifier":"fbresnet18","parameters":"(num_classes=1000)","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_summary":"Constructs a ResNet-18 model.","docstring_tokens":["Constructs","a","ResNet","-","18","model","."],"function":"def fbresnet18(num_classes=1000):\n \"\"\"Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = FBResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes)\n return model","function_tokens":["def","fbresnet18","(","num_classes","=","1000",")",":","model","=","FBResNet","(","BasicBlock",",","[","2",",","2",",","2",",","2","]",",","num_classes","=","num_classes",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet.py#L176-L183"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet.py","language":"python","identifier":"fbresnet34","parameters":"(num_classes=1000)","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-34 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_summary":"Constructs a ResNet-34 model.","docstring_tokens":["Constructs","a","ResNet","-","34","model","."],"function":"def fbresnet34(num_classes=1000):\n \"\"\"Constructs a ResNet-34 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = FBResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)\n return model","function_tokens":["def","fbresnet34","(","num_classes","=","1000",")",":","model","=","FBResNet","(","BasicBlock",",","[","3",",","4",",","6",",","3","]",",","num_classes","=","num_classes",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet.py#L186-L193"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet.py","language":"python","identifier":"fbresnet50","parameters":"(num_classes=1000)","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_summary":"Constructs a ResNet-50 model.","docstring_tokens":["Constructs","a","ResNet","-","50","model","."],"function":"def fbresnet50(num_classes=1000):\n \"\"\"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = FBResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)\n return model","function_tokens":["def","fbresnet50","(","num_classes","=","1000",")",":","model","=","FBResNet","(","Bottleneck",",","[","3",",","4",",","6",",","3","]",",","num_classes","=","num_classes",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet.py#L196-L203"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet.py","language":"python","identifier":"fbresnet101","parameters":"(num_classes=1000)","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_summary":"Constructs a ResNet-101 model.","docstring_tokens":["Constructs","a","ResNet","-","101","model","."],"function":"def fbresnet101(num_classes=1000):\n \"\"\"Constructs a ResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = FBResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)\n return model","function_tokens":["def","fbresnet101","(","num_classes","=","1000",")",":","model","=","FBResNet","(","Bottleneck",",","[","3",",","4",",","23",",","3","]",",","num_classes","=","num_classes",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet.py#L206-L213"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet.py","language":"python","identifier":"fbresnet152","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-152 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_summary":"Constructs a ResNet-152 model.","docstring_tokens":["Constructs","a","ResNet","-","152","model","."],"function":"def fbresnet152(num_classes=1000, pretrained='imagenet'):\n \"\"\"Constructs a ResNet-152 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = FBResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['fbresnet152'][pretrained]\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n model.load_state_dict(model_zoo.load_url(settings['url']))\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model","function_tokens":["def","fbresnet152","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","model","=","FBResNet","(","Bottleneck",",","[","3",",","8",",","36",",","3","]",",","num_classes","=","num_classes",")","if","pretrained","is","not","None",":","settings","=","pretrained_settings","[","'fbresnet152'","]","[","pretrained","]","assert","num_classes","==","settings","[","'num_classes'","]",",","\"num_classes should be {}, but is {}\"",".","format","(","settings","[","'num_classes'","]",",","num_classes",")","model",".","load_state_dict","(","model_zoo",".","load_url","(","settings","[","'url'","]",")",")","model",".","input_space","=","settings","[","'input_space'","]","model",".","input_size","=","settings","[","'input_size'","]","model",".","input_range","=","settings","[","'input_range'","]","model",".","mean","=","settings","[","'mean'","]","model",".","std","=","settings","[","'std'","]","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet.py#L216-L233"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/inceptionresnetv2.py","language":"python","identifier":"inceptionresnetv2","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"InceptionResNetV2 model architecture from the\n `\"InceptionV4, Inception-ResNet...\" <https:\/\/arxiv.org\/abs\/1602.07261>`_ paper.","docstring_summary":"r\"\"\"InceptionResNetV2 model architecture from the\n `\"InceptionV4, Inception-ResNet...\" <https:\/\/arxiv.org\/abs\/1602.07261>`_ paper.","docstring_tokens":["r","InceptionResNetV2","model","architecture","from","the","InceptionV4","Inception","-","ResNet","...","<https",":","\/\/","arxiv",".","org","\/","abs","\/","1602",".","07261",">","_","paper","."],"function":"def inceptionresnetv2(num_classes=1000, pretrained='imagenet'):\n r\"\"\"InceptionResNetV2 model architecture from the\n `\"InceptionV4, Inception-ResNet...\" <https:\/\/arxiv.org\/abs\/1602.07261>`_ paper.\n \"\"\"\n if pretrained:\n settings = pretrained_settings['inceptionresnetv2'][pretrained]\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n\n # both 'imagenet'&'imagenet+background' are loaded from same parameters\n model = InceptionResNetV2(num_classes=1001)\n model.load_state_dict(model_zoo.load_url(settings['url']))\n\n if pretrained == 'imagenet':\n new_last_linear = nn.Linear(1536, 1000)\n new_last_linear.weight.data = model.last_linear.weight.data[1:]\n new_last_linear.bias.data = model.last_linear.bias.data[1:]\n model.last_linear = new_last_linear\n\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n\n model.mean = settings['mean']\n model.std = settings['std']\n else:\n model = InceptionResNetV2(num_classes=num_classes)\n return model","function_tokens":["def","inceptionresnetv2","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","if","pretrained",":","settings","=","pretrained_settings","[","'inceptionresnetv2'","]","[","pretrained","]","assert","num_classes","==","settings","[","'num_classes'","]",",","\"num_classes should be {}, but is {}\"",".","format","(","settings","[","'num_classes'","]",",","num_classes",")","# both 'imagenet'&'imagenet+background' are loaded from same 
parameters","model","=","InceptionResNetV2","(","num_classes","=","1001",")","model",".","load_state_dict","(","model_zoo",".","load_url","(","settings","[","'url'","]",")",")","if","pretrained","==","'imagenet'",":","new_last_linear","=","nn",".","Linear","(","1536",",","1000",")","new_last_linear",".","weight",".","data","=","model",".","last_linear",".","weight",".","data","[","1",":","]","new_last_linear",".","bias",".","data","=","model",".","last_linear",".","bias",".","data","[","1",":","]","model",".","last_linear","=","new_last_linear","model",".","input_space","=","settings","[","'input_space'","]","model",".","input_size","=","settings","[","'input_size'","]","model",".","input_range","=","settings","[","'input_range'","]","model",".","mean","=","settings","[","'mean'","]","model",".","std","=","settings","[","'std'","]","else",":","model","=","InceptionResNetV2","(","num_classes","=","num_classes",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/inceptionresnetv2.py#L333-L360"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/xception.py","language":"python","identifier":"Xception.__init__","parameters":"(self, num_classes=1000)","argument_list":"","return_statement":"","docstring":"Constructor\n Args:\n num_classes: number of classes","docstring_summary":"Constructor\n Args:\n num_classes: number of classes","docstring_tokens":["Constructor","Args",":","num_classes",":","number","of","classes"],"function":"def __init__(self, num_classes=1000):\n \"\"\" Constructor\n Args:\n num_classes: number of classes\n \"\"\"\n super(Xception, self).__init__()\n self.num_classes = num_classes\n\n self.conv1 = nn.Conv2d(3, 32, 3,2, 0, bias=False)\n self.bn1 = nn.BatchNorm2d(32)\n self.relu1 = nn.ReLU(inplace=True)\n\n self.conv2 = nn.Conv2d(32,64,3,bias=False)\n self.bn2 = nn.BatchNorm2d(64)\n self.relu2 = nn.ReLU(inplace=True)\n #do relu here\n\n self.block1=Block(64,128,2,2,start_with_relu=False,grow_first=True)\n self.block2=Block(128,256,2,2,start_with_relu=True,grow_first=True)\n self.block3=Block(256,728,2,2,start_with_relu=True,grow_first=True)\n\n self.block4=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block5=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block6=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block7=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n\n self.block8=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block9=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block10=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n self.block11=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n\n self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)\n\n self.conv3 = SeparableConv2d(1024,1536,3,1,1)\n self.bn3 = nn.BatchNorm2d(1536)\n self.relu3 = nn.ReLU(inplace=True)\n\n #do relu here\n self.conv4 = SeparableConv2d(1536,2048,3,1,1)\n self.bn4 = nn.BatchNorm2d(2048)\n\n self.fc = nn.Linear(2048, num_classes)","function_tokens":["def","__init__","(","self",",","num_classes","=","1000",")",":","super","(","Xception",",","self",")",".","__init__","(",")","self",".","num_classes","=","num_classes","self",".","conv1","=","nn",".","Conv2d","(","3",",","32",",","3",",","2",",","0",",","bias","=","False",")","self",".","bn1","=","nn",".","BatchNorm2d","(","32",")","self",".","relu1","=","nn",".","ReLU","(","inplace","=","True",")","self",".","conv2","=","nn",".","Conv2d","(","32",",","64",",","3",",","bias","=","False",")","self",".","bn2","=","nn",".","BatchNorm2d","(","64",")","self",".","relu2","=","nn",".","ReLU","(","inplace","=","True",")","#do relu 
here","self",".","block1","=","Block","(","64",",","128",",","2",",","2",",","start_with_relu","=","False",",","grow_first","=","True",")","self",".","block2","=","Block","(","128",",","256",",","2",",","2",",","start_with_relu","=","True",",","grow_first","=","True",")","self",".","block3","=","Block","(","256",",","728",",","2",",","2",",","start_with_relu","=","True",",","grow_first","=","True",")","self",".","block4","=","Block","(","728",",","728",",","3",",","1",",","start_with_relu","=","True",",","grow_first","=","True",")","self",".","block5","=","Block","(","728",",","728",",","3",",","1",",","start_with_relu","=","True",",","grow_first","=","True",")","self",".","block6","=","Block","(","728",",","728",",","3",",","1",",","start_with_relu","=","True",",","grow_first","=","True",")","self",".","block7","=","Block","(","728",",","728",",","3",",","1",",","start_with_relu","=","True",",","grow_first","=","True",")","self",".","block8","=","Block","(","728",",","728",",","3",",","1",",","start_with_relu","=","True",",","grow_first","=","True",")","self",".","block9","=","Block","(","728",",","728",",","3",",","1",",","start_with_relu","=","True",",","grow_first","=","True",")","self",".","block10","=","Block","(","728",",","728",",","3",",","1",",","start_with_relu","=","True",",","grow_first","=","True",")","self",".","block11","=","Block","(","728",",","728",",","3",",","1",",","start_with_relu","=","True",",","grow_first","=","True",")","self",".","block12","=","Block","(","728",",","1024",",","2",",","2",",","start_with_relu","=","True",",","grow_first","=","False",")","self",".","conv3","=","SeparableConv2d","(","1024",",","1536",",","3",",","1",",","1",")","self",".","bn3","=","nn",".","BatchNorm2d","(","1536",")","self",".","relu3","=","nn",".","ReLU","(","inplace","=","True",")","#do relu here","self",".","conv4","=","SeparableConv2d","(","1536",",","2048",",","3",",","1",",","1",")","self",".","bn4","=","nn",".","BatchNorm2d","(","2048",")","self",".","fc","=","nn",".","Linear","(","2048",",","num_classes",")"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/xception.py#L119-L160"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/pnasnet.py","language":"python","identifier":"pnasnet5large","parameters":"(num_classes=1001, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"r\"\"\"PNASNet-5 model architecture from the\n `\"Progressive Neural Architecture Search\"\n <https:\/\/arxiv.org\/abs\/1712.00559>`_ paper.","docstring_summary":"r\"\"\"PNASNet-5 model architecture from the\n `\"Progressive Neural Architecture Search\"\n <https:\/\/arxiv.org\/abs\/1712.00559>`_ paper.","docstring_tokens":["r","PNASNet","-","5","model","architecture","from","the","Progressive","Neural","Architecture","Search","<https",":","\/\/","arxiv",".","org","\/","abs","\/","1712",".","00559",">","_","paper","."],"function":"def pnasnet5large(num_classes=1001, pretrained='imagenet'):\n r\"\"\"PNASNet-5 model architecture from the\n `\"Progressive Neural Architecture Search\"\n <https:\/\/arxiv.org\/abs\/1712.00559>`_ paper.\n \"\"\"\n if pretrained:\n settings = pretrained_settings['pnasnet5large'][pretrained]\n assert num_classes == settings[\n 'num_classes'], 'num_classes should be {}, but is {}'.format(\n settings['num_classes'], num_classes)\n\n # both 'imagenet'&'imagenet+background' are loaded from same parameters\n model = PNASNet5Large(num_classes=1001)\n model.load_state_dict(model_zoo.load_url(settings['url']))\n\n if pretrained == 'imagenet':\n new_last_linear = nn.Linear(model.last_linear.in_features, 1000)\n new_last_linear.weight.data = model.last_linear.weight.data[1:]\n new_last_linear.bias.data = model.last_linear.bias.data[1:]\n model.last_linear = new_last_linear\n\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n\n model.mean = settings['mean']\n model.std = settings['std']\n else:\n model = PNASNet5Large(num_classes=num_classes)\n return model","function_tokens":["def","pnasnet5large","(","num_classes","=","1001",",","pretrained","=","'imagenet'",")",":","if","pretrained",":","settings","=","pretrained_settings","[","'pnasnet5large'","]","[","pretrained","]","assert","num_classes","==","settings","[","'num_classes'","]",",","'num_classes should be {}, but is {}'",".","format","(","settings","[","'num_classes'","]",",","num_classes",")","# both 'imagenet'&'imagenet+background' are loaded from same 
parameters","model","=","PNASNet5Large","(","num_classes","=","1001",")","model",".","load_state_dict","(","model_zoo",".","load_url","(","settings","[","'url'","]",")",")","if","pretrained","==","'imagenet'",":","new_last_linear","=","nn",".","Linear","(","model",".","last_linear",".","in_features",",","1000",")","new_last_linear",".","weight",".","data","=","model",".","last_linear",".","weight",".","data","[","1",":","]","new_last_linear",".","bias",".","data","=","model",".","last_linear",".","bias",".","data","[","1",":","]","model",".","last_linear","=","new_last_linear","model",".","input_space","=","settings","[","'input_space'","]","model",".","input_size","=","settings","[","'input_size'","]","model",".","input_range","=","settings","[","'input_range'","]","model",".","mean","=","settings","[","'mean'","]","model",".","std","=","settings","[","'std'","]","else",":","model","=","PNASNet5Large","(","num_classes","=","num_classes",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/pnasnet.py#L372-L401"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/polynet.py","language":"python","identifier":"polynet","parameters":"(num_classes=1000, pretrained='imagenet')","argument_list":"","return_statement":"return model","docstring":"PolyNet architecture from the paper\n 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks'\n https:\/\/arxiv.org\/abs\/1611.05725","docstring_summary":"PolyNet architecture from the paper\n 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks'\n https:\/\/arxiv.org\/abs\/1611.05725","docstring_tokens":["PolyNet","architecture","from","the","paper","PolyNet",":","A","Pursuit","of","Structural","Diversity","in","Very","Deep","Networks","https",":","\/\/","arxiv",".","org","\/","abs","\/","1611",".","05725"],"function":"def polynet(num_classes=1000, pretrained='imagenet'):\n \"\"\"PolyNet architecture from the paper\n 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks'\n https:\/\/arxiv.org\/abs\/1611.05725\n \"\"\"\n if pretrained:\n settings = pretrained_settings['polynet'][pretrained]\n assert num_classes == settings['num_classes'], \\\n 'num_classes should be {}, but is {}'.format(\n settings['num_classes'], num_classes)\n model = PolyNet(num_classes=num_classes)\n model.load_state_dict(model_zoo.load_url(settings['url']))\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n else:\n model = PolyNet(num_classes=num_classes)\n return model","function_tokens":["def","polynet","(","num_classes","=","1000",",","pretrained","=","'imagenet'",")",":","if","pretrained",":","settings","=","pretrained_settings","[","'polynet'","]","[","pretrained","]","assert","num_classes","==","settings","[","'num_classes'","]",",","'num_classes should be {}, but is {}'",".","format","(","settings","[","'num_classes'","]",",","num_classes",")","model","=","PolyNet","(","num_classes","=","num_classes",")","model",".","load_state_dict","(","model_zoo",".","load_url","(","settings","[","'url'","]",")",")","model",".","input_space","=","settings","[","'input_space'","]","model",".","input_size","=","settings","[","'input_size'","]","model",".","input_range","=","settings","[","'input_range'","]","model",".","mean","=","settings","[","'mean'","]","model",".","std","=","settings","[","'std'","]","else",":","model","=","PolyNet","(","num_classes","=","num_classes",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/polynet.py#L461-L480"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/dpn.py","language":"python","identifier":"adaptive_avgmax_pool2d","parameters":"(x, pool_type='avg', padding=0, count_include_pad=False)","argument_list":"","return_statement":"return x","docstring":"Selectable global pooling function with dynamic input kernel size","docstring_summary":"Selectable global pooling function with dynamic input kernel size","docstring_tokens":["Selectable","global","pooling","function","with","dynamic","input","kernel","size"],"function":"def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):\n \"\"\"Selectable global pooling function with dynamic input kernel size\n \"\"\"\n if pool_type == 'avgmaxc':\n x = torch.cat([\n F.avg_pool2d(\n x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),\n F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)\n ], dim=1)\n elif pool_type == 'avgmax':\n x_avg = F.avg_pool2d(\n x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)\n x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)\n x = 0.5 * (x_avg + x_max)\n elif pool_type == 'max':\n x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)\n else:\n if pool_type != 'avg':\n print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)\n x = F.avg_pool2d(\n x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)\n return x","function_tokens":["def","adaptive_avgmax_pool2d","(","x",",","pool_type","=","'avg'",",","padding","=","0",",","count_include_pad","=","False",")",":","if","pool_type","==","'avgmaxc'",":","x","=","torch",".","cat","(","[","F",".","avg_pool2d","(","x",",","kernel_size","=","(","x",".","size","(","2",")",",","x",".","size","(","3",")",")",",","padding","=","padding",",","count_include_pad","=","count_include_pad",")",",","F",".","max_pool2d","(","x",",","kernel_size","=","(","x",".","size","(","2",")",",","x",".","size","(","3",")",")",",","padding","=","padding",")","]",",","dim","=","1",")","elif","pool_type","==","'avgmax'",":","x_avg","=","F",".","avg_pool2d","(","x",",","kernel_size","=","(","x",".","size","(","2",")",",","x",".","size","(","3",")",")",",","padding","=","padding",",","count_include_pad","=","count_include_pad",")","x_max","=","F",".","max_pool2d","(","x",",","kernel_size","=","(","x",".","size","(","2",")",",","x",".","size","(","3",")",")",",","padding","=","padding",")","x","=","0.5","*","(","x_avg","+","x_max",")","elif","pool_type","==","'max'",":","x","=","F",".","max_pool2d","(","x",",","kernel_size","=","(","x",".","size","(","2",")",",","x",".","size","(","3",")",")",",","padding","=","padding",")","else",":","if","pool_type","!=","'avg'",":","print","(","'Invalid pool type %s specified. Defaulting to average pooling.'","%","pool_type",")","x","=","F",".","avg_pool2d","(","x",",","kernel_size","=","(","x",".","size","(","2",")",",","x",".","size","(","3",")",")",",","padding","=","padding",",","count_include_pad","=","count_include_pad",")","return","x"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/dpn.py#L407-L428"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet\/resnet152_load.py","language":"python","identifier":"conv3x3","parameters":"(in_planes, out_planes, stride=1)","argument_list":"","return_statement":"return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=True)","docstring":"3x3 convolution with padding","docstring_summary":"3x3 convolution with padding","docstring_tokens":["3x3","convolution","with","padding"],"function":"def conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=True)","function_tokens":["def","conv3x3","(","in_planes",",","out_planes",",","stride","=","1",")",":","return","nn",".","Conv2d","(","in_planes",",","out_planes",",","kernel_size","=","3",",","stride","=","stride",",","padding","=","1",",","bias","=","True",")"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet\/resnet152_load.py#L20-L23"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet\/resnet152_load.py","language":"python","identifier":"resnet18","parameters":"(pretrained=False, **kwargs)","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_summary":"Constructs a ResNet-18 model.","docstring_tokens":["Constructs","a","ResNet","-","18","model","."],"function":"def resnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model","function_tokens":["def","resnet18","(","pretrained","=","False",",","*","*","kwargs",")",":","model","=","ResNet","(","BasicBlock",",","[","2",",","2",",","2",",","2","]",",","*","*","kwargs",")","if","pretrained",":","model",".","load_state_dict","(","model_zoo",".","load_url","(","model_urls","[","'resnet18'","]",")",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet\/resnet152_load.py#L160-L169"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet\/resnet152_load.py","language":"python","identifier":"resnet34","parameters":"(pretrained=False, **kwargs)","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-34 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_summary":"Constructs a ResNet-34 model.","docstring_tokens":["Constructs","a","ResNet","-","34","model","."],"function":"def resnet34(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model","function_tokens":["def","resnet34","(","pretrained","=","False",",","*","*","kwargs",")",":","model","=","ResNet","(","BasicBlock",",","[","3",",","4",",","6",",","3","]",",","*","*","kwargs",")","if","pretrained",":","model",".","load_state_dict","(","model_zoo",".","load_url","(","model_urls","[","'resnet34'","]",")",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet\/resnet152_load.py#L172-L181"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet\/resnet152_load.py","language":"python","identifier":"resnet50","parameters":"(pretrained=False, **kwargs)","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_summary":"Constructs a ResNet-50 model.","docstring_tokens":["Constructs","a","ResNet","-","50","model","."],"function":"def resnet50(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model","function_tokens":["def","resnet50","(","pretrained","=","False",",","*","*","kwargs",")",":","model","=","ResNet","(","Bottleneck",",","[","3",",","4",",","6",",","3","]",",","*","*","kwargs",")","if","pretrained",":","model",".","load_state_dict","(","model_zoo",".","load_url","(","model_urls","[","'resnet50'","]",")",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet\/resnet152_load.py#L184-L193"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet\/resnet152_load.py","language":"python","identifier":"resnet101","parameters":"(pretrained=False, **kwargs)","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_summary":"Constructs a ResNet-101 model.","docstring_tokens":["Constructs","a","ResNet","-","101","model","."],"function":"def resnet101(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model","function_tokens":["def","resnet101","(","pretrained","=","False",",","*","*","kwargs",")",":","model","=","ResNet","(","Bottleneck",",","[","3",",","4",",","23",",","3","]",",","*","*","kwargs",")","if","pretrained",":","model",".","load_state_dict","(","model_zoo",".","load_url","(","model_urls","[","'resnet101'","]",")",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet\/resnet152_load.py#L196-L205"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/models\/fbresnet\/resnet152_load.py","language":"python","identifier":"resnet152","parameters":"(pretrained=False, **kwargs)","argument_list":"","return_statement":"return model","docstring":"Constructs a ResNet-152 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet","docstring_summary":"Constructs a ResNet-152 model.","docstring_tokens":["Constructs","a","ResNet","-","152","model","."],"function":"def resnet152(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model","function_tokens":["def","resnet152","(","pretrained","=","False",",","*","*","kwargs",")",":","model","=","ResNet","(","Bottleneck",",","[","3",",","8",",","36",",","3","]",",","*","*","kwargs",")","if","pretrained",":","model",".","load_state_dict","(","model_zoo",".","load_url","(","model_urls","[","'resnet152'","]",")",")","return","model"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/models\/fbresnet\/resnet152_load.py#L208-L217"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/datasets\/utils.py","language":"python","identifier":"download_url","parameters":"(url, destination=None, progress_bar=True)","argument_list":"","return_statement":"","docstring":"Download a URL to a local file.\n\n Parameters\n ----------\n url : str\n The URL to download.\n destination : str, None\n The destination of the file. If None is given the file is saved to a temporary directory.\n progress_bar : bool\n Whether to show a command-line progress bar while downloading.\n\n Returns\n -------\n filename : str\n The location of the downloaded file.\n\n Notes\n -----\n Progress bar use\/example adapted from tqdm documentation: https:\/\/github.com\/tqdm\/tqdm","docstring_summary":"Download a URL to a local file.","docstring_tokens":["Download","a","URL","to","a","local","file","."],"function":"def download_url(url, destination=None, progress_bar=True):\n \"\"\"Download a URL to a local file.\n\n Parameters\n ----------\n url : str\n The URL to download.\n destination : str, None\n The destination of the file. If None is given the file is saved to a temporary directory.\n progress_bar : bool\n Whether to show a command-line progress bar while downloading.\n\n Returns\n -------\n filename : str\n The location of the downloaded file.\n\n Notes\n -----\n Progress bar use\/example adapted from tqdm documentation: https:\/\/github.com\/tqdm\/tqdm\n \"\"\"\n\n def my_hook(t):\n last_b = [0]\n\n def inner(b=1, bsize=1, tsize=None):\n if tsize is not None:\n t.total = tsize\n if b > 0:\n t.update((b - last_b[0]) * bsize)\n last_b[0] = b\n\n return inner\n\n if progress_bar:\n with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('\/')[-1]) as t:\n filename, _ = urlretrieve(url, filename=destination, reporthook=my_hook(t))\n else:\n filename, _ = urlretrieve(url, filename=destination)","function_tokens":["def","download_url","(","url",",","destination","=","None",",","progress_bar","=","True",")",":","def","my_hook","(","t",")",":","last_b","=","[","0","]","def","inner","(","b","=","1",",","bsize","=","1",",","tsize","=","None",")",":","if","tsize","is","not","None",":","t",".","total","=","tsize","if","b",">","0",":","t",".","update","(","(","b","-","last_b","[","0","]",")","*","bsize",")","last_b","[","0","]","=","b","return","inner","if","progress_bar",":","with","tqdm","(","unit","=","'B'",",","unit_scale","=","True",",","miniters","=","1",",","desc","=","url",".","split","(","'\/'",")","[","-","1","]",")","as","t",":","filename",",","_","=","urlretrieve","(","url",",","filename","=","destination",",","reporthook","=","my_hook","(","t",")",")","else",":","filename",",","_","=","urlretrieve","(","url",",","filename","=","destination",")"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/datasets\/utils.py#L45-L83"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/datasets\/utils.py","language":"python","identifier":"AveragePrecisionMeter.reset","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Resets the meter with empty member variables","docstring_summary":"Resets the meter with empty member variables","docstring_tokens":["Resets","the","meter","with","empty","member","variables"],"function":"def reset(self):\n \"\"\"Resets the meter with empty member variables\"\"\"\n self.scores = torch.FloatTensor(torch.FloatStorage())\n self.targets = torch.LongTensor(torch.LongStorage())","function_tokens":["def","reset","(","self",")",":","self",".","scores","=","torch",".","FloatTensor","(","torch",".","FloatStorage","(",")",")","self",".","targets","=","torch",".","LongTensor","(","torch",".","LongStorage","(",")",")"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/datasets\/utils.py#L105-L108"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/datasets\/utils.py","language":"python","identifier":"AveragePrecisionMeter.add","parameters":"(self, output, target)","argument_list":"","return_statement":"","docstring":"Args:\n output (Tensor): NxK tensor that for each of the N examples\n indicates the probability of the example belonging to each of\n the K classes, according to the model. The probabilities should\n sum to one over all classes\n target (Tensor): binary NxK tensort that encodes which of the K\n classes are associated with the N-th input\n (eg: a row [0, 1, 0, 1] indicates that the example is\n associated with classes 2 and 4)\n weight (optional, Tensor): Nx1 tensor representing the weight for\n each example (each weight > 0)","docstring_summary":"Args:\n output (Tensor): NxK tensor that for each of the N examples\n indicates the probability of the example belonging to each of\n the K classes, according to the model. The probabilities should\n sum to one over all classes\n target (Tensor): binary NxK tensort that encodes which of the K\n classes are associated with the N-th input\n (eg: a row [0, 1, 0, 1] indicates that the example is\n associated with classes 2 and 4)\n weight (optional, Tensor): Nx1 tensor representing the weight for\n each example (each weight > 0)","docstring_tokens":["Args",":","output","(","Tensor",")",":","NxK","tensor","that","for","each","of","the","N","examples","indicates","the","probability","of","the","example","belonging","to","each","of","the","K","classes","according","to","the","model",".","The","probabilities","should","sum","to","one","over","all","classes","target","(","Tensor",")",":","binary","NxK","tensort","that","encodes","which","of","the","K","classes","are","associated","with","the","N","-","th","input","(","eg",":","a","row","[","0","1","0","1","]","indicates","that","the","example","is","associated","with","classes","2","and","4",")","weight","(","optional","Tensor",")",":","Nx1","tensor","representing","the","weight","for","each","example","(","each","weight",">","0",")"],"function":"def add(self, output, target):\n \"\"\"\n Args:\n output (Tensor): NxK tensor that for each of the N examples\n indicates the probability of the example belonging to each of\n the K classes, according to the model. 
The probabilities should\n sum to one over all classes\n target (Tensor): binary NxK tensort that encodes which of the K\n classes are associated with the N-th input\n (eg: a row [0, 1, 0, 1] indicates that the example is\n associated with classes 2 and 4)\n weight (optional, Tensor): Nx1 tensor representing the weight for\n each example (each weight > 0)\n \"\"\"\n if not torch.is_tensor(output):\n output = torch.from_numpy(output)\n if not torch.is_tensor(target):\n target = torch.from_numpy(target)\n\n if output.dim() == 1:\n output = output.view(-1, 1)\n else:\n assert output.dim() == 2, \\\n 'wrong output size (should be 1D or 2D with one column \\\n per class)'\n if target.dim() == 1:\n target = target.view(-1, 1)\n else:\n assert target.dim() == 2, \\\n 'wrong target size (should be 1D or 2D with one column \\\n per class)'\n if self.scores.numel() > 0:\n assert target.size(1) == self.targets.size(1), \\\n 'dimensions for output should match previously added examples.'\n\n # make sure storage is of sufficient size\n if self.scores.storage().size() < self.scores.numel() + output.numel():\n new_size = math.ceil(self.scores.storage().size() * 1.5)\n self.scores.storage().resize_(int(new_size + output.numel()))\n self.targets.storage().resize_(int(new_size + output.numel()))\n\n # store scores and targets\n offset = self.scores.size(0) if self.scores.dim() > 0 else 0\n self.scores.resize_(offset + output.size(0), output.size(1))\n self.targets.resize_(offset + target.size(0), target.size(1))\n self.scores.narrow(0, offset, output.size(0)).copy_(output)\n self.targets.narrow(0, offset, target.size(0)).copy_(target)","function_tokens":["def","add","(","self",",","output",",","target",")",":","if","not","torch",".","is_tensor","(","output",")",":","output","=","torch",".","from_numpy","(","output",")","if","not","torch",".","is_tensor","(","target",")",":","target","=","torch",".","from_numpy","(","target",")","if","output",".","dim","(",")","==","1",":","output","=","output",".","view","(","-","1",",","1",")","else",":","assert","output",".","dim","(",")","==","2",",","'wrong output size (should be 1D or 2D with one column \\\n per class)'","if","target",".","dim","(",")","==","1",":","target","=","target",".","view","(","-","1",",","1",")","else",":","assert","target",".","dim","(",")","==","2",",","'wrong target size (should be 1D or 2D with one column \\\n per class)'","if","self",".","scores",".","numel","(",")",">","0",":","assert","target",".","size","(","1",")","==","self",".","targets",".","size","(","1",")",",","'dimensions for output should match previously added examples.'","# make sure storage is of sufficient size","if","self",".","scores",".","storage","(",")",".","size","(",")","<","self",".","scores",".","numel","(",")","+","output",".","numel","(",")",":","new_size","=","math",".","ceil","(","self",".","scores",".","storage","(",")",".","size","(",")","*","1.5",")","self",".","scores",".","storage","(",")",".","resize_","(","int","(","new_size","+","output",".","numel","(",")",")",")","self",".","targets",".","storage","(",")",".","resize_","(","int","(","new_size","+","output",".","numel","(",")",")",")","# store scores and 
targets","offset","=","self",".","scores",".","size","(","0",")","if","self",".","scores",".","dim","(",")",">","0","else","0","self",".","scores",".","resize_","(","offset","+","output",".","size","(","0",")",",","output",".","size","(","1",")",")","self",".","targets",".","resize_","(","offset","+","target",".","size","(","0",")",",","target",".","size","(","1",")",")","self",".","scores",".","narrow","(","0",",","offset",",","output",".","size","(","0",")",")",".","copy_","(","output",")","self",".","targets",".","narrow","(","0",",","offset",",","target",".","size","(","0",")",")",".","copy_","(","target",")"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/datasets\/utils.py#L110-L156"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"pretrainedmodels\/datasets\/utils.py","language":"python","identifier":"AveragePrecisionMeter.value","parameters":"(self)","argument_list":"","return_statement":"return ap","docstring":"Returns the model's average precision for each class\n Return:\n ap (FloatTensor): 1xK tensor, with avg precision for each class k","docstring_summary":"Returns the model's average precision for each class\n Return:\n ap (FloatTensor): 1xK tensor, with avg precision for each class k","docstring_tokens":["Returns","the","model","s","average","precision","for","each","class","Return",":","ap","(","FloatTensor",")",":","1xK","tensor","with","avg","precision","for","each","class","k"],"function":"def value(self):\n \"\"\"Returns the model's average precision for each class\n Return:\n ap (FloatTensor): 1xK tensor, with avg precision for each class k\n \"\"\"\n\n if self.scores.numel() == 0:\n return 0\n ap = torch.zeros(self.scores.size(1))\n rg = torch.arange(1, self.scores.size(0)).float()\n\n # compute average precision for each class\n for k in range(self.scores.size(1)):\n # sort scores\n scores = self.scores[:, k]\n targets = self.targets[:, k]\n\n # compute average precision\n ap[k] = AveragePrecisionMeter.average_precision(scores, targets, self.difficult_examples)\n return ap","function_tokens":["def","value","(","self",")",":","if","self",".","scores",".","numel","(",")","==","0",":","return","0","ap","=","torch",".","zeros","(","self",".","scores",".","size","(","1",")",")","rg","=","torch",".","arange","(","1",",","self",".","scores",".","size","(","0",")",")",".","float","(",")","# compute average precision for each class","for","k","in","range","(","self",".","scores",".","size","(","1",")",")",":","# sort scores","scores","=","self",".","scores","[",":",",","k","]","targets","=","self",".","targets","[",":",",","k","]","# compute average precision","ap","[","k","]","=","AveragePrecisionMeter",".","average_precision","(","scores",",","targets",",","self",".","difficult_examples",")","return","ap"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/pretrainedmodels\/datasets\/utils.py#L158-L177"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"examples\/imagenet_eval.py","language":"python","identifier":"adjust_learning_rate","parameters":"(optimizer, epoch)","argument_list":"","return_statement":"","docstring":"Sets the learning rate to the initial LR decayed by 10 every 30 epochs","docstring_summary":"Sets the learning rate to the initial LR decayed by 10 every 30 epochs","docstring_tokens":["Sets","the","learning","rate","to","the","initial","LR","decayed","by","10","every","30","epochs"],"function":"def adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch \/\/ 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr","function_tokens":["def","adjust_learning_rate","(","optimizer",",","epoch",")",":","lr","=","args",".","lr","*","(","0.1","**","(","epoch","\/\/","30",")",")","for","param_group","in","optimizer",".","param_groups",":","param_group","[","'lr'","]","=","lr"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/examples\/imagenet_eval.py#L280-L284"}
{"nwo":"Cadene\/pretrained-models.pytorch","sha":"8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0","path":"examples\/imagenet_eval.py","language":"python","identifier":"accuracy","parameters":"(output, target, topk=(1,))","argument_list":"","return_statement":"return res","docstring":"Computes the precision@k for the specified values of k","docstring_summary":"Computes the precision","docstring_tokens":["Computes","the","precision"],"function":"def accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 \/ batch_size))\n return res","function_tokens":["def","accuracy","(","output",",","target",",","topk","=","(","1",",",")",")",":","maxk","=","max","(","topk",")","batch_size","=","target",".","size","(","0",")","_",",","pred","=","output",".","topk","(","maxk",",","1",",","True",",","True",")","pred","=","pred",".","t","(",")","correct","=","pred",".","eq","(","target",".","view","(","1",",","-","1",")",".","expand_as","(","pred",")",")","res","=","[","]","for","k","in","topk",":","correct_k","=","correct","[",":","k","]",".","view","(","-","1",")",".","float","(",")",".","sum","(","0",")","res",".","append","(","correct_k",".","mul_","(","100.0","\/","batch_size",")",")","return","res"],"url":"https:\/\/github.com\/Cadene\/pretrained-models.pytorch\/blob\/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0\/examples\/imagenet_eval.py#L287-L300"}