sia_tp_sample / BIGBALLON__CIFAR-ZOO.jsonl
{"nwo":"BIGBALLON\/CIFAR-ZOO","sha":"94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5","path":"utils.py","language":"python","identifier":"mixup_data","parameters":"(x, y, alpha, device)","argument_list":"","return_statement":"return mixed_x, y_a, y_b, lam","docstring":"Returns mixed inputs, pairs of targets, and lambda","docstring_summary":"Returns mixed inputs, pairs of targets, and lambda","docstring_tokens":["Returns","mixed","inputs","pairs","of","targets","and","lambda"],"function":"def mixup_data(x, y, alpha, device):\n \"\"\"Returns mixed inputs, pairs of targets, and lambda\"\"\"\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n index = torch.randperm(batch_size).to(device)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam","function_tokens":["def","mixup_data","(","x",",","y",",","alpha",",","device",")",":","if","alpha",">","0",":","lam","=","np",".","random",".","beta","(","alpha",",","alpha",")","else",":","lam","=","1","batch_size","=","x",".","size","(",")","[","0","]","index","=","torch",".","randperm","(","batch_size",")",".","to","(","device",")","mixed_x","=","lam","*","x","+","(","1","-","lam",")","*","x","[","index",",",":","]","y_a",",","y_b","=","y",",","y","[","index","]","return","mixed_x",",","y_a",",","y_b",",","lam"],"url":"https:\/\/github.com\/BIGBALLON\/CIFAR-ZOO\/blob\/94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5\/utils.py#L147-L159"}
{"nwo":"BIGBALLON\/CIFAR-ZOO","sha":"94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5","path":"models\/sknet.py","language":"python","identifier":"SKConv.__init__","parameters":"(self, features, M, G, r, stride=1, L=32)","argument_list":"","return_statement":"","docstring":"Constructor\n Args:\n features: input channel dimensionality.\n M: the number of branchs.\n G: num of convolution groups.\n r: the radio for compute d, the length of z.\n stride: stride, default 1.\n L: the minimum dim of the vector z in paper, default 32.","docstring_summary":"Constructor\n Args:\n features: input channel dimensionality.\n M: the number of branchs.\n G: num of convolution groups.\n r: the radio for compute d, the length of z.\n stride: stride, default 1.\n L: the minimum dim of the vector z in paper, default 32.","docstring_tokens":["Constructor","Args",":","features",":","input","channel","dimensionality",".","M",":","the","number","of","branchs",".","G",":","num","of","convolution","groups",".","r",":","the","radio","for","compute","d","the","length","of","z",".","stride",":","stride","default","1",".","L",":","the","minimum","dim","of","the","vector","z","in","paper","default","32","."],"function":"def __init__(self, features, M, G, r, stride=1, L=32):\n \"\"\" Constructor\n Args:\n features: input channel dimensionality.\n M: the number of branchs.\n G: num of convolution groups.\n r: the radio for compute d, the length of z.\n stride: stride, default 1.\n L: the minimum dim of the vector z in paper, default 32.\n \"\"\"\n super(SKConv, self).__init__()\n d = max(int(features \/ r), L)\n self.convs = nn.ModuleList([])\n for i in range(M):\n self.convs.append(\n nn.Sequential(\n nn.Conv2d(\n features,\n features,\n kernel_size=1 + i * 2,\n stride=stride,\n padding=i,\n groups=G,\n ),\n nn.BatchNorm2d(features),\n nn.ReLU(inplace=False),\n )\n )\n self.gap = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(features, d)\n self.fcs = nn.ModuleList([])\n for i in range(M):\n self.fcs.append(nn.Linear(d, features))\n self.softmax = nn.Softmax(dim=1)","function_tokens":["def","__init__","(","self",",","features",",","M",",","G",",","r",",","stride","=","1",",","L","=","32",")",":","super","(","SKConv",",","self",")",".","__init__","(",")","d","=","max","(","int","(","features","\/","r",")",",","L",")","self",".","convs","=","nn",".","ModuleList","(","[","]",")","for","i","in","range","(","M",")",":","self",".","convs",".","append","(","nn",".","Sequential","(","nn",".","Conv2d","(","features",",","features",",","kernel_size","=","1","+","i","*","2",",","stride","=","stride",",","padding","=","i",",","groups","=","G",",",")",",","nn",".","BatchNorm2d","(","features",")",",","nn",".","ReLU","(","inplace","=","False",")",",",")",")","self",".","gap","=","nn",".","AdaptiveAvgPool2d","(","1",")","self",".","fc","=","nn",".","Linear","(","features",",","d",")","self",".","fcs","=","nn",".","ModuleList","(","[","]",")","for","i","in","range","(","M",")",":","self",".","fcs",".","append","(","nn",".","Linear","(","d",",","features",")",")","self",".","softmax","=","nn",".","Softmax","(","dim","=","1",")"],"url":"https:\/\/github.com\/BIGBALLON\/CIFAR-ZOO\/blob\/94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5\/models\/sknet.py#L10-L43"}
{"nwo":"BIGBALLON\/CIFAR-ZOO","sha":"94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5","path":"models\/resnext.py","language":"python","identifier":"ResNeXt.__init__","parameters":"(self, cardinality, depth, num_classes, base_width, expansion=4)","argument_list":"","return_statement":"","docstring":"Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n base_width: base number of channels in each group.\n expansion: factor to adjust the channel dimensionality","docstring_summary":"Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n base_width: base number of channels in each group.\n expansion: factor to adjust the channel dimensionality","docstring_tokens":["Constructor","Args",":","cardinality",":","number","of","convolution","groups",".","depth",":","number","of","layers",".","num_classes",":","number","of","classes","base_width",":","base","number","of","channels","in","each","group",".","expansion",":","factor","to","adjust","the","channel","dimensionality"],"function":"def __init__(self, cardinality, depth, num_classes, base_width, expansion=4):\n \"\"\" Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n base_width: base number of channels in each group.\n expansion: factor to adjust the channel dimensionality\n \"\"\"\n super(ResNeXt, self).__init__()\n self.cardinality = cardinality\n self.depth = depth\n self.block_depth = (self.depth - 2) \/\/ 9\n self.base_width = base_width\n self.expansion = expansion\n self.num_classes = num_classes\n self.output_size = 64\n self.stages = [\n 64,\n 64 * self.expansion,\n 128 * self.expansion,\n 256 * self.expansion,\n ]\n\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block(\"stage_1\", self.stages[0], self.stages[1], 1)\n self.stage_2 = self.block(\"stage_2\", self.stages[1], self.stages[2], 2)\n self.stage_3 = self.block(\"stage_3\", self.stages[2], self.stages[3], 2)\n self.fc = nn.Linear(self.stages[3], num_classes)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight.data)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()","function_tokens":["def","__init__","(","self",",","cardinality",",","depth",",","num_classes",",","base_width",",","expansion","=","4",")",":","super","(","ResNeXt",",","self",")",".","__init__","(",")","self",".","cardinality","=","cardinality","self",".","depth","=","depth","self",".","block_depth","=","(","self",".","depth","-","2",")","\/\/","9","self",".","base_width","=","base_width","self",".","expansion","=","expansion","self",".","num_classes","=","num_classes","self",".","output_size","=","64","self",".","stages","=","[","64",",","64","*","self",".","expansion",",","128","*","self",".","expansion",",","256","*","self",".","expansion",",","]","self",".","conv_1_3x3","=","nn",".","Conv2d","(","3",",","64",",","3",",","1",",","1",",","bias","=","False",")","self",".","bn_1","=","nn",".","BatchNorm2d","(","64",")","self",".","stage_1","=","self",".","block","(","\"stage_1\"",",","self",".","stages","[","0","]",",","self",".","stages","[","1","]",",","1",")","self",".","stage_2","=","self",".","block","(","\"stage_2\"",",","self",".","stages","[","1","]",",","self",".","stages","[","2","]",",","2",")","self",".","stage_3","=","self",".","block","(","\"stage_3\"",",","self",".","stages","[","2","]",",","self",".","stages","[","3","]",",","2",")","self",".","fc","=","nn",".","Linear","(","self",".","stages","[","3","]",",","num_classes",")","for","m","in","self",".","modules","(",")",":","if","isinstance","(","m",",","nn",".","Conv2d",")",":","nn",".","init",".","kaiming_normal_","(","m",".","weight",".","data",")","elif","isinstance","(","m",",","nn",".","BatchNorm2d",")",":","m",".","weight",".","data",".","fill_","(","1",")","m",".","bias",".","data",".","zero_","(",")"],"url":"https:\/\/github.com\/BIGBALLON\/CIFAR-ZOO\/blob\/94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5\/models\/resnext.py#L70-L105"}
{"nwo":"BIGBALLON\/CIFAR-ZOO","sha":"94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5","path":"models\/preresnet.py","language":"python","identifier":"conv3x3","parameters":"(in_planes, out_planes, stride=1)","argument_list":"","return_statement":"return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False\n )","docstring":"3x3 convolution with padding","docstring_summary":"3x3 convolution with padding","docstring_tokens":["3x3","convolution","with","padding"],"function":"def conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False\n )","function_tokens":["def","conv3x3","(","in_planes",",","out_planes",",","stride","=","1",")",":","return","nn",".","Conv2d","(","in_planes",",","out_planes",",","kernel_size","=","3",",","stride","=","stride",",","padding","=","1",",","bias","=","False",")"],"url":"https:\/\/github.com\/BIGBALLON\/CIFAR-ZOO\/blob\/94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5\/models\/preresnet.py#L15-L19"}
{"nwo":"BIGBALLON\/CIFAR-ZOO","sha":"94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5","path":"models\/resnet.py","language":"python","identifier":"conv3x3","parameters":"(in_planes, out_planes, stride=1)","argument_list":"","return_statement":"return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False\n )","docstring":"3x3 convolution with padding","docstring_summary":"3x3 convolution with padding","docstring_tokens":["3x3","convolution","with","padding"],"function":"def conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False\n )","function_tokens":["def","conv3x3","(","in_planes",",","out_planes",",","stride","=","1",")",":","return","nn",".","Conv2d","(","in_planes",",","out_planes",",","kernel_size","=","3",",","stride","=","stride",",","padding","=","1",",","bias","=","False",")"],"url":"https:\/\/github.com\/BIGBALLON\/CIFAR-ZOO\/blob\/94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5\/models\/resnet.py#L8-L12"}