# import torch
# import torch.nn as nn
# def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
#     """3x3 convolution with padding"""
#     return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
#                      padding=dilation, groups=groups, bias=False, dilation=dilation)

# class block(nn.Module):
#     expansion = 1
#     def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
#                  base_width=64, dilation=1):
#         super(block,self).__init__()
#         self.conv = conv3x3(inplanes,planes,stride)
#         self.bn = nn.BatchNorm2d(planes)
#         self.relu = nn.ReLU(inplace=True)
#     def forward(self,x):
#         out = self.conv(x)
#         out = self.bn(out)
#         out = self.relu(out)

#         return out
    
# class CNN(nn.Module):
#     def __init__(self, block, layers, num_classes=100, 
#                  groups=1, width_per_group=64, 
#                  norm_layer=None):
#         super(CNN,self).__init__()
#         self.inplanes = 3
#         self.dilation = 1
#         self.groups = groups
#         self.base_width = width_per_group
#         self.layer1 = self._make_layer(block,64,layers[0])
#         self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
#         self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
#         self.avgpool = nn.AdaptiveAvgPool2d((1,1))
#         self.fc = nn.Linear(256,num_classes)
#         self.relu = nn.ReLU(inplace=True)
#     def _make_layer(self,block,planes,blocks,stride = 1,dilate = False):
#         layers = []
#         downsample = None
#         previous_dilation = self.dilation
#         layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
#                             self.base_width, previous_dilation))
#         self.inplanes = planes * block.expansion
#         for _ in range(1,blocks):
#             layers.append(block(self.inplanes, planes, groups=self.groups,
#                                 base_width=self.base_width, dilation=self.dilation,
#                                 ))            
#         return nn.Sequential(*layers)

#     # def _forward(self,x):
#     #     x = self.layer1(x)
#     #     result  = {'representation0':x}
#     #     x = self.layer2(x)
#     #     result['representation1'] = x
#     #     x = self.layer3(x)
#     #     result['representation2'] = x
#     #     x = self.avgpool(x)
#     #     x = torch.flatten(x, 1)
#     #     x = self.fc(x)
#     #     x = self.relu(x)
#     #     result['output'] = x
#     #     return result
    
#     def forward(self,x,level = 0):
#         if level == 0 :
#             x = self.layer1(x)
#             result  = {'representation0':x}
#             # print(x.size())
#             x = self.layer2(x)
#             result['representation1'] = x
#             # print(x.size())
#             x = self.layer3(x)
#             result['representation2'] = x
#             # print(x.size())
#             x = self.avgpool(x)
#             x = torch.flatten(x, 1)
#             x = self.fc(x)
#             x = self.relu(x)
#             result['output'] = x
            
#         elif level == 1 :
#             x = self.layer2(x)
#             result = {'representation1':x}
            
#             x = self.layer3(x)
#             result['representation2'] = x
#             # print(x.size())
#             x = self.avgpool(x)
#             x = torch.flatten(x, 1)
#             x = self.fc(x)
#             x = self.relu(x)
#             result['output'] = x
            
#         elif level == 2:
#             x = self.layer3(x)
#             result = {'representation2':x}
#             x = self.avgpool(x)
#             x = torch.flatten(x, 1)
#             x = self.fc(x)
#             x = self.relu(x)
#             result['output'] = x
        
#         elif level == 3:
#             x = self.avgpool(x)
#             x = torch.flatten(x, 1)
#             x = self.fc(x)
#             x = self.relu(x)
#             result = {'output':x} 
#         else:
#             print("error")
#             exit()
#         return result
    
#     # def _forward_mlb(self,x,level = 0,return_feature=False):
#     #     if level <= 0:
#     #         out0 = self.layer1(x)
#     #     else:
#     #         out0 = x
#     #     if level <= 1:
#     #         out1 = self.layer2(out0)
#     #     else:
#     #         out1 = out0
#     #     if level <= 2:
#     #         out2 = self.layer3(out1)
#     #     else:
#     #         out2 = out1
#     #     if level <= 3:
#     #         x = self.avgpool(x)
#     #         x = torch.flatten(x, 1)
#     #         x = self.fc(x)
#     #         out3 = self.relu(x)
#     #     else:
#     #         out3 = out2
#     #     if return_feature == True:
#     #         return out0,out1,out2,out3
#     #     return out3
    

# def cnn1(**kwargs):
#     return CNN(block,[1,1,1],**kwargs)

# def cnn2(**kwargs):
#     return CNN(block,[1,1,2],**kwargs) 

# def cnn3(**kwargs):
#     return CNN(block,[1,2,2],**kwargs)

# def cnn4(**kwargs):
#     return CNN(block,[2,2,2],**kwargs)

# def cnn5(**kwargs):
#     return CNN(block,[3,3,3],**kwargs)


import torch
from torch import nn
import torch.nn.functional as F
class Flatten(nn.Module):
    """Collapse every non-batch dimension into one feature dimension.

    Maps an input of shape (N, d1, d2, ...) to (N, d1*d2*...), preserving
    element order. Uses reshape (not view) so non-contiguous inputs work.
    """

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        # reshape(N, -1) is equivalent to the explicit prod-of-trailing-dims
        # computation: the batch size is kept and the rest is inferred.
        return x.reshape(x.size(0), -1)
class CNNCifar100(nn.Module):
    """Small CNN classifier: two conv/ReLU/max-pool stages, an adaptive
    4x4 average pool, and a three-layer fully-connected head.

    forward() returns a dict with a single key 'output' holding the raw
    class logits (no softmax).
    """

    def __init__(self, args):
        super(CNNCifar100, self).__init__()
        # Kept as an attribute because callers may inspect which dataset
        # this model was built for.
        self.dataset = args.dataset

        # Feature extractor. Attribute names are part of the checkpoint
        # format (state_dict keys) and must not change.
        self.conv1 = nn.Conv2d(args.num_channels, 16, 5)
        self.relu1 = nn.ReLU(True)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 5)
        self.relu2 = nn.ReLU(True)
        self.pool2 = nn.MaxPool2d(2, 2)
        # Fixed 4x4 output grid makes the head independent of input size:
        # 32 channels * 4 * 4 = 512 features.
        self.avgpool = nn.AdaptiveAvgPool2d((4, 4))
        self.flatten = Flatten()

        # Classifier head.
        self.fc1 = nn.Linear(512, 256)
        self.relu3 = nn.ReLU(True)
        self.fc2 = nn.Linear(256, 256)
        self.relu4 = nn.ReLU(True)
        self.fc3 = nn.Linear(256, args.num_classes)

    def forward(self, x):
        # Run the registered modules as a fixed pipeline; the order mirrors
        # the construction order above.
        pipeline = (
            self.conv1, self.relu1, self.pool1,
            self.conv2, self.relu2, self.pool2,
            self.avgpool, self.flatten,
            self.fc1, self.relu3,
            self.fc2, self.relu4,
            self.fc3,
        )
        for stage in pipeline:
            x = stage(x)
        return {'output': x}
    

class CNNCifar(nn.Module):
    """LeNet-style CNN for 32x32 RGB images (e.g. CIFAR).

    Two conv/ReLU/max-pool stages followed by three fully-connected
    layers; returns raw logits of shape (batch, args.num_classes).
    """

    def __init__(self, args):
        super(CNNCifar, self).__init__()
        # Attribute names are part of the checkpoint format; keep them.
        self.conv1 = nn.Conv2d(3, 16, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 5)
        # For 32x32 input the second pool yields 32 feature maps of 5x5.
        self.fc1 = nn.Linear(32 * 5 * 5, 240)
        self.fc2 = nn.Linear(240, 168)
        self.fc3 = nn.Linear(168, args.num_classes)

    def forward(self, x):
        # Stage 1: 3x32x32 -> 16x14x14; stage 2: 16x14x14 -> 32x5x5.
        h = self.pool(F.relu(self.conv1(x)))
        h = self.pool(F.relu(self.conv2(h)))
        # Flatten to (batch, 800) for the classifier head.
        h = h.view(-1, 32 * 5 * 5)
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        return self.fc3(h)