# import torch.nn as nn
# import torch.nn.functional as F


# class LinearBottleNeck(nn.Module):
#     def __init__(self, in_channels, out_channels, stride, t):
#         super(LinearBottleNeck, self).__init__()

#         self.residual = nn.Sequential(
#             nn.Conv2d(in_channels, in_channels * t, 1),
#             nn.BatchNorm2d(in_channels * t),
#             nn.ReLU6(inplace=True),

#             nn.Conv2d(in_channels * t, in_channels * t, 3, stride=stride, padding=1, groups=in_channels * t),
#             nn.BatchNorm2d(in_channels * t),
#             nn.ReLU6(inplace=True),

#             nn.Conv2d(in_channels * t, out_channels, 1),
#             nn.BatchNorm2d(out_channels)
#         )

#         self.stride = stride
#         self.in_channels = in_channels
#         self.out_channels = out_channels

#     def forward(self, x):
#         residual = self.residual(x)

#         if self.stride == 1 and self.in_channels == self.out_channels:
#             residual += x

#         return residual


# class MobileNetV2(nn.Module):
#     """
#         MobileNetV2 implementation
#     """
#     def __init__(self, num_classes = 10):
#         super(MobileNetV2, self).__init__()
#         self.pre = nn.Sequential(
#             nn.Conv2d(3, 32, 3, padding=1),
#             nn.BatchNorm2d(32),
#             nn.ReLU6(inplace=True)
#         )

#         self.stage1 = LinearBottleNeck(32, 16, 1, 1)
#         self.stage2 = self._make_stage(2, 16, 24, 2, 6)
#         self.stage3 = self._make_stage(3, 24, 32, 2, 6)
#         ##########################################
#         self.stage4 = self._make_stage(4, 32, 64, 2, 6)
#         self.stage5 = self._make_stage(3, 64, 96, 1, 6)
#         self.stage6 = self._make_stage(3, 96, 160, 2, 6)
#         self.stage7 = LinearBottleNeck(160, 320, 1, 6)

#         self.conv1 = nn.Sequential(
#             nn.Conv2d(320, 1280, 1),
#             nn.BatchNorm2d(1280),
#             nn.ReLU6(inplace=True)
#         )
#         self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
#         self.conv2 = nn.Conv2d(1280, num_classes, 1)

#     def _make_stage(self, n, in_channels, out_channels, stride, t):
#         layers = [LinearBottleNeck(in_channels, out_channels, stride, t)]

#         while n - 1:
#             layers.append(LinearBottleNeck(out_channels, out_channels, 1, t))
#             n -= 1

#         return nn.Sequential(*layers)

#     def forward(self, x):

#         x = self.pre(x)
#         x = self.stage1(x)
#         x = self.stage2(x)
#         x = self.stage3(x)
#         x = self.stage4(x)
#         x = self.stage5(x)
#         x = self.stage6(x)
#         x = self.stage7(x)
#         x = self.conv1(x)
#         # ?
#         x = self.avgpool(x)
#         # x = F.adaptive_max_pool2d(x, 1)
#         result = {'representation': x.view(x.size(0), -1)}
#         x = self.conv2(x)
#         x = x.view(x.size(0), -1)
#         result['output'] = x
#         return result

import torch.nn as nn
import torch.nn.functional as F
import torch

class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one.

    Kept as an ``nn.Module`` so it can sit inside an ``nn.Sequential``;
    equivalent to ``x.reshape(batch, -1)``.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Product of all non-batch dimensions gives the flattened width.
        width = 1
        for dim in x.shape[1:]:
            width *= dim
        return x.reshape(-1, width)
class bottleneck(nn.Module):
    """MobileNetV2 inverted-residual block.

    Layout: 1x1 pointwise expansion -> 3x3 depthwise conv (carries the
    stride) -> linear 1x1 projection. An identity shortcut is added only
    when both the spatial size (stride == 1) and the channel count are
    unchanged, so the shapes match exactly.
    """

    def __init__(self, inchannels, outchannels, stride, expansion):
        super().__init__()
        self.inchannels = inchannels
        self.outchannels = outchannels
        self.stride = stride

        hidden = inchannels * expansion
        self.residual = nn.Sequential(
            # pointwise expansion
            nn.Conv2d(inchannels, hidden, kernel_size=1),
            nn.BatchNorm2d(hidden),
            nn.ReLU6(inplace=True),
            # depthwise 3x3: groups == channels, one filter per channel
            nn.Conv2d(hidden, hidden, kernel_size=3, padding=1,
                      groups=hidden, stride=stride),
            nn.BatchNorm2d(hidden),
            nn.ReLU6(inplace=True),
            # linear projection -- deliberately no activation afterwards
            nn.Conv2d(hidden, outchannels, kernel_size=1),
            nn.BatchNorm2d(outchannels),
        )

    def forward(self, x):
        y = self.residual(x)
        if self.stride == 1 and self.inchannels == self.outchannels:
            y = y + x  # identity shortcut
        return y
class MobileNetV2(nn.Module):
    """MobileNetV2 (Sandler et al., 2018) with a width multiplier.

    The stem convolution uses stride 1 (rather than the paper's stride 2),
    which suits small inputs such as CIFAR-sized images.

    Args:
        args: namespace providing ``dataset``, ``num_channels`` and
            ``num_classes``.
        alpha: width multiplier applied to the internal channel counts.

    ``forward`` returns ``{'output': logits}`` where logits has shape
    ``(batch, num_classes)``.
    """

    def __init__(self, args, alpha=1):
        super().__init__()

        self.dataset = args.dataset
        # NOTE(review): an earlier revision up-projected 22-channel 'widar'
        # input via ConvTranspose2d before the stem; that path is disabled.

        self.Conv1 = nn.Sequential(
            nn.Conv2d(in_channels=args.num_channels, out_channels=int(alpha*32),
                      kernel_size=3, stride=1, padding=1),
            # Bug fix: BatchNorm must use the alpha-scaled channel count
            # (was hard-coded to 32, which crashed for any alpha != 1).
            nn.BatchNorm2d(int(alpha*32)),
            nn.ReLU6(inplace=True)
        )

        # Bug fix: stage1's output must match stage2's expected input of
        # int(alpha*16) channels (was hard-coded 16, broken for alpha != 1).
        self.stage1 = bottleneck(int(alpha*32), int(alpha*16), 1, 1)
        self.stage2 = self.make_layer(int(alpha*16), 6, int(alpha*24), 2, 2)
        self.stage3 = self.make_layer(int(alpha*24), 6, int(alpha*32), 3, 2)
        self.stage4 = self.make_layer(int(alpha*32), 6, int(alpha*64), 4, 2)
        self.stage5 = self.make_layer(int(alpha*64), 6, int(alpha*96), 3, 1)
        # NOTE(review): the paper uses stride 2 for this stage; stride 1 is
        # kept to preserve this model's existing behavior -- confirm intent.
        self.stage6 = self.make_layer(int(alpha*96), 6, int(alpha*160), 3, 1)
        self.stage7 = self.make_layer(int(alpha*160), 6, int(alpha*320), 1, 1)

        self.Conv2 = nn.Sequential(
            nn.Conv2d(in_channels=int(alpha*320), out_channels=1280, kernel_size=1),
            nn.BatchNorm2d(1280),
            nn.ReLU6(inplace=True)
        )

        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.drop = nn.Dropout(0.2)
        # 1x1 convolution acts as the classifier head.
        self.Conv3 = nn.Conv2d(in_channels=1280, out_channels=args.num_classes,
                               kernel_size=1)
        self.flatten = Flatten()

    def make_layer(self, inchannels, t, outchannels, n, s):
        """Stack ``n`` bottlenecks; the first carries stride ``s``, the rest 1.

        Args:
            inchannels: channels entering the stage.
            t: expansion factor for every block in the stage.
            outchannels: channels produced by every block in the stage.
            n: number of bottleneck blocks.
            s: stride of the first block.
        """
        layers = [bottleneck(inchannels, outchannels, s, t)]
        for _ in range(n - 1):
            layers.append(bottleneck(outchannels, outchannels, 1, t))
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.Conv1(x)
        out = self.stage1(out)
        out = self.stage2(out)
        out = self.stage3(out)
        out = self.stage4(out)
        out = self.stage5(out)
        out = self.stage6(out)
        out = self.stage7(out)
        out = self.Conv2(out)
        out = self.avgpool(out)   # -> (batch, 1280, 1, 1)
        out = self.drop(out)
        out = self.Conv3(out)     # -> (batch, num_classes, 1, 1)
        out = self.flatten(out)   # -> (batch, num_classes)
        return {'output': out}
 
def mobilenetv2(args, alpha=1):
    """Factory for :class:`MobileNetV2`.

    Bug fix: forward the ``alpha`` width multiplier to the constructor --
    it was previously ignored and hard-coded to 1, so callers requesting a
    slimmer model silently got the full-width one.
    """
    return MobileNetV2(args, alpha)