import torch
import torch.nn as nn
import torch.nn.functional as F

class VGG16(nn.Module):
    """VGG-16 (configuration D) convolutional network.

    Thirteen 3x3 conv layers in five blocks, each block followed by a
    2x2 max-pool, then a three-layer fully-connected classifier head.

    NOTE(review): `fc1` has 512 input features, which assumes the feature
    map is 1x1 after the five pools — i.e. 32x32 spatial input (e.g.
    CIFAR), not the 224x224 of the original paper. Confirm against callers.

    Args:
        in_channels: number of channels in the input image.
        num_classes: size of the final logit vector.
    """

    def __init__(self, in_channels, num_classes):
        super(VGG16, self).__init__()
        # One shared pooling module; max-pool has no parameters, so
        # reusing a single instance is safe.
        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=2)

        # Block 1: in_channels -> 64
        self.conv1 = self._make_conv(in_channels, 64)
        self.conv2 = self._make_conv(64, 64)

        # Block 2: 64 -> 128
        self.conv3 = self._make_conv(64, 128)
        self.conv4 = self._make_conv(128, 128)

        # Block 3: 128 -> 256
        self.conv5 = self._make_conv(128, 256)
        self.conv6 = self._make_conv(256, 256)
        self.conv7 = self._make_conv(256, 256)

        # Block 4: 256 -> 512
        self.conv8 = self._make_conv(256, 512)
        self.conv9 = self._make_conv(512, 512)
        self.conv10 = self._make_conv(512, 512)

        # Block 5: 512 -> 512
        self.conv11 = self._make_conv(512, 512)
        self.conv12 = self._make_conv(512, 512)
        self.conv13 = self._make_conv(512, 512)

        # Classifier head.
        self.fc1 = self._make_fc(512, 4096)
        self.fc2 = self._make_fc(4096, 1000)
        self.fc3 = self._make_fc(1000, num_classes)

    @staticmethod
    def _make_conv(in_ch, out_ch):
        """3x3 same-padding conv, Kaiming-initialized weight, zero bias."""
        conv = nn.Conv2d(in_ch, out_ch, kernel_size=(3, 3), padding=(1, 1),
                         bias=True)
        nn.init.kaiming_normal_(conv.weight)
        nn.init.constant_(conv.bias, 0)
        return conv

    @staticmethod
    def _make_fc(in_features, out_features):
        """Linear layer with Kaiming-initialized weight and zero bias.

        The zero bias init is added for consistency with the conv layers
        (the original left fc biases at PyTorch's default uniform init).
        """
        fc = nn.Linear(in_features, out_features)
        nn.init.kaiming_normal_(fc.weight)
        nn.init.constant_(fc.bias, 0)
        return fc

    def forward(self, x):
        """Run the network.

        Args:
            x: input batch of shape (batch, in_channels, H, W);
               H = W = 32 is assumed by the classifier head (see class doc).

        Returns:
            Raw class logits of shape (batch, num_classes) — no softmax.
        """
        out = F.relu(self.conv1(x))
        out = self.pool(F.relu(self.conv2(out)))
        out = F.relu(self.conv3(out))
        out = self.pool(F.relu(self.conv4(out)))
        out = F.relu(self.conv5(out))
        out = F.relu(self.conv6(out))
        out = self.pool(F.relu(self.conv7(out)))
        out = F.relu(self.conv8(out))
        out = F.relu(self.conv9(out))
        out = self.pool(F.relu(self.conv10(out)))
        out = F.relu(self.conv11(out))
        out = F.relu(self.conv12(out))
        out = self.pool(F.relu(self.conv13(out)))
        # Flatten per sample, keeping the batch dimension. The original
        # used .flatten() with no start dim, which collapsed the batch
        # axis as well and only worked for batch size 1.
        out = torch.flatten(out, 1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)