import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms


# Reproducibility for weight init and data-loader shuffling.
torch.manual_seed(777)

epochs = 5
batch_size = 64

# download=True fetches CIFAR-10 on first run; the original omitted it,
# so the script crashed whenever vgg/data did not already hold the files.
cifar10_train = dsets.CIFAR10(root='vgg/data', train=True, download=True,
                              transform=transforms.ToTensor())
cifar10_test = dsets.CIFAR10(root='vgg/data', train=False, download=True,
                             transform=transforms.ToTensor())

data_loader = torch.utils.data.DataLoader(dataset=cifar10_train,
                                          batch_size=batch_size,
                                          shuffle=True)

# Fall back to CPU when CUDA is unavailable.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

class Cnn(torch.nn.Module):
    """VGG16-style CNN for 10-class 32x32 RGB inputs (e.g. CIFAR-10).

    Five conv blocks, each ending in a 2x2 max-pool that halves the
    spatial size (32 -> 16 -> 8 -> 4 -> 2 -> 1), then three fully
    connected layers (512 -> 4096 -> 1000 -> 10). The loss and optimizer
    live on the module so ``train_model`` can drive one training step.
    """

    def __init__(self):
        super(Cnn, self).__init__()
        self._build_net()

    def _build_net(self):
        # Conv blocks: Conv -> BN -> ReLU (x2 or x3), then 2x2 max-pool.
        self.layer1=torch.nn.Sequential(
            torch.nn.Conv2d(3,64,kernel_size=3,stride=1,padding=1),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(),
            torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=(2,2),stride=2),
        )

        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(128),
            torch.nn.ReLU(),
            torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(128),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2),
        )
        self.layer3 = torch.nn.Sequential(
            torch.nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(256),
            torch.nn.ReLU(),
            torch.nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(256),
            torch.nn.ReLU(),
            torch.nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(256),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2),
        )
        self.layer4 = torch.nn.Sequential(
            torch.nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(512),
            torch.nn.ReLU(),
            torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(512),
            torch.nn.ReLU(),
            torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(512),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2),
        )
        self.layer5 = torch.nn.Sequential(
            torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(512),
            torch.nn.ReLU(),
            torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(512),
            torch.nn.ReLU(),
            torch.nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            torch.nn.BatchNorm2d(512),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2),
        )
        # Classifier head. ``fc`` flattens the (N, 512, 1, 1) feature map.
        self.fc=torch.nn.Flatten()
        self.fc1=torch.nn.Linear(1*1*512,4096,bias=True)
        self.layer6=torch.nn.Sequential(
            self.fc1,
            torch.nn.ReLU(),
            torch.nn.Dropout(0.5)
        )
        self.fc2 = torch.nn.Linear(4096, 1000, bias=True)
        self.layer7 = torch.nn.Sequential(
            self.fc2,
            torch.nn.ReLU(),
            torch.nn.Dropout(0.5)
        )
        self.fc3 = torch.nn.Linear(1000, 10, bias=True)
        self.criterion=torch.nn.CrossEntropyLoss()
        self.optimizer=torch.optim.Adam(self.parameters(),lr=0.0001)

    def forward(self,x):
        """Return class logits for a (N, 3, 32, 32) batch as (N, 10)."""
        out=self.layer1(x)
        out=self.layer2(out)
        out=self.layer3(out)
        out=self.layer4(out)
        out=self.layer5(out)
        out=self.fc(out)        # flatten (N, 512, 1, 1) -> (N, 512)
        # BUGFIX: the original called fc1/fc2 directly, silently skipping
        # the ReLU + Dropout wrappers built in _build_net. Route through
        # layer6 (contains fc1) and layer7 (contains fc2) instead.
        out=self.layer6(out)
        out=self.layer7(out)
        out=self.fc3(out)
        return out

    def predict(self,x):
        """Return logits in eval mode (dropout off, BN running stats).

        NOTE: leaves the module in eval mode; ``train_model`` switches
        back to train mode itself before each step.
        """
        self.eval()
        with torch.no_grad():   # inference only — skip autograd bookkeeping
            return self.forward(x)

    def get_accuracy(self,x,y):
        """Fraction of samples in the batch whose argmax matches ``y``."""
        prediction=self.predict(x)
        correct_predict=(torch.max(prediction,1)[1]==y)
        self.accuracy=correct_predict.float().mean()
        return self.accuracy

    def train_model(self,x,y):
        """Run one optimization step on batch (x, y); return the loss."""
        self.train()
        self.optimizer.zero_grad()
        h=self.forward(x)
        self.cost=self.criterion(h,y)
        self.cost.backward()
        self.optimizer.step()
        return self.cost

model = Cnn().to(device)

# Train for `epochs` passes; print average loss over all batches and the
# accuracy of the final batch (a full-epoch accuracy would need a second pass).
for epoch in range(epochs):
    avg_cost = 0.0
    total_batch = len(data_loader)  # actual batch count (floor division undercounts)
    for batch_x, batch_y in data_loader:
        # BUGFIX: move tensors to the selected device instead of hard-coding
        # .cuda(), so the script also runs on CPU-only machines. (Variable
        # is deprecated; tensors carry autograd state directly.)
        X = batch_x.to(device)
        Y = batch_y.to(device)

        cost = model.train_model(X, Y)

        avg_cost += cost.item() / total_batch

        acc = model.get_accuracy(X, Y)

    print(epoch + 1, avg_cost, acc.item())

# Test-set evaluation (batched, to avoid holding all 10k images at once; the
# old `test_data`/`test_labels` attributes no longer exist on CIFAR10):
# test_loader = torch.utils.data.DataLoader(cifar10_test, batch_size=batch_size)
# correct = sum((model.predict(x.to(device)).argmax(1) == y.to(device)).sum().item()
#               for x, y in test_loader)
# print('acc', correct / len(cifar10_test))
