import pandas as pd
import numpy as np
import torch
import torchvision.datasets as datasets
import torch.nn as nn
from torch.autograd import Variable
from PIL import Image
import torchvision.transforms as transforms

# Load the MNIST-style CSV: column 0 is the label, the remaining 784 columns
# are pixel intensities in [0, 255]; scale pixels to [0, 1].
train_data = pd.read_csv('train.csv').values
X_train, y_train = train_data[:, 1:] / 255, train_data[:, 0]
# Reshape flat rows into (N, channels=1, 28, 28) images for the conv layers.
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_train, y_train = torch.from_numpy(X_train).float(), torch.from_numpy(y_train).long()
train_dataset = torch.utils.data.TensorDataset(X_train, y_train)
# shuffle=True: SGD trains poorly when it sees samples in the same fixed
# (often label-ordered) sequence every epoch.
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=16, shuffle=True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class cnn(nn.Module):
    """Small two-stage CNN for 28x28 single-channel digit classification.

    Architecture:
        conv(1->8, 3x3, pad 1) -> BN -> ReLU -> maxpool 2x2   (28x28 -> 14x14)
        conv(8->32, 5x5, pad 2) -> BN -> ReLU -> maxpool 2x2  (14x14 -> 7x7)
        flatten (32*7*7 = 1568) -> fc 600 -> ReLU -> dropout(0.5) -> fc 10

    forward() returns raw logits of shape (batch, 10) — pair with
    nn.CrossEntropyLoss, which applies log-softmax itself.
    """

    def __init__(self):
        super(cnn, self).__init__()
        # Padding keeps spatial size through each conv; pooling halves it.
        self.cnn1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=3, padding=1, stride=1)
        self.batch_norm1 = nn.BatchNorm2d(8)
        self.relu = nn.ReLU()
        self.max_pool = nn.MaxPool2d(kernel_size=2)
        self.cnn2 = nn.Conv2d(in_channels=8, out_channels=32, kernel_size=5, padding=2, stride=1)
        self.batch_norm2 = nn.BatchNorm2d(32)
        self.fc1 = nn.Linear(32 * 7 * 7, 600)  # 1568 features after two 2x pools
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(600, 10)

    def forward(self, x):
        """Map a (batch, 1, 28, 28) tensor to (batch, 10) class logits."""
        out = self.max_pool(self.relu(self.batch_norm1(self.cnn1(x))))
        out = self.max_pool(self.relu(self.batch_norm2(self.cnn2(out))))
        # Flatten per sample. Keeping the batch dimension explicit (rather
        # than view(-1, 1568)) raises an error instead of silently mixing
        # samples if the spatial size is ever wrong.
        out = out.view(out.size(0), -1)
        out = self.dropout(self.relu(self.fc1(out)))
        return self.fc2(out)

# Build the model on the available device (`device` is defined at the top
# of the file as cuda-if-available, else cpu).
model = cnn().to(device)

# CrossEntropyLoss expects raw logits and integer class labels.
loss_fn = nn.CrossEntropyLoss()

optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

num_epochs = 25

# Per-epoch metric history.
train_loss = []
test_loss = []
train_accuracy = []
test_accuracy = []

for epoch in range(num_epochs):
    correct = 0        # correctly classified samples this epoch
    iteration = 0      # mini-batches processed this epoch
    iter_loss = 0.0    # summed batch losses as plain floats

    model.train()  # enable dropout and batch-norm batch statistics

    for i, (inputs, labels) in enumerate(train_loader):

        if torch.cuda.is_available():
            inputs = inputs.cuda()
            labels = labels.cuda()

        outputs = model(inputs)

        loss = loss_fn(outputs, labels)

        # .item() detaches the scalar. Accumulating the loss tensor itself
        # would keep every batch's autograd graph alive for the whole epoch,
        # leaking memory and storing tensors in train_loss.
        iter_loss += loss.item()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Predicted class = argmax over the 10 logits.
        _, predicted = torch.max(outputs, 1)
        correct += (predicted == labels).sum().item()
        iteration += 1

    train_loss.append(iter_loss / iteration)
    train_accuracy.append(100.0 * correct / len(train_dataset))

    print('Epochs: {}/{},train_loss = {:.3f},training_accuracy = {:.3f}'.format(epoch + 1, num_epochs, train_loss[-1],
                                                                                train_accuracy[-1]))
