# Design a CNN-based network, trained on MNIST data, to build a handwritten-digit recognition model.

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import pandas as pd
import numpy as np

class ConvNet(nn.Module):
    """Small CNN classifier for 28x28 single-channel MNIST digits.

    WARNING: the ``train`` method below shadows ``nn.Module.train(mode)``.
    As a consequence ``net.eval()`` (which internally calls
    ``self.train(False)``) raises a TypeError on instances of this class;
    callers must flip ``module.training`` flags manually instead.
    """

    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()

        # conv block 1: 1 -> 16 channels; 28x28 -> 14x14 after pooling
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )

        # conv block 2: 16 -> 32 channels; 14x14 -> 7x7 after pooling
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )

        # Classifier head: 7*7*32 features -> hidden -> num_classes scores.
        # NOTE(review): the trailing LeakyReLU on the output is unusual
        # before CrossEntropyLoss (it rescales negative logits by 0.02);
        # kept as-is to preserve the trained model's behavior.
        self.fc = nn.Sequential(
            nn.Linear(7 * 7 * 32, 28 * 28),
            nn.LeakyReLU(0.02),
            nn.Linear(28 * 28, num_classes),
            nn.LeakyReLU(0.02)
        )

        # cost function: scores vs. one-hot/probability targets
        self.loss_function = nn.CrossEntropyLoss()

        # optimiser -- Adam over all registered parameters
        self.optimiser = torch.optim.Adam(self.parameters(), lr=0.001)

        # training-progress bookkeeping
        self.counter = 0      # number of train() calls so far
        self.progress = []    # loss sampled every 10 calls

    def forward(self, x):
        """Return class scores of shape (batch, num_classes) for input x."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)  # flatten to (batch, 7*7*32)
        out = self.fc(out)
        return out

    def train(self, inputs, targets):
        """Run one optimisation step on a single sample.

        NOTE: shadows nn.Module.train() -- see class docstring.
        Assumes batch size 1: the [0] below drops the batch dimension so
        the loss sees an unbatched (C,) score vector against a (C,)
        probability target.
        """
        # forward pass
        outputs = self.forward(inputs)[0]

        # calculate cost
        loss = self.loss_function(outputs, targets)

        # zero grad, backward propagation, weight updating
        self.optimiser.zero_grad()
        loss.backward()
        self.optimiser.step()

        # record progress: sample loss every 10 steps, log every 1000
        self.counter += 1
        if (self.counter % 10 == 0):
            self.progress.append(loss.item())

            if (self.counter % 1000 == 0):
                # BUGFIX: print the scalar value, not the tensor repr
                print("counter = {0}, loss = {1}".format(self.counter, loss.item()))

    def plot_progress(self):
        """Plot the sampled training losses; no-op when nothing recorded."""
        if (len(self.progress)):
            df = pd.DataFrame(self.progress, columns=['loss'])
            df.plot(ylim=(0, 1.0), figsize=(16, 8), alpha=0.1, marker='', grid=True, yticks=(0, 0.25, 0.5))

def train():
    """Train a ConvNet on the MNIST training CSV and save the whole model.

    Expects ./mnist_data/mnist_train.csv with no header row and one row
    per image: column 0 is the label (0-9), columns 1..784 are pixel
    values in 0-255.
    """
    epochs = 5
    num_classes = 10
    # (removed unused `learning_rate` local -- the lr is hard-coded to
    # 0.001 inside ConvNet.__init__)

    # load data
    df = pd.read_csv('./mnist_data/mnist_train.csv', header=None)

    # create network
    cnn_net = ConvNet(num_classes)

    # train one sample at a time (batch size 1)
    for epoch in range(epochs):
        print("training epoch {0} of {1}".format(epoch + 1, epochs))

        num_rows = len(df)
        for i in range(num_rows):
            data = df.iloc[i]

            # acquire label and build a one-hot probability target
            label = data[0]
            target_tensor = torch.zeros((10))
            target_tensor[label] = 1.0

            # prepare picture for convolution -- 1 batch, 1 channel, 28*28,
            # scaled from 0-255 to [0, 1]
            image_data_tensor = torch.zeros(1, 1, 28, 28)
            image_data_tensor[0, 0] = torch.FloatTensor(data[1:].values.reshape(28, 28)) / 255.0

            cnn_net.train(image_data_tensor, target_tensor)

    # NOTE(review): this pickles the entire module (test() expects that
    # format); saving cnn_net.state_dict() would be more portable but
    # would break the existing loader in test().
    torch.save(cnn_net, 'cnn_mnist_parameters.pt')
    cnn_net.plot_progress()

def test():
    """Evaluate the saved model on the first 1000 MNIST test rows."""
    # SECURITY NOTE: torch.load unpickles arbitrary objects -- only load
    # checkpoints you created yourself.
    net = torch.load('cnn_mnist_parameters.pt')

    # BUGFIX: put every sub-module into eval mode so BatchNorm uses its
    # running statistics instead of per-sample batch statistics.
    # net.eval() cannot be used here: ConvNet overrides nn.Module.train(),
    # and eval() internally calls self.train(False).
    for module in net.modules():
        module.training = False

    # load test data (no header row: column 0 = label, 1..784 = pixels)
    df = pd.read_csv('./mnist_data/mnist_test.csv', header=None)
    err = 0

    # BUGFIX: inference needs no autograd graph
    with torch.no_grad():
        for i in range(1000):
            data = df.iloc[i]
            label = data[0]
            image_data_tensor = torch.zeros(1, 1, 28, 28)
            image_data_tensor[0, 0] = torch.FloatTensor(data[1:].values.reshape(28, 28)) / 255.0
            # test: predicted class = argmax over the 10 output scores
            output = net(image_data_tensor)
            output_np = output.detach().numpy()
            output_label = np.argmax(output_np)
            print(output_np)
            print(output_label)
            if output_label != label:
                err += 1
    print(f'error={err}')

# Script entry point: guard so importing this module does not trigger a
# training/testing run. Uncomment train() to retrain before testing.
if __name__ == "__main__":
    # train()
    test()