import cv2 as cv
import numpy as np
import torch.nn as nn
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
import os.path
from matplotlib import pyplot as plt

DATA_DIR = 'deep-learning/CNN/data/'

class CNNnet(nn.Module):
    """Small 4-layer CNN classifier for 1x28x28 grayscale digit images.

    Architecture: four stride-2 conv blocks (spatial 28 -> 14 -> 7 -> 4 -> 2),
    each Conv2d + BatchNorm2d + ReLU, followed by two fully connected layers
    producing `common_size` logits.

    Args:
        name: identifier used to build the checkpoint filename under DATA_DIR.
        common_size: number of output classes (default 10).
    """

    def __init__(self, name, common_size=10):
        super(CNNnet, self).__init__()
        self.name = name
        # 1x28x28 -> 16x14x14
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1,
                      out_channels=16,
                      kernel_size=3,
                      stride=2,
                      padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU()
        )
        # 16x14x14 -> 32x7x7
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 3, 2, 1),
            nn.BatchNorm2d(32),
            nn.ReLU()
        )
        # 32x7x7 -> 64x4x4
        self.conv3 = nn.Sequential(
            nn.Conv2d(32, 64, 3, 2, 1),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )
        # 64x4x4 -> 64x2x2
        self.conv4 = nn.Sequential(
            nn.Conv2d(64, 64, 2, 2, 0),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )
        self.mlp1 = nn.Linear(2 * 2 * 64, 100)
        self.mlp2 = nn.Linear(100, common_size)

    def forward(self, x):
        """Return logits of shape (N, common_size) for input of shape (N, 1, 28, 28)."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.mlp1(x.view(x.size(0), -1))  # flatten (N, 64, 2, 2) -> (N, 256)
        x = self.mlp2(x)
        return x

    def save_model(self):
        """Save the state dict to DATA_DIR/<name>.pkl, creating DATA_DIR if needed."""
        # BUG FIX: previously raised FileNotFoundError when DATA_DIR did not exist.
        os.makedirs(DATA_DIR, exist_ok=True)
        torch.save(self.state_dict(), os.path.join(DATA_DIR, self.name + '.pkl'))

    @staticmethod
    def load_model(name, common_size=10):
        """Build a CNNnet and load its checkpoint if one exists.

        Returns a freshly initialized (untrained) network when no checkpoint
        file is found. BUG FIX: map_location='cpu' lets a checkpoint saved on
        a GPU machine load on a CPU-only machine.
        """
        net = CNNnet(name, common_size)
        path = os.path.join(DATA_DIR, name + '.pkl')
        if os.path.isfile(path):
            net.load_state_dict(torch.load(path, map_location='cpu'))
        return net

class MNIST:
    """Train and evaluate a CNNnet classifier on torchvision's MNIST dataset.

    Args:
        EPOCHS: number of passes over the training set.
        BATCH_SIZE: mini-batch size for both loaders.
        LR: Adam learning rate.
    """

    def __init__(self, EPOCHS=10, BATCH_SIZE=200, LR=0.001):
        self.epochs = EPOCHS
        self.batch_size = BATCH_SIZE

        # 0.1307 / 0.3081 are the conventional MNIST mean/std.
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        # Training data (downloaded on first use).
        self.train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(DATA_DIR, train=True, download=True,
                           transform=transform),
            batch_size=BATCH_SIZE, shuffle=True)
        # Test data.
        self.test_loader = torch.utils.data.DataLoader(
            datasets.MNIST(DATA_DIR, train=False, transform=transform),
            batch_size=BATCH_SIZE, shuffle=True)

        # Resumes from DATA_DIR/mnist.pkl when a checkpoint exists.
        self.net = CNNnet.load_model('mnist', 10)
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=LR)
        self.criteon = nn.CrossEntropyLoss()

    def train(self):
        """Run self.epochs passes of mini-batch training, logging every 100 batches."""
        self.net.train()  # ensure BatchNorm uses batch statistics while training
        for epoch in range(self.epochs):
            for batch_idx, (data, target) in enumerate(self.train_loader):
                logits = self.net(data)              # data: (B, 1, 28, 28)
                loss = self.criteon(logits, target)  # target: (B,)

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                if batch_idx % 100 == 0:             # log every 100 batches
                    print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        epoch, batch_idx * len(data), len(self.train_loader.dataset),
                            100. * batch_idx / len(self.train_loader), loss.item()))

    def test(self):
        """Print average per-sample loss and accuracy over the test set."""
        # BUG FIX: evaluate with running BatchNorm statistics (eval mode) and
        # without autograd; previously BatchNorm used per-batch stats at test time.
        self.net.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for data, target in self.test_loader:
                logits = self.net(data)
                # BUG FIX: CrossEntropyLoss returns a per-batch mean; weight it
                # by batch size so dividing by the dataset size below yields a
                # true per-sample average (was off by a factor of batch_size).
                test_loss += self.criteon(logits, target).item() * data.size(0)
                pred = logits.argmax(dim=1)
                correct += pred.eq(target).sum().item()

        test_loss = test_loss / len(self.test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
                test_loss, correct, len(self.test_loader.dataset),
                100. * correct / len(self.test_loader.dataset)))

    def save_model(self):
        """Persist the network weights via CNNnet.save_model()."""
        self.net.save_model()

class DIGITS_CV:
    """Train and evaluate a CNNnet on OpenCV's digits.png sample sheet.

    digits.png is a 2000x1000 grayscale image holding 5000 handwritten digits
    in 50 rows x 100 columns of 20x20-pixel cells; each digit occupies 5
    consecutive rows (rows 0-4 are digit 0, rows 5-9 digit 1, ...).

    Args:
        MLP: unused, kept for interface compatibility.
        EPOCHS: number of training passes.
        BATCH_SIZE: mini-batch size.
        LR: Adam learning rate.
    """

    def __init__(self, MLP=256, EPOCHS=20, BATCH_SIZE=100, LR=0.001):
        self.epochs = EPOCHS
        self.batch_size = BATCH_SIZE
        self.input_size = 28

        img = cv.imread(DATA_DIR + 'digits.png')
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        # Split the sheet into 5000 cells of 20x20 pixels each.
        cells = [np.hsplit(row, 100) for row in np.vsplit(gray, 50)]
        x = np.array(cells)
        # Zero-pad each cell by 4 pixels on every side: 20x20 -> 28x28.
        x = np.pad(x, ((0, 0), (0, 0), (4, 4), (4, 4)), 'constant', constant_values=(0, 0))

        # First 50 columns for training, last 50 for testing: (2500, 28, 28) each.
        self.train_data = x[:, :50].reshape(-1, self.input_size, self.input_size).astype(np.float32)
        self.test_data = x[:, 50:100].reshape(-1, self.input_size, self.input_size).astype(np.float32)
        # Labels: 250 consecutive samples per digit 0..9, matching the row layout.
        k = np.arange(10)
        self.train_labels = np.repeat(k, 250)
        self.test_labels = self.train_labels.copy()
        # BUG FIX: was int(abs(len / BATCH_SIZE)); floor division is the
        # intended whole-batch count and cannot mis-round.
        self.batches = len(self.train_data) // BATCH_SIZE

        # Resumes from DATA_DIR/digits_cv.pkl when a checkpoint exists.
        self.net = CNNnet.load_model('digits_cv', 10)
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=LR)
        self.criteon = nn.CrossEntropyLoss()

    def _batch(self, data, labels, batch_idx):
        """Return one (inputs, targets) tensor pair for the given batch index."""
        lo = batch_idx * self.batch_size
        hi = lo + self.batch_size
        # Was copied through a preallocated zeros buffer; a direct slice is
        # equivalent and avoids the extra allocation.
        inputs = data[lo:hi]
        # Standardize: (value - mean) / std over the batch. Min-max
        # normalization is deliberately avoided — it would wash out the
        # contrast between neighboring pixels.
        normed = (inputs - inputs.mean()) / inputs.std()
        # (B, 28, 28) -> (B, 1, 28, 28) to add the channel dimension.
        x = torch.tensor(normed, dtype=torch.float).unsqueeze(1)
        y = torch.tensor(labels[lo:hi], dtype=torch.long)
        return x, y

    def train(self):
        """Run self.epochs passes over the training batches, logging progress."""
        self.net.train()  # ensure BatchNorm uses batch statistics while training
        for epoch in range(self.epochs):
            for batch_idx in range(self.batches):
                data, target = self._batch(self.train_data, self.train_labels, batch_idx)
                logits = self.net(data)
                loss = self.criteon(logits, target)

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                if batch_idx * self.batch_size % 100 == 0:
                    print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        epoch, batch_idx * self.batch_size, len(self.train_data),
                            100.0 * batch_idx * self.batch_size / len(self.train_data), loss.item()))

    def test(self):
        """Print average loss and accuracy over the held-out half of digits.png."""
        # BUG FIX: evaluate with running BatchNorm statistics (eval mode) and
        # without autograd; previously BatchNorm used per-batch stats at test time.
        self.net.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for batch_idx in range(self.batches):
                data, target = self._batch(self.test_data, self.test_labels, batch_idx)
                logits = self.net(data)
                test_loss += self.criteon(logits, target).item()
                pred = logits.argmax(dim=1)
                correct += pred.eq(target).sum()

        test_loss = test_loss / len(self.test_data)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
                test_loss, correct, len(self.test_data),
                100. * correct / len(self.test_data)))

    def save_model(self):
        """Persist the network weights via CNNnet.save_model()."""
        self.net.save_model()

def main():
    """Train, evaluate, and checkpoint the MNIST model."""
    mnist = MNIST()
    mnist.train()
    mnist.test()
    mnist.save_model()

    # To run the OpenCV digits.png experiment instead:
    # digits_cv = DIGITS_CV()
    # digits_cv.train()
    # digits_cv.test()
    # digits_cv.save_model()


# BUG FIX: guard the entry point so importing this module does not
# immediately launch a full training run as a side effect.
if __name__ == '__main__':
    main()
