import cv2 as cv
import numpy as np
import torch.nn as nn
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
import os.path

DATA_DIR = 'deep-learning/MLP/data/'

class MLP(nn.Module):
    """Three-layer fully-connected classifier with ReLU activations.

    Hidden widths are derived from ``input_size`` (halved, then quartered),
    ending in a ``common_size``-way output layer that emits raw logits.
    """

    def __init__(self, name, input_size = 200, common_size = 10):
        """
        Args:
            name: identifier used to build the checkpoint file name.
            input_size: number of input features per sample.
            common_size: number of output classes.
        """
        super(MLP, self).__init__()
        self.name = name
        self.linear = nn.Sequential(
            nn.Linear(input_size, input_size // 2),
            nn.ReLU(inplace=True),
            nn.Linear(input_size // 2, input_size // 4),
            nn.ReLU(inplace=True),
            nn.Linear(input_size // 4, common_size)
        )

    def forward(self, x):
        """Return raw class logits for a flattened input batch."""
        return self.linear(x)

    def _checkpoint_path(self):
        # os.path.join is safer than string concatenation for building paths.
        return os.path.join(DATA_DIR, self.name + '.pkl')

    def save_model(self):
        """Persist the network weights to DATA_DIR/<name>.pkl."""
        torch.save(self.state_dict(), self._checkpoint_path())

    @staticmethod
    def load_model(name, input_size = 200, common_size = 10):
        """Build an MLP and load saved weights if a checkpoint exists."""
        net = MLP(name, input_size, common_size)
        path = os.path.join(DATA_DIR, name + '.pkl')
        if os.path.isfile(path):
            net.load_state_dict(torch.load(path))
        return net

class MNIST:
    """Train and evaluate an MLP classifier on the MNIST dataset."""

    def __init__(self, INPUT_SIZE = 784, EPOCHS = 1, BATCH_SIZE = 200, LR = 0.01):
        """
        Args:
            INPUT_SIZE: flattened image size (28*28 = 784).
            EPOCHS: number of training passes over the data.
            BATCH_SIZE: mini-batch size for both loaders.
            LR: SGD learning rate.
        """
        self.input_size = INPUT_SIZE
        self.epochs = EPOCHS
        self.batch_size = BATCH_SIZE

        # Canonical MNIST per-channel mean/std normalization.
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        # Training data (downloaded on first use).
        self.train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(DATA_DIR, train=True, download=True, transform=transform),
            batch_size=BATCH_SIZE, shuffle=True)
        # Test data.
        self.test_loader = torch.utils.data.DataLoader(
            datasets.MNIST(DATA_DIR, train=False, transform=transform),
            batch_size=BATCH_SIZE, shuffle=True)

        self.net = MLP.load_model('mnist', INPUT_SIZE, 10)
        self.optimizer = torch.optim.SGD(self.net.parameters(), lr=LR)
        self.criteon = nn.CrossEntropyLoss()

    def train(self):
        """Run SGD training for self.epochs epochs, logging every 100 batches."""
        self.net.train()  # ensure training mode (matters if test() ran first)
        for epoch in range(self.epochs):
            for batch_idx, (data, target) in enumerate(self.train_loader):
                data = data.view(-1, 28 * 28)  # flatten images to [batch, 784]
                logits = self.net(data)
                loss = self.criteon(logits, target)

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                if batch_idx % 100 == 0:  # log once every 100 batches
                    print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        epoch, batch_idx * len(data), len(self.train_loader.dataset),
                            100. * batch_idx / len(self.train_loader), loss.item()))

    def test(self):
        """Evaluate on the test set, printing average loss and accuracy."""
        self.net.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            for data, target in self.test_loader:
                data = data.view(-1, 28 * 28)
                logits = self.net(data)
                test_loss += self.criteon(logits, target).item()
                pred = logits.max(dim=1)[1]
                correct += pred.eq(target).sum().item()

        # criteon returns a per-batch *mean* (CrossEntropyLoss defaults to
        # reduction='mean'), so average over the batch count.  The original
        # divided by the dataset size, under-reporting the loss by a factor
        # of batch_size.
        test_loss = test_loss / len(self.test_loader)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
                test_loss, correct, len(self.test_loader.dataset),
                100. * correct / len(self.test_loader.dataset)))

    def save_model(self):
        """Write the current weights to the checkpoint file."""
        self.net.save_model()

class DIGITS_CV:
    """Train and evaluate the MLP on OpenCV's bundled 20x20 handwritten-digits sheet."""

    def __init__(self, INPUT_SIZE = 400, EPOCHS = 100, BATCH_SIZE = 100, LR = 0.01):
        """
        Args:
            INPUT_SIZE: flattened cell size (20*20 = 400).
            EPOCHS: number of training passes over the data.
            BATCH_SIZE: mini-batch size.
            LR: SGD learning rate.
        """
        self.input_size = INPUT_SIZE
        self.epochs = EPOCHS
        self.batch_size = BATCH_SIZE

        img = cv.imread(DATA_DIR + 'digits.png')
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        # Split the sheet into 5000 cells of 20x20 pixels (50 rows x 100 cols).
        cells = [np.hsplit(row, 100) for row in np.vsplit(gray, 50)]
        x = np.array(cells)
        # Left half of each row -> training, right half -> testing; each (2500, 400).
        self.train_data = x[:, :50].reshape(-1, INPUT_SIZE).astype(np.float32)
        self.test_data = x[:, 50:100].reshape(-1, INPUT_SIZE).astype(np.float32)
        # Labels: digits 0-9, 250 samples each, in row order.
        k = np.arange(10)
        self.train_labels = np.repeat(k, 250)
        self.test_labels = self.train_labels.copy()
        # Number of full batches per epoch (the original used int(abs(...)),
        # but a length is never negative — floor division says it directly).
        self.batches = len(self.train_data) // BATCH_SIZE

        self.net = MLP.load_model('digits_cv', INPUT_SIZE, 10)
        self.optimizer = torch.optim.SGD(self.net.parameters(), lr=LR)
        self.criteon = nn.CrossEntropyLoss()

    def _batch(self, data, labels, batch_idx):
        """Return (inputs, targets) tensors for mini-batch ``batch_idx``.

        Inputs are standardized per batch: (x - mean) / std.  Plain [0,1]
        scaling is deliberately avoided (original note: it lost detail in
        neighbouring pixels).  float64 promotion matches the original code,
        which staged the batch through an np.zeros float64 buffer.
        """
        lo = batch_idx * self.batch_size
        hi = lo + self.batch_size
        inputs = data[lo:hi, :].astype(np.float64)
        inputs = (inputs - inputs.mean()) / inputs.std()
        target = torch.tensor(labels[lo:hi], dtype=torch.long)
        return torch.tensor(inputs, dtype=torch.float), target

    def train(self):
        """Run SGD training, logging progress every 100 samples."""
        self.net.train()
        for epoch in range(self.epochs):
            for batch_idx in range(self.batches):
                data, target = self._batch(self.train_data, self.train_labels, batch_idx)
                logits = self.net(data)
                loss = self.criteon(logits, target)

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                seen = batch_idx * self.batch_size
                if seen % 100 == 0:
                    # NOTE: the original multiplied `seen` by len(data) again,
                    # inflating the sample counter by a factor of batch_size.
                    print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        epoch, seen, len(self.train_data),
                        100. * seen / len(self.train_data), loss.item()))

    def test(self):
        """Evaluate on the held-out half of the sheet; print loss and accuracy."""
        self.net.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            for batch_idx in range(self.batches):
                data, target = self._batch(self.test_data, self.test_labels, batch_idx)
                logits = self.net(data)
                test_loss += self.criteon(logits, target).item()
                pred = logits.max(dim=1)[1]
                correct += pred.eq(target).sum().item()

        # Average the per-batch *mean* losses over the batch count; the
        # original divided by the dataset size, under-reporting the loss.
        test_loss = test_loss / self.batches
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
                test_loss, correct, len(self.test_data),
                100. * correct / len(self.test_data)))

    def save_model(self):
        """Write the current weights to the checkpoint file."""
        self.net.save_model()

def _main():
    """Train, evaluate, and checkpoint the MNIST classifier."""
    mnist = MNIST()
    mnist.train()
    mnist.test()
    mnist.save_model()

    # Uncomment to run the OpenCV digits experiment instead:
    # digits_cv = DIGITS_CV(400, 10, 100)
    # digits_cv.train()
    # digits_cv.test()
    # digits_cv.save_model()


# Guard so importing this module (e.g. to reuse MLP) doesn't start training.
if __name__ == '__main__':
    _main()
