import os
import os.path as osp
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.fusion import fuse_conv_bn_weights
import torchvision
import torchvision.transforms as transforms
from torch.quantization import QuantStub, DeQuantStub
import numpy as np
from tqdm import tqdm


class NetMNIST(nn.Module):
    """All-convolutional MNIST classifier: maps a 28x28 single-channel image
    to a (B, class_num, 1, 1) logit tensor.

    Args:
        class_num: number of output classes (channels of the final 1x1 conv).
        use_batch_norm: insert a BatchNorm2d after each of the first three convs.
        onnx_compatible: use ReLU (ONNX-friendly) instead of ReLU6.
        qat: register Quant/DeQuant stubs so the model can later be prepared
            for quantization-aware training (identity ops until prepared).
        export_to_edgetpu: build the 3x3 convs without implicit padding and
            pad explicitly in NHWC layout inside ``forward``; the model then
            expects NHWC input, e.g. (B, 28, 28, 1).
    """

    def __init__(self,
                 class_num,
                 use_batch_norm=True,
                 onnx_compatible=True,
                 qat=False,
                 export_to_edgetpu=False):
        super(NetMNIST, self).__init__()
        self.export_to_edgetpu = export_to_edgetpu
        act_cls = nn.ReLU if onnx_compatible else nn.ReLU6
        pad = 0 if export_to_edgetpu else 1
        if qat:
            # Mark the quantized region for torch.quantization.prepare_qat.
            self.quant = QuantStub()
            self.dequant = DeQuantStub()
        # Two same-size 3x3 stages, a 2x2 pool, a 14x14 "global" conv,
        # then a 1x1 classifier conv.
        self.conv1 = nn.Conv2d(1, 32, 3, padding=pad, bias=False)   # (B, 32, 28, 28)
        if use_batch_norm:
            self.bn1 = nn.BatchNorm2d(32)
        self.relu1 = act_cls(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=pad, bias=False)  # (B, 64, 28, 28)
        if use_batch_norm:
            self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = act_cls(inplace=True)
        self.pool = nn.MaxPool2d(2, 2)                              # (B, 64, 14, 14)
        self.conv3 = nn.Conv2d(64, 128, 14, bias=False)             # (B, 128, 1, 1)
        if use_batch_norm:
            self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = act_cls(inplace=True)
        self.conv4 = nn.Conv2d(128, class_num, 1)                   # (B, class_num, 1, 1)

    def _pad_nhwc(self, x):
        # Explicit spatial 1-pixel padding performed in NHWC layout
        # (permute -> pad -> permute), which keeps the exported graph
        # EdgeTPU-friendly; numerically equal to Conv2d padding=1.
        x = x.permute(0, 2, 3, 1)
        x = F.pad(x, [0, 0, 1, 1, 1, 1])
        return x.permute(0, 3, 1, 2)

    def forward(self, x):
        if hasattr(self, 'quant'):
            x = self.quant(x)
        if self.export_to_edgetpu:
            # Input arrives NHWC; switch to NCHW, then pad via the helper.
            x = x.permute(0, 3, 1, 2)
            x = self._pad_nhwc(x)
        x = self.conv1(x)
        x = self.bn1(x) if hasattr(self, 'bn1') else x
        x = self.relu1(x)
        if self.export_to_edgetpu:
            x = self._pad_nhwc(x)
        x = self.conv2(x)
        x = self.bn2(x) if hasattr(self, 'bn2') else x
        x = self.relu2(x)
        x = self.pool(x)
        x = self.conv3(x)
        x = self.bn3(x) if hasattr(self, 'bn3') else x
        x = self.relu3(x)
        x = self.conv4(x)
        if hasattr(self, 'dequant'):
            x = self.dequant(x)
        return x


class NetCIFAR10(nn.Module):
    """All-convolutional CIFAR-10 classifier: maps a 32x32 RGB image to a
    (B, class_num, 1, 1) logit tensor.

    Args:
        class_num: number of output classes (channels of the final 1x1 conv).
        use_batch_norm: insert a BatchNorm2d after each of the first three convs.
        onnx_compatible: use ReLU (ONNX-friendly) instead of ReLU6.
        qat: register Quant/DeQuant stubs so the model can later be prepared
            for quantization-aware training (identity ops until prepared).
        export_to_edgetpu: build the 3x3 convs without implicit padding and
            pad explicitly in NHWC layout inside ``forward``; the model then
            expects NHWC input, e.g. (B, 32, 32, 3).
    """

    def __init__(self,
                 class_num,
                 use_batch_norm=True,
                 onnx_compatible=True,
                 qat=False,
                 export_to_edgetpu=False):
        super(NetCIFAR10, self).__init__()
        self.export_to_edgetpu = export_to_edgetpu
        act_cls = nn.ReLU if onnx_compatible else nn.ReLU6
        pad = 0 if export_to_edgetpu else 1
        if qat:
            # Mark the quantized region for torch.quantization.prepare_qat.
            self.quant = QuantStub()
            self.dequant = DeQuantStub()
        # Two conv/pool stages, an 8x8 "global" conv, then a 1x1 classifier.
        self.conv1 = nn.Conv2d(3, 32, 3, padding=pad, bias=False)   # (B, 32, 32, 32)
        if use_batch_norm:
            self.bn1 = nn.BatchNorm2d(32)
        self.relu1 = act_cls(inplace=True)
        self.pool1 = nn.MaxPool2d(2, 2)                             # (B, 32, 16, 16)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=pad, bias=False)  # (B, 64, 16, 16)
        if use_batch_norm:
            self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = act_cls(inplace=True)
        self.pool2 = nn.MaxPool2d(2, 2)                             # (B, 64, 8, 8)
        self.conv3 = nn.Conv2d(64, 128, 8, bias=False)              # (B, 128, 1, 1)
        if use_batch_norm:
            self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = act_cls(inplace=True)
        self.conv4 = nn.Conv2d(128, class_num, 1)                   # (B, class_num, 1, 1)

    def _pad_nhwc(self, x):
        # Explicit spatial 1-pixel padding performed in NHWC layout
        # (permute -> pad -> permute), which keeps the exported graph
        # EdgeTPU-friendly; numerically equal to Conv2d padding=1.
        x = x.permute(0, 2, 3, 1)
        x = F.pad(x, [0, 0, 1, 1, 1, 1])
        return x.permute(0, 3, 1, 2)

    def forward(self, x):
        if hasattr(self, 'quant'):
            x = self.quant(x)
        if self.export_to_edgetpu:
            # Input arrives NHWC; switch to NCHW, then pad via the helper.
            x = x.permute(0, 3, 1, 2)
            x = self._pad_nhwc(x)
        x = self.conv1(x)
        x = self.bn1(x) if hasattr(self, 'bn1') else x
        x = self.relu1(x)
        x = self.pool1(x)
        if self.export_to_edgetpu:
            x = self._pad_nhwc(x)
        x = self.conv2(x)
        x = self.bn2(x) if hasattr(self, 'bn2') else x
        x = self.relu2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.bn3(x) if hasattr(self, 'bn3') else x
        x = self.relu3(x)
        x = self.conv4(x)
        if hasattr(self, 'dequant'):
            x = self.dequant(x)
        return x


def _make_loaders(dataset_cls, root):
    """Build (train_loader, test_loader) for a torchvision dataset class.

    Both splits are downloaded if missing and normalized to roughly [-1, 1].
    The test loader is built with batch_size=1 / no workers; it is currently
    unused by ``main`` but kept so the test split is downloaded up front.
    """
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(0.5, 0.5)])
    train_dataset = dataset_cls(
        root=root,
        train=True,
        download=True,
        transform=transform)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=100,
        shuffle=True,
        num_workers=2)
    test_dataset = dataset_cls(
        root=root,
        train=False,
        download=True,
        transform=transform)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=0)
    return train_loader, test_loader


def _build_nets(net_cls, class_num):
    """Return (export_net, qat_net) for the given architecture.

    ``export_net`` is the plain FP32 model in EdgeTPU layout (explicit NHWC
    padding, no batch norm) that receives the fused weights for export;
    ``qat_net`` is the batch-normed, stub-equipped model that gets trained.
    """
    export_net = net_cls(
        class_num=class_num,
        use_batch_norm=False,
        onnx_compatible=True,
        qat=False,
        export_to_edgetpu=True)
    qat_net = net_cls(
        class_num=class_num,
        use_batch_norm=True,
        onnx_compatible=False,
        qat=True,
        export_to_edgetpu=False)
    return export_net, qat_net


def _train_qat(net_qat, train_loader, device, epochs=3):
    """Train ``net_qat`` in place with SGD + cross-entropy for ``epochs``."""
    net_qat.to(device)
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.SGD(net_qat.parameters(), lr=0.001, momentum=0.9)

    loss_list = []
    it = 0
    for epoch in range(epochs):
        for inputs, labels in tqdm(train_loader):
            it += 1

            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            outputs = net_qat(inputs)
            # (B, C, 1, 1) -> (B, C); flatten keeps this correct for any
            # class count instead of hard-coding 10.
            outputs = outputs.flatten(1)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            loss_list.append(loss.item())
            if it % 100 == 0:
                print(f'[epoch={epoch + 1}, it={it}] loss: {np.mean(loss_list):.3f}')
                loss_list = []


def _fuse_conv_bn(dst_conv, src_conv, src_bn):
    """Fold a trained BatchNorm into a conv and store the result on dst_conv.

    ``dst_conv`` was built with bias=False; assigning the fused bias
    Parameter registers it on the module so the exported net uses it.
    """
    dst_conv.weight, dst_conv.bias = fuse_conv_bn_weights(
        src_conv.weight,
        src_conv.bias,
        src_bn.running_mean,
        src_bn.running_var,
        src_bn.eps,
        src_bn.weight,
        src_bn.bias)


def main():
    """Train a model with quantization-aware training, fuse conv+BN weights
    into an EdgeTPU-layout FP32 twin, and save it as .pth and .onnx."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cpu')
    parser.add_argument('--dataset', type=str, choices=['mnist', 'cifar10'], default='mnist')
    parser.add_argument('--pth_path', type=str, default=osp.join('data', 'qat.pth'))
    parser.add_argument('--onnx_path', type=str, default=osp.join('data', 'qat.onnx'))
    args = parser.parse_args()

    # Race-free replacement for "if not exists: makedirs".
    os.makedirs('data', exist_ok=True)

    device = args.device

    if args.dataset == 'mnist':
        train_loader, _ = _make_loaders(torchvision.datasets.MNIST, './data')
        net, net_qat = _build_nets(NetMNIST, 10)
        trace_tensor = torch.randn(1, 28, 28, 1)  # NHWC dummy input for export
    elif args.dataset == 'cifar10':
        train_loader, _ = _make_loaders(torchvision.datasets.CIFAR10, 'data')
        net, net_qat = _build_nets(NetCIFAR10, 10)
        trace_tensor = torch.randn(1, 32, 32, 3)  # NHWC dummy input for export
    else:
        # Unreachable given argparse choices=, kept as a defensive guard;
        # SystemExit instead of the interactive-only exit() helper.
        raise SystemExit(f'dataset({args.dataset}) is invalid')

    # Insert fake-quant observers, then run QAT.
    net_qat.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
    torch.quantization.prepare_qat(net_qat, inplace=True)
    _train_qat(net_qat, train_loader, device, epochs=3)

    # Fold each trained BN into the matching conv of the export net; the
    # final 1x1 classifier conv has no BN and is copied directly.
    _fuse_conv_bn(net.conv1, net_qat.conv1, net_qat.bn1)
    _fuse_conv_bn(net.conv2, net_qat.conv2, net_qat.bn2)
    _fuse_conv_bn(net.conv3, net_qat.conv3, net_qat.bn3)
    net.conv4.weight = net_qat.conv4.weight
    net.conv4.bias = net_qat.conv4.bias

    net.cpu()
    net.eval()

    torch.save(net.state_dict(), args.pth_path)
    # NOTE(review): enable_onnx_checker was deprecated and later removed from
    # torch.onnx.export; on recent torch versions this kwarg raises TypeError
    # and should be dropped. Kept for compatibility with the pinned version.
    torch.onnx.export(
        model=net,
        args=trace_tensor,
        f=args.onnx_path,
        opset_version=9,
        input_names=['input'],
        output_names=['output'],
        do_constant_folding=True,
        enable_onnx_checker=False)


# Script entry point: run QAT training and export when invoked directly.
if __name__ == '__main__':
    main()
