import mindspore
from mindspore import nn
from mindspore import ops


# Image pipeline: resize the HWC uint8 image first, then ToTensor (HWC -> CHW,
# scaled to [0, 1]), then normalize. After ToTensor the layout is CHW, so
# Normalize must be told is_hwc=False or it would normalize the wrong axes.
transform = mindspore.dataset.transforms.Compose([
    mindspore.dataset.vision.Resize((224, 224)),
    mindspore.dataset.vision.ToTensor(),
    mindspore.dataset.vision.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], is_hwc=False),
])
# CrossEntropyLoss expects integer class indices; Cifar10Dataset yields uint32 labels.
label_transform = mindspore.dataset.transforms.TypeCast(mindspore.int32)
training_data = (mindspore.dataset.Cifar10Dataset(dataset_dir='data', usage='train')
                 .map(operations=transform, input_columns='image')
                 .map(operations=label_transform, input_columns='label'))
testing_data = (mindspore.dataset.Cifar10Dataset(dataset_dir='data', usage='test')
                .map(operations=transform, input_columns='image')
                .map(operations=label_transform, input_columns='label'))
batch_size = 64
# Shuffle BEFORE batching (shuffling batches defeats the purpose), and batch
# exactly once. drop_remainder=True keeps training shapes static.
train_data = training_data.shuffle(buffer_size=1024).batch(batch_size=batch_size, drop_remainder=True)
# No shuffle needed for evaluation; keep the remainder so every test sample counts.
test_data = testing_data.batch(batch_size=batch_size, drop_remainder=False)


class Bottleneck(mindspore.nn.Cell):
    """ResNet bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    Args:
        in_channels: channels entering the block.
        out_channels: channels of the middle 3x3 conv; the block outputs
            ``out_channels * 4`` channels.
        stride: per-conv strides ``[s1, s2, s3]``; spatial downsampling is
            done by the middle conv (``stride[1]``).
        padding: per-conv paddings ``[p1, p2, p3]``.
        first: True for the first block of a stage, where the identity
            shortcut must be projected (1x1 conv) to match channels/stride.
    """

    def __init__(self, in_channels, out_channels, stride=[1, 1, 1], padding=[0, 1, 0], first=False) -> None:
        super(Bottleneck, self).__init__()
        self.bottleneck = mindspore.nn.SequentialCell(
            mindspore.nn.Conv2d(in_channels, out_channels, 1, stride[0], pad_mode='pad', padding=padding[0], has_bias=False),
            mindspore.nn.BatchNorm2d(out_channels),
            mindspore.nn.ReLU(),
            mindspore.nn.Conv2d(out_channels, out_channels, 3, stride[1], pad_mode='pad', padding=padding[1], has_bias=False),
            mindspore.nn.BatchNorm2d(out_channels),
            mindspore.nn.ReLU(),
            mindspore.nn.Conv2d(out_channels, out_channels * 4, 1, stride[2], pad_mode='pad', padding=padding[2], has_bias=False),
            mindspore.nn.BatchNorm2d(out_channels * 4),
        )
        # Identity shortcut by default; projection shortcut for the first
        # block of a stage (channel count and possibly stride change).
        self.shortcut = mindspore.nn.SequentialCell()
        if first:
            self.shortcut = mindspore.nn.SequentialCell(
                mindspore.nn.Conv2d(in_channels, out_channels * 4, 1, stride[1], pad_mode='pad', has_bias=False),
                mindspore.nn.BatchNorm2d(out_channels * 4),
            )

    def construct(self, x):
        out = self.bottleneck(x)
        out += self.shortcut(x)
        # BUG FIX: the original assigned the ReLU *operator object*
        # (`out = mindspore.ops.ReLU()`) instead of applying it to `out`,
        # so the block returned an operator, not a tensor.
        out = ops.relu(out)
        return out


class ResNet50(mindspore.nn.Cell):
    """ResNet-50 classifier built from bottleneck blocks (3/4/6/3 per stage).

    Args:
        Bottleneck: the residual block class to instantiate (injected so the
            block implementation can be swapped).
        num_classes: number of output classes (default 10 for CIFAR-10).
    """

    def __init__(self, Bottleneck, num_classes=10) -> None:
        super(ResNet50, self).__init__()
        self.in_channels = 64
        # Stem: 7x7/2 conv -> BN -> ReLU -> 3x3/2 max-pool.
        # BUG FIX: the original stem omitted the ReLU between BatchNorm and
        # MaxPool; the standard ResNet stem is Conv-BN-ReLU-MaxPool.
        self.conv1 = mindspore.nn.SequentialCell(
            mindspore.nn.Conv2d(3, 64, 7, 2, pad_mode='pad', padding=3, has_bias=False),
            mindspore.nn.BatchNorm2d(64),
            mindspore.nn.ReLU(),
            mindspore.nn.MaxPool2d(3, 2),
        )
        # Stages conv2..conv5: the first block of stages 3-5 downsamples
        # (middle-conv stride 2) and projects the shortcut.
        self.conv2 = self._make_layer(Bottleneck, 64, [[1, 1, 1]] * 3, [[0, 1, 0]] * 3)
        self.conv3 = self._make_layer(Bottleneck, 128, [[1, 2, 1]] + [[1, 1, 1]] * 3, [[0, 1, 0]] * 4)
        self.conv4 = self._make_layer(Bottleneck, 256, [[1, 2, 1]] + [[1, 1, 1]] * 5, [[0, 1, 0]] * 6)
        self.conv5 = self._make_layer(Bottleneck, 512, [[1, 2, 1]] + [[1, 1, 1]] * 2, [[0, 1, 0]] * 3)
        # Global average pool to 1x1, then a linear head over 512*4 channels.
        self.avgpool = mindspore.ops.AdaptiveAvgPool2D((1, 1))
        self.fc = mindspore.nn.Dense(2048, num_classes)

    def _make_layer(self, block, out_channels, strides, paddings):
        """Stack one stage of bottleneck blocks; only the first gets a projection shortcut."""
        layers = []
        flag = True
        for i in range(0, len(strides)):
            layers.append(block(self.in_channels, out_channels, strides[i], paddings[i], first=flag))
            flag = False
            # Every block in the stage after the first sees the expanded width.
            self.in_channels = out_channels * 4
        return mindspore.nn.SequentialCell(*layers)

    def construct(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        out = self.conv4(out)
        out = self.conv5(out)
        out = self.avgpool(out)
        # Flatten (N, 2048, 1, 1) -> (N, 2048) for the classifier head.
        out = out.reshape(x.shape[0], -1)
        out = self.fc(out)
        return out


def train_loop(model, dataset, loss_fn, optimizer):
    """Run one training epoch over `dataset`, printing the loss every 100 steps."""

    def forward_fn(inputs, targets):
        # Return loss first so value_and_grad differentiates it; keep logits as aux.
        logits = model(inputs)
        return loss_fn(logits, targets), logits

    grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=True)

    def train_step(inputs, targets):
        (loss, _), grads = grad_fn(inputs, targets)
        # depend() forces the optimizer update to run before loss is used.
        return ops.depend(loss, optimizer(grads))

    num_steps = dataset.get_dataset_size()
    model.set_train()
    for step, (inputs, targets) in enumerate(dataset.create_tuple_iterator()):
        loss = train_step(inputs, targets)
        if step % 100 == 0:
            print(f"loss: {loss.asnumpy():>7f}  [{step:>3d}/{num_steps:>3d}]")


def test_loop(model, dataset, loss_fn):
    """Evaluate `model` on `dataset` and print accuracy and mean per-batch loss."""
    num_batches = dataset.get_dataset_size()
    model.set_train(False)
    seen = 0
    loss_sum = 0
    hits = 0
    for inputs, targets in dataset.create_tuple_iterator():
        logits = model(inputs)
        seen += len(inputs)
        loss_sum += loss_fn(logits, targets).asnumpy()
        hits += (logits.argmax(1) == targets).asnumpy().sum()
    avg_loss = loss_sum / num_batches
    accuracy = hits / seen
    print(f"Test: \n Accuracy: {(100 * accuracy):>0.1f}%, Avg loss: {avg_loss:>8f} \n")


# Hyperparameters and training objects.
epochs = 10
learning_rate = 0.01
model = ResNet50(Bottleneck)
loss_fn = nn.CrossEntropyLoss()
optimizer = nn.SGD(model.trainable_params(), learning_rate=learning_rate)


# Alternate one training pass and one evaluation pass per epoch.
for _ in range(epochs):
    train_loop(model, train_data, loss_fn, optimizer)
    test_loop(model, test_data, loss_fn)

