import mindspore as ms
import mindspore.ops as msops
import mindspore.nn as msnn

import os
import sys
import traceback
from functools import partial, reduce
from typing import *

import sty
import tqdm
import numpy as np

# Eager (PyNative) execution on CPU — ops run imperatively, which suits this
# small debugging/experiment script.
ms.set_context(mode=ms.context.PYNATIVE_MODE, device_target="CPU")

class Model(msnn.Cell):
    """Three-stage convolutional network: (conv -> batchnorm -> relu) x 3.

    Expects NCHW input with 3 channels; the final 1x1 convolution projects
    down to a single output channel at the same spatial size. Each conv cell
    is marked for recomputation, trading extra forward compute for a smaller
    activation-memory footprint during backprop.
    """

    def __init__(self):
        super().__init__()
        # Stage 1: 3 -> 16 channels, 3x3 kernel, "same" padding.
        self.conv1 = msnn.Conv2d(3, 16, 3, stride=1, pad_mode="same")
        self.conv1.recompute()
        self.batchnorm1 = msnn.BatchNorm2d(16)
        # Stage 2: 16 -> 32 channels, 3x3 kernel, "same" padding.
        self.conv2 = msnn.Conv2d(16, 32, 3, stride=1, pad_mode="same")
        self.conv2.recompute()
        self.batchnorm2 = msnn.BatchNorm2d(32)
        # Stage 3: 32 -> 1 channel, 1x1 projection (no padding needed).
        self.conv3 = msnn.Conv2d(32, 1, 1, stride=1, pad_mode="valid")
        self.conv3.recompute()
        self.batchnorm3 = msnn.BatchNorm2d(1)
        # Shared activation primitive reused by every stage.
        self.relu = msops.ReLU()

    def construct(self, x):
        """Run the three conv -> batchnorm -> relu stages in order."""
        stages = (
            (self.conv1, self.batchnorm1),
            (self.conv2, self.batchnorm2),
            (self.conv3, self.batchnorm3),
        )
        for conv, bn in stages:
            x = self.relu(bn(conv(x)))
        return x


def loss_fn(y_pred, y_true):
    """Return the mean squared error between *y_pred* and *y_true*."""
    err = y_pred - y_true
    return ms.numpy.mean(err * err)


if __name__ == "__main__":
    print(__file__)

    model = Model()
    optimizer = msnn.Adam(model.trainable_params(), learning_rate=0.001)

    # Synthetic regression data: 1000 random 3x32x32 inputs paired with
    # random 1x32x32 targets; float32 matches the network's parameter dtype.
    XXs = np.random.randn(1000, 3, 32, 32).astype(np.float32)
    YYs = np.random.randn(1000, 1, 32, 32).astype(np.float32)
    ds = ms.dataset.NumpySlicesDataset({"data": XXs, "label": YYs}, shuffle=False)
    ds = ds.batch(2)

    # High-level training wrapper; MSELoss is used here rather than the
    # module-level loss_fn (which remains available for manual stepping).
    mm = ms.train.Model(
        network=model,
        loss_fn=msnn.loss.MSELoss(),
        optimizer=optimizer,
    )
    # NOTE(review): with epoch=10 and initial_epoch=1, MindSpore resumes at
    # epoch 1 and therefore performs 9 training epochs, not 10 — confirm
    # this resume offset is intended.
    mm.train(train_dataset=ds, epoch=10, initial_epoch=1)