import numpy as np
import mindspore as ms
from mindspore import nn, dataset, ops
from utils import MySets, MAPE, split_set
from mindvision.engine.callback import ValAccMonitor


class Net(nn.Cell):
    """LSTM regressor.

    Encodes a length-``depth`` sequence of scalar features with a
    single-layer LSTM, flattens the per-step outputs, and maps them
    through two batch-normalized dense stages to a single output value.
    """

    def __init__(self, depth=4, hidden=16, auto_prefix=True, flags=None):
        super().__init__(auto_prefix, flags)
        self.depth = depth      # expected time-step count of the input sequence
        self.hidden = hidden    # LSTM hidden-state width

        # Sequence encoder: one scalar feature per time step, batch-major input.
        self.rnn = nn.LSTM(input_size=1, hidden_size=self.hidden,
                           num_layers=1, batch_first=True)

        # Regression head over the flattened per-step LSTM outputs.
        self.norm = nn.BatchNorm1d(self.depth * hidden)
        self.fc = nn.Dense(self.depth * hidden, 16)

        self.act = nn.ReLU()
        self.norm1 = nn.BatchNorm1d(16)
        self.output = nn.Dense(16, 1)

    def construct(self, *inputs):
        # nn.LSTM returns (outputs, (h_n, c_n)); only the per-step outputs are kept.
        feats, _ = self.rnn(*inputs)
        # Flatten (batch, depth, hidden) -> (batch, depth*hidden) for BatchNorm1d.
        # NOTE(review): assumes the input sequence length equals `depth` — confirm.
        flat = self.norm(feats.reshape(-1, self.hidden * self.depth))

        hid = self.norm1(self.act(self.fc(flat)))
        return self.output(hid)


# Module-level training setup: network, loss, optimizer, and the Model
# wrapper consumed by the __main__ block below.
net = Net()

# Mean-squared-error loss for scalar regression.
loss_fn = nn.MSELoss()
# SGD over all trainable parameters: learning rate 1e-3, momentum 0.9.
optims = nn.optim.SGD(net.trainable_params(), 1e-3, 0.9)

# eval_net = nn.WithEvalCell(net, loss_fn)


# Metrics: MAPE is project-local (utils.MAPE); MAE comes from mindspore.nn.
model = ms.Model(net, loss_fn, optims, metrics={
                 "MAPE": MAPE(), "MAE": nn.MAE()})


if __name__ == "__main__":
    # Build train/test pipelines from the project-local splitter.
    train, test = split_set()
    train_ds = dataset.GeneratorDataset(train, ['x', 'y'], shuffle=True).batch(256)
    test_ds = dataset.GeneratorDataset(test, ['x', 'y']).batch(512)
    sets = {"train": train_ds, "test": test_ds}

    # Train 10 epochs: log loss every 10 steps, validate on the test split
    # every epoch.
    # NOTE(review): ValAccMonitor keeps the checkpoint with the *highest*
    # metric value; for MAE lower is better — confirm this is intended.
    callbacks = [ms.LossMonitor(10),
                 ValAccMonitor(model, sets['test'], 1, metric_name='MAE')]
    model.train(10, sets['train'], callbacks=callbacks)
    # res = model.eval(sets['test'],callbacks=ms.LossMonitor(5))
    # print(res)
