import mindspore as ms
from mindspore import nn, dataset, ops
from mindvision.engine.callback import LossMonitor
import numpy as np

# Run eagerly (PyNative mode) on CPU so the model is easy to step through and debug.
ms.context.set_context(device_target='CPU',mode = ms.PYNATIVE_MODE)

class Cnn_Rnn(nn.Cell):
    """Hybrid RNN+CNN regressor.

    An LSTM encodes the input series, four parallel 1-D conv branches
    (kernel sizes 1/3/5/7) extract features from the hidden states, and a
    small fully-connected head produces a single regression value.

    Expects input of shape (batch, seq_len, 1). The conv/pool stage
    assumes seq_len == ``depth`` so max-pooling collapses the time axis
    to length 1 before the dense head — TODO confirm against the dataset.
    """

    def __init__(self, depth=4, hidden=64, auto_prefix=True, flags=None):
        """
        Args:
            depth: max-pool kernel size; should equal the input sequence
                length so the pooled time axis has length 1.
            hidden: LSTM hidden size (also the conv input channel count).
            auto_prefix, flags: forwarded to ``nn.Cell``.
        """
        super().__init__(auto_prefix, flags)
        self.depth = depth
        self.hidden = hidden
        self.kers = [1, 3, 5, 7]  # kernel size of each parallel conv branch

        # Encode (batch, seq, 1) -> (batch, seq, hidden).
        self.rnn = nn.LSTM(1, hidden, 1, batch_first=True)

        # One branch per kernel size: 'pad' mode with (ker-1)//2 padding keeps
        # the sequence length, then pooling shrinks it by a factor of `depth`.
        self.convs = nn.CellList([nn.SequentialCell(
            nn.Conv1d(hidden, 16, ker, padding=(ker - 1) // 2, pad_mode='pad'),
            nn.MaxPool1d(self.depth),
            nn.ReLU())
            for ker in self.kers])

        feat = 16 * len(self.kers)  # channels after concatenating all branches
        self.fc = nn.SequentialCell(
            nn.BatchNorm1d(feat),
            nn.Dense(feat, feat),
            nn.ReLU()
        )
        self.fc1 = nn.SequentialCell(
            nn.BatchNorm1d(feat),
            nn.Dense(feat, 16),
            nn.ReLU()
        )

        self.output = nn.Dense(16, 1)

    def construct(self, *inputs, **kwargs):
        """Forward pass; returns a (batch, 1) regression output."""
        x, _ = self.rnn(*inputs)    # (batch, seq, hidden)
        x = x.transpose(0, 2, 1)    # (batch, hidden, seq) — Conv1d wants channels first

        # Each branch yields (batch, 16, seq/depth); concat along channels.
        x = ops.concat([conv(x) for conv in self.convs], 1)

        # BUG FIX: squeeze only the length-1 time axis. A bare squeeze()
        # also drops the batch axis when batch == 1, producing a 1-D tensor
        # that BatchNorm1d/Dense cannot consume.
        x = self.fc(x.squeeze(2))
        x = self.fc1(x)
        x = self.output(x)

        return x



# Model, loss and optimizer shared by the training/eval driver below.
net = Cnn_Rnn()
loss_fn = nn.MSELoss()
optim = nn.optim.SGD(params=net.trainable_params(), learning_rate=1e-3, momentum=0.9)



if __name__ == "__main__":
    from utils import split_set, MAPE

    batch_size = 256
    train, test = split_set()

    # Wrap the raw splits in MindSpore datasets; only training is shuffled.
    sets = {
        "train": dataset.GeneratorDataset(train, ['data', 'target'], shuffle=True).batch(batch_size),
        "test": dataset.GeneratorDataset(test, ['data', 'target']).batch(batch_size),
    }

    metrics = {"MAE": nn.MAE(), "MAPE": MAPE()}
    model = ms.Model(net, loss_fn, optim, metrics)

    # Train for 10 epochs, logging the running loss every 10 steps.
    model.train(10, sets['train'], callbacks=ms.LossMonitor(10))

    # Evaluate on the held-out split and report the metric dict.
    out = model.eval(sets['test'], callbacks=ms.LossMonitor(10))
    print(out)
