import torch
from torch import Tensor
from torch.autograd import Variable
from torch.nn import Module
from torch.utils.data import DataLoader

from fc.torch.iris.TorchDataset import MyDataset


def train(model: Module, x: Tensor, y: Tensor, save_path: str,
          epochs: int = 10, lr: float = 0.1, batch_size: int = 32):
    """Train a sequence model on variable-length samples and save it.

    Padding positions in ``y`` are marked with negative values; they are
    excluded from the loss. The model is called as
    ``model(packed_inputs, current_batch_size)`` and is assumed to return
    logits shaped (batch, seq, num_classes) aligned with the truncated
    targets — TODO confirm against the model's forward signature.

    Args:
        model: module to train.
        x: padded input tensor, batch-first — presumably
            (num_samples, seq_len, ...); verify against MyDataset.
        y: padded target tensor (num_samples, seq_len); entries < 0
            mark padding and are ignored in the loss.
        save_path: path where the whole model object is serialized.
        epochs: number of passes over the data (default 10, as before).
        lr: SGD learning rate (default 0.1, as before).
        batch_size: mini-batch size (default 32, as before).
    """
    dataset = MyDataset(x, y)
    data_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    for epoch in range(epochs):
        # Fresh names for the batch tensors so the function arguments
        # `x`/`y` are not shadowed and overwritten (old bug-prone style).
        for batch_x, batch_y in data_loader:
            # pack_padded_sequence expects (seq, batch, ...) by default.
            batch_x = torch.transpose(batch_x, 0, 1)
            # True length of each sample = count of non-padding (>= 0)
            # target entries; vectorized instead of nested Python loops.
            lengths = (batch_y >= 0).sum(dim=1)
            # Samples within a batch may differ in length — pack them.
            inputs = torch.nn.utils.rnn.pack_padded_sequence(
                input=batch_x, lengths=lengths.tolist(), enforce_sorted=False)
            # The padded model output may drop columns beyond the longest
            # sequence in this batch; truncate the targets to match.
            target = batch_y[:, :int(lengths.max())]
            predict: Tensor = model(inputs, target.shape[0])
            # CrossEntropyLoss wants (N, C) logits and (N,) class indices,
            # so flatten the batch and time dimensions together.
            predict = predict.reshape(predict.shape[0] * predict.shape[1], -1)
            target = target.reshape(-1)
            # Drop padded positions (negative targets) from the loss.
            mask = target >= 0
            loss = criterion(predict[mask], target[mask])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print(loss.item())

    # NOTE(review): saving the full module object; torch's recommended
    # practice is saving state_dict, but callers may load the whole model.
    torch.save(model, save_path)
