from data import make_data
import argparse
import torch
from danet import get_danet
from utils import Segmentational_Loss

# Training setup: these module-level globals (dev, loss_func, model, opt)
# are shared by validation() and the training loop under __main__.
# Fall back to CPU when CUDA is unavailable; the original hard-coded
# 'cuda' and crashed on GPU-less machines.
dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
loss_func = Segmentational_Loss.FocalLoss()  # focal loss (project-local utils module)
model = get_danet()  # DANet segmentation network (project-local danet module)
model = model.to(dev)
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

def validation():
    """Run one full pass over the validation set, printing running mean loss.

    Reads the module-level globals ``model``, ``valid_dl``, ``loss_func``
    and ``dev``. After every batch it prints the running average of the
    validation loss so far. Returns nothing.
    """
    model.eval()
    val_loss = 0.0
    # Renamed from the original's misleading 'epoch': this counts batches
    # within the single validation pass.
    batch = 0
    # no_grad around the whole loop: neither the forward pass nor the loss
    # needs autograd bookkeeping during evaluation (the original wrapped
    # only the forward pass).
    with torch.no_grad():
        for sample in valid_dl:
            batch += 1
            xb, yb = sample['image'], sample['label']
            xb, yb = xb.to(dev), yb.to(dev)
            # Labels appear to arrive as (N, 1, H, W); drop the channel dim
            # for the loss -- TODO confirm against make_data.
            yb = yb.squeeze(1)
            # DANet returns multiple outputs; index 0 is the primary head.
            pred = model(xb)[0]
            loss = loss_func(pred, yb).item()
            val_loss += loss
            # NOTE(review): the 'epoch' label in this message actually
            # reports the batch index; message text kept for compatibility.
            print('epoch:{},val loss:{}'.format(batch, val_loss / batch))


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='make dataloader')
    # Bug fix: the original used nargs='+', which made args.batch_size a
    # *list* of ints when passed on the command line but a plain int (the
    # default) otherwise. Drop nargs so the type is consistently int.
    parser.add_argument('-batch_size', type=int, help='批尺寸',
                        dest='batch_size', default=1)
    args = parser.parse_args()
    kwargs = {}
    train_dl, valid_dl = make_data(args, **kwargs)

    for epoch in range(5):
        # Hoisted out of the batch loop: train mode only needs setting once
        # per epoch (validation() switches the model back to eval mode).
        model.train()
        for sample in train_dl:
            xb = sample['image']
            yb = sample['label']
            # Drop the label channel dim (N,1,H,W) -> (N,H,W) for the loss.
            yb = yb.squeeze(1)
            xb, yb = xb.to(dev), yb.to(dev)
            pred = model(xb)[0]  # primary DANet output head
            loss = loss_func(pred, yb)

            loss.backward()
            opt.step()
            opt.zero_grad()
        # Same output as the original's separate 'nums' counter, which
        # duplicated the loop variable.
        print('epoch{}'.format(epoch))
        # Bug fix: validation() was defined but never called; evaluate on
        # the validation set after each training epoch.
        validation()

    # NOTE(review): saving the whole model object pins the pickle to this
    # module layout; model.state_dict() would be more portable, but the
    # output format is kept so existing torch.load callers still work.
    torch.save(model, './danet_focal_SGD.pth')
