from model import Unet_vgg
from utils import *
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader


# Select GPU 0 when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Pascal VOC 2007 segmentation split files: one sample id per line.
train_set_path = "./VOCdevkit/VOC2007/ImageSets/Segmentation/train.txt"
val_set_path = "./VOCdevkit/VOC2007/ImageSets/Segmentation/val.txt"

with open(train_set_path, "r", encoding="utf-8") as f:
    train_lines = f.readlines()
with open(val_set_path, "r", encoding="utf-8") as f:
    val_lines = f.readlines()

input_shape = [512, 512]  # network input height x width
num_classes = 21          # 20 VOC object classes + background
num_channels = 3          # RGB input
batch_size = 5

# The boolean flag presumably toggles train-time augmentation inside
# UnetDataset -- confirm against its definition in utils.
train_dataset = UnetDataset(train_lines, input_shape, num_classes, True, '.')
val_dataset = UnetDataset(val_lines, input_shape, num_classes, False, '.')

train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=1, pin_memory=True,
                          drop_last=True, collate_fn=unet_dataset_collate)
# Fix: validation must visit every sample exactly once in a deterministic
# order -- no shuffling, and no dropped tail batch.
val_loader = DataLoader(val_dataset, shuffle=False, batch_size=batch_size, num_workers=1, pin_memory=True,
                        drop_last=False, collate_fn=unet_dataset_collate)

# Optional (disabled): compute per-class pixel-frequency weights for the
# loss. Two fixes applied for anyone re-enabling this: iterate
# train_loader (the name `train` was undefined here), and index only
# 0..num_classes-1, since cls_weights has num_classes entries.
# cls_weights = torch.zeros(num_classes)
# for d, l, _ in tqdm(train_loader):
#     for i in range(num_classes):
#         cls_weights[i] += (l == i).sum()

# cls_weights = cls_weights[1:-1].mean() / cls_weights
# np.save('cls_weights.npy', cls_weights.cpu().numpy())
# cls_weights = torch.Tensor(np.load("cls_weights.npy", allow_pickle=True)).to(device)

# Build the VGG-backbone U-Net, move it to the chosen device, and put it
# in training mode (.to returns the model; .train() mutates in place).
model = Unet_vgg(num_classes=num_classes, pretrained=True)
model = model.to(device)
model.train()

# Pixels labelled with index `num_classes` are excluded from the loss --
# presumably the dataset maps VOC void/border pixels to that index
# (verify against UnetDataset in utils).
loss_fn = nn.CrossEntropyLoss(ignore_index=num_classes)
def lambdaf(epoch): return 0.8 ** epoch


# # checkpoint -- uncomment to resume from a previously saved state dict.
# checkpoint = torch.load('vgg_pretrain.pth')
# model.load_state_dict(checkpoint)

# Phase 1: train with the VGG backbone frozen, so only the remaining
# (decoder) parameters receive meaningful updates.
epoch = 6   # number of epochs for this phase
lr = 1e-4   # base learning rate; decayed by lambdaf via LambdaLR
optimizer = optim.Adam(model.parameters(), lr)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambdaf)

# Presumably sets requires_grad=False on the VGG layers -- confirm in
# the Unet_vgg definition.
model.freeze_backbone()
train_proc(model, train_loader, loss_fn, optimizer,
           scheduler, epoch, num_classes, device)

# Phase 2: fine-tune the whole network with the backbone unfrozen.
# A fresh optimizer/scheduler restarts Adam's moment estimates and the
# 0.8**epoch LR decay from the base lr.
epoch = 8
lr = 1e-4
optimizer = optim.Adam(model.parameters(), lr)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambdaf)

model.unfreeze_backbone()
train_proc(model, train_loader, loss_fn, optimizer,
           scheduler, epoch, num_classes, device)

# Persist the final weights for later loading via the checkpoint block above.
torch.save(model.state_dict(), 'vgg_pretrain.pth')
