from utils.preprocessing import *
from utils.train_mltscl import train, trainlog
from models.fucknet import fuckSeg, fuckSeg2
from torch.nn import BCEWithLogitsLoss
from torch.optim import lr_scheduler,Adam,RMSprop
from utils.preprocessing import gen_dataloader
from models.GCN import *
from models.losses import CrossEntropyLoss2d
from models.icnet_cut import icnet_portrait

# -- run configuration ---------------------------------------------------
os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # pin this run to GPU 0
img_root = '/media/hszc/data1/seg_data/'  # root directory of the segmentation dataset
bs = 24                                   # training batch size (also drives the LR schedule below)
resume = None                             # checkpoint path to resume from, or None for a fresh run
start_epoch = 0                           # epoch counter offset when resuming
save_dir = '/home/hszc/zhangchi/models/FigureSeg/icnet_cut-385'

# Create the checkpoint/log directory on first use.
# (Removed a dead `save_dir = save_dir` self-assignment that did nothing.)
if not os.path.exists(save_dir):
    os.makedirs(save_dir)

# Route `logging` output into a per-run log file inside save_dir.
logfile = '%s/trainlog.log' % save_dir
trainlog(logfile)


# Dataloaders: 10% of the data is held out for validation; both splits use
# batch size `bs` (previously val_bs was hard-coded to the same value, 24).
data_set, data_loader = gen_dataloader(img_root, validation_split=0.1, train_bs=bs, val_bs=bs)
# Use logging instead of a bare Python-2 `print` so the split sizes are
# recorded in the trainlog alongside everything else.
logging.info('train size: %d, val size: %d', len(data_set['train']), len(data_set['val']))

# Two-class (foreground/background) portrait segmentation network.
model = icnet_portrait(num_classes=2)
logging.info(model)
# 2-D cross-entropy loss over the per-pixel class scores (see models.losses).
criterion = CrossEntropyLoss2d()

# learning scheduler
# LR-decay boundaries and total epoch count, scaled with the batch size so
# that the reference batch size of 24 maps to epochs 25 / 50 / 75 / 100.
# (e / 24. keeps the exact float evaluation order of the original ratios.)
step1, step2, step3, steps = [int(bs * (e / 24.)) for e in (25., 50., 75., 100.)]
logging.info('lr steps1: %d' % step1)
logging.info('lr steps2: %d' % step2)
logging.info('lr steps3: %d' % step3)
logging.info('total steps: %d' % steps)

def lr_lambda(epoch):
    """Piecewise-constant LR multiplier.

    Returns 1 until step1, then 0.1 until step2, 0.05 until step3,
    and 0.01 thereafter.
    """
    for boundary, factor in ((step1, 1), (step2, 0.1), (step3, 0.05)):
        if epoch < boundary:
            return factor
    return 0.01

# Adam with mild weight decay; LambdaLR multiplies the base lr (1e-4) by
# lr_lambda(epoch) every scheduler step.
optimizer = Adam(model.parameters(), lr=1e-4, weight_decay=1e-5)
exp_lr_scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

if resume:
    # NOTE(review): eval() here puts the model in inference mode before the
    # weights are loaded; presumably train() switches it back to training
    # mode -- confirm, otherwise a resumed run trains with eval-mode
    # batch-norm/dropout behavior.
    model.eval()
    logging.info('resuming finetune from %s' % resume)
    try:
        model.load_state_dict(torch.load(resume))
    except KeyError:
        # Checkpoints saved from a DataParallel model prefix every key with
        # 'module.'; retry the load after wrapping the model accordingly.
        # (Older PyTorch raised KeyError on such mismatches.)
        model = torch.nn.DataParallel(model)
        model.load_state_dict(torch.load(resume))
    # Assumes the optimizer checkpoint sits next to the weights in save_dir;
    # this raises if 'optimizer-state.pth' is missing there.
    optimizer.load_state_dict(torch.load(os.path.join(save_dir, 'optimizer-state.pth')))
# Move parameters to the GPU selected via CUDA_VISIBLE_DEVICES above.
model.cuda()

# Launch training; `steps` (computed from bs above) is the total epoch count.
train_kwargs = dict(
    epoch_num=steps,
    start_epoch=start_epoch,
    optimizer=optimizer,
    criterion=criterion,
    exp_lr_scheduler=exp_lr_scheduler,
    data_set=data_set,
    data_loader=data_loader,
    save_dir=save_dir,
    print_inter=200,   # log training stats every 200 iterations
    val_inter=2000,    # run validation every 2000 iterations
)
train(model, **train_kwargs)