import os
import torch
import argparse
from torch.utils.data import DataLoader
from src.models.modnet import MODNet
from src.utils import filter_parameters_by_type
from src.trainer import supervised_training_iter, soc_adaptation_iter
from dataset.data import DatasetParamsMatting
from tensorboardX import SummaryWriter
import torch.nn.functional as F
import logging
logging.getLogger().setLevel(logging.INFO)

# Command-line arguments
parser = argparse.ArgumentParser(
    'SOC Training'
)

parser.add_argument('--log', 
    default='./logs/tb_stablize',
    help='tensorboard log path'
)

parser.add_argument('--save_folder', 
    default='./ckpt_stable/',
    help='checkpoint path'
)

parser.add_argument('--data_path', 
    default='/home/chengk/params-photo-serving/chk/gaze_human_reason/raw/lock/',
    help='image data folder'
)

parser.add_argument('--anno_path',
    default='/home/chengk/params-photo-serving/chk/gaze_human_reason/raw_matting',
    help='anno data folder'
)

parser.add_argument('--log_image_every_step',
    default=200,
    # type=int is required: without it a CLI-supplied value is a str and
    # `(idx+1) % log_image_every_step` raises TypeError at runtime.
    type=int,
    help='as param name shows'
)

parser.add_argument('--lr',
    default=1e-5,
    # type=float is required: without it a CLI-supplied value is a str and
    # torch.optim.Adam rejects it as a learning rate.
    type=float,
    help='learning rate'
)

parser.add_argument('--pretrained_model', 
    default='./modnet_webcam_portrait_matting.ckpt',
    help='pretrained model path'
)

args = parser.parse_args()

# Tensorboard writer and checkpoint directory.
tb_logger = SummaryWriter(args.log)
folder_ckpt = args.save_folder
if not os.path.exists(folder_ckpt):
    os.makedirs(folder_ckpt)

bs = 4        # batch size
lr = args.lr       # learning rate
epochs = 50     # total epochs
log_image_every_step = args.log_image_every_step

device = torch.device('cuda:0')
ckpt_pretrain = args.pretrained_model
ckpt = torch.load(ckpt_pretrain, map_location='cpu')

# Two copies of MODNet initialized from the same pretrained checkpoint:
# `modnet` is fine-tuned, while `modnet_freeze` is kept in eval mode and
# used (under no_grad in the loop below) as a frozen reference predictor.
modnet = torch.nn.DataParallel(MODNet(backbone_pretrained=False)).cuda()
modnet_freeze = torch.nn.DataParallel(MODNet(backbone_pretrained=False)).cuda()
modnet.load_state_dict(ckpt)
modnet.module.freeze_norm()  # stop BatchNorm statistics from updating
modnet_freeze.load_state_dict(ckpt)
modnet_freeze.eval()
# Stop BatchNorm updates
# Stop the detail and fusion branches
# train_params = filter_parameters_by_type(modnet, torch.nn.modules.batchnorm.BatchNorm2d)
# train_params = modnet.module.lr_branch.parameters()
# NOTE(review): despite the two comments above, ALL parameters are passed to
# the optimizer here (the branch-restricted alternatives are commented out).
train_params = modnet.parameters()
optimizer = torch.optim.Adam(train_params, lr=lr, betas=(0.9, 0.99))
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(0.25 * epochs), gamma=0.1)

# dataset = DatasetNoLabel(root=args.data_path)
dataset = DatasetParamsMatting(root_img=args.data_path, root_anno=args.anno_path)
print('数据集数量:', len(dataset))
dataloader = DataLoader(dataset, bs, num_workers=4, drop_last=True, shuffle=True)     # NOTE: please finish this function

step_scalar, step_image = 0, 0
for epoch in range(epochs):
    for idx, data in enumerate(dataloader):
        # Each item: (image, alpha matte, receptive weight map), all
        # (B, C, H, W) — the dim=(1, 2, 3) reductions below rely on this.
        image, alpha, receptive = data

        image_cpu = image  # keep a CPU copy for tensorboard visualization
        image = image.to(device)
        alpha = alpha.to(device)
        receptive = receptive.to(device)

        # Pixels whose alpha is exactly 0 or 1 are "certain" foreground /
        # background; everything else is the semi-transparent transition region.
        alpha_mask = ((alpha == 1) | (alpha == 0)).float()
        transition_mask = 1 - alpha_mask

        pred_semantic, pred_detail, pred_matte = modnet(image, False)  # same as SOC training
        with torch.no_grad():
            _, _, pred_matte_freeze = modnet_freeze(image, True)

        # Certain pixels are supervised by the annotation; transition pixels
        # are pulled toward the frozen pretrained prediction instead.
        matte_loss = receptive * alpha_mask * F.l1_loss(pred_matte, alpha, reduction='none')
        detail_loss = receptive * transition_mask * F.l1_loss(pred_matte, pred_matte_freeze, reduction='none')
        assert matte_loss.size() == detail_loss.size()
        # Per-sample normalization by each mask's pixel count (1e-6 guards
        # against an empty mask), then a 0.1-weighted transition term.
        matte_loss = torch.sum(matte_loss, dim=(1, 2, 3)) / (1e-6 + torch.sum(alpha_mask, dim=(1, 2, 3))) + \
                        0.1 * torch.sum(detail_loss, dim=(1, 2, 3)) / (1e-6 + torch.sum(transition_mask, dim=(1, 2, 3)))

        loss = torch.mean(matte_loss)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Tensorboard image log: stack input (rescaled from the apparent
        # [-1, 1] range), both predictions, annotation and masks into one grid.
        if (idx + 1) % log_image_every_step == 0:
            # detach() before cpu(): pred_matte requires grad, and tensors that
            # require grad cannot be converted to numpy by the tensorboard writer.
            img_plot = torch.cat([(image_cpu + 1) / 2,
                            torch.cat([pred_matte_freeze.detach().cpu()] * 3, 1),
                            torch.cat([pred_matte.detach().cpu()] * 3, 1),
                            torch.cat([alpha.cpu()] * 3, 1),
                            torch.cat([alpha_mask.cpu()] * 3, 1),
                            torch.cat([receptive.cpu()] * 3, 1)
                            ], 0)
            tb_logger.add_images('image', img_plot, step_image)
            step_image += 1
        # Tensorboard loss log
        if (idx + 1) % 10 == 0:
            tb_logger.add_scalar('soc_loss', loss.item(), step_scalar)
            step_scalar += 1
    # lr_scheduler.step()
    # NOTE(review): the scheduler above is never stepped, so the learning
    # rate stays constant — confirm whether that is intended.
    # Checkpoint once per epoch
    torch.save(modnet.state_dict(), os.path.join(folder_ckpt, 'epoch_%d.pth'%(epoch)))
    
