import os
import torch
import argparse
from torch.utils.data import DataLoader
from src.models.modnet import MODNet
from src.utils import filter_parameters_by_type
from src.trainer import supervised_training_iter, soc_adaptation_iter
from dataset.data import DatasetNoLabel, DatasetPairCrop
from tensorboardX import SummaryWriter
import logging
logging.getLogger().setLevel(logging.INFO)

# Command-line arguments
parser = argparse.ArgumentParser(
    'SOC Training'
)

parser.add_argument('--log',
    default='./logs/tb',
    help='tensorboard log path'
)

parser.add_argument('--save_folder',
    default='./ckpt_soc/',
    help='checkpoint path'
)

parser.add_argument('--data_path',
    default='/home/chengk/params-photo-serving/chk/gaze_human_reason/raw/lock/',
    help='image data folder'
)

parser.add_argument('--log_image_every_step',
    type=int,  # was untyped: a CLI-supplied value arrived as str and broke the `% log_image_every_step` checks below
    default=100,
    help='log images to tensorboard every N steps'
)

parser.add_argument('--lr',
    type=float,  # was untyped: a CLI-supplied value arrived as str and broke torch.optim.Adam
    default=1e-5,
    help='learning rate'
)

parser.add_argument('--pretrained_model',
    default='./modnet_webcam_portrait_matting.ckpt',
    help='pretrained model path'
)

args = parser.parse_args()

# Tensorboard writer and checkpoint directory
tb_logger = SummaryWriter(args.log)
folder_ckpt = args.save_folder
# exist_ok avoids the TOCTOU race between an exists() check and makedirs()
os.makedirs(folder_ckpt, exist_ok=True)

bs = 1          # batch size
lr = args.lr    # learning rate
epochs = 10     # total epochs
log_image_every_step = args.log_image_every_step

device = torch.device('cuda:0')
ckpt_pretrain = args.pretrained_model
ckpt = torch.load(ckpt_pretrain, map_location='cpu')

# Two copies of the network: `modnet` is adapted during SOC training while
# `modnet_freeze` keeps the pretrained weights fixed (eval mode) as the
# reference model for the self-supervised adaptation step.
modnet = torch.nn.DataParallel(MODNet(backbone_pretrained=False)).cuda()
modnet_freeze = torch.nn.DataParallel(MODNet(backbone_pretrained=False)).cuda()
modnet.load_state_dict(ckpt)
modnet_freeze.load_state_dict(ckpt)
modnet_freeze.eval()
# Freeze BatchNorm updates and the detail/fusion branches by optimizing only
# the low-resolution (semantic) branch parameters.
# train_params = filter_parameters_by_type(modnet, torch.nn.modules.batchnorm.BatchNorm2d)
train_params = modnet.module.lr_branch.parameters()
optimizer = torch.optim.Adam(train_params, lr=lr, betas=(0.9, 0.99))
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(0.25 * epochs), gamma=0.1)

# dataset = DatasetNoLabel(root=args.data_path)
dataset = DatasetPairCrop(root=args.data_path)
dataloader = DataLoader(dataset, bs, num_workers=4, drop_last=True, shuffle=True)

# SOC adaptation loop: one self-supervised adaptation step per batch, with
# periodic scalar/image logging and a checkpoint at the end of each epoch.
step_scalar, step_image = 0, 0
for epoch in range(0, epochs):
    for idx, data in enumerate(dataloader):
        img_anchor, bnd_box_anchor, img_target, bnd_box_target, size_ori = data

        # BUG FIX: `image` was referenced but never assigned, so the original
        # script raised NameError on the first batch. Feed the anchor crop to
        # the adaptation step.
        # NOTE(review): the paired crop (img_target / bounding boxes) is
        # currently unused — presumably intended for a pair-consistency loss;
        # confirm against DatasetPairCrop's intent.
        image = img_anchor.to(device)
        output_soc = soc_adaptation_iter(modnet, modnet_freeze, optimizer, image,
                                         return_mattes=(idx + 1) % log_image_every_step == 0)
        if (idx + 1) % log_image_every_step == 0:
            soc_semantic_loss, soc_detail_loss, (pred_fg, pred_backup_fg, pred_semantic, pred_backup_semantic, boundaries) = output_soc
            # Tensorboard image log: stack input and predictions along the
            # batch dim; single-channel mattes are repeated to 3 channels and
            # the input is rescaled from [-1, 1] to [0, 1] for display.
            img_plot = torch.cat([(image.cpu() + 1) / 2,
                                  torch.cat([pred_fg.cpu()] * 3, 1),
                                  torch.cat([pred_backup_fg.cpu()] * 3, 1),
                                  torch.cat([pred_semantic.cpu()] * 3, 1),
                                  torch.cat([pred_backup_semantic.cpu()] * 3, 1),
                                  torch.cat([boundaries.cpu()] * 3, 1)
                                  ], 0)
            tb_logger.add_images('image', img_plot, step_image)
            step_image += 1
        else:
            soc_semantic_loss, soc_detail_loss = output_soc
        # Tensorboard loss log every 10 steps
        if (idx + 1) % 10 == 0:
            tb_logger.add_scalars('soc_loss', {'semantic': soc_semantic_loss.item(),
                                               'detail': soc_detail_loss.item()},
                                  step_scalar)
            step_scalar += 1
    # lr_scheduler.step()  # NOTE(review): scheduler is built but deliberately not stepped — confirm
    # Checkpoint once per epoch
    torch.save(modnet.state_dict(), os.path.join(folder_ckpt, 'epoch_%d.pth' % (epoch)))
