import os
import torch
import argparse
import torch.nn.functional as F
from torch.utils.data import DataLoader
from src.models.modnet import MODNet
from src.utils import filter_parameters_by_type
from src.trainer import supervised_training_iter, soc_adaptation_iter
from dataset.data import DatasetNoLabel, DatasetPairCrop
from tensorboardX import SummaryWriter
import logging
logging.getLogger().setLevel(logging.INFO)

# Command-line arguments.
parser = argparse.ArgumentParser(
    'SOC Training'
)

parser.add_argument('--log', 
    default='./logs/tb_soc2',
    help='tensorboard log path'
)

parser.add_argument('--save_folder', 
    default='./ckpt_soc2/',
    help='checkpoint path'
)

parser.add_argument('--data_path', 
    default='/home/chengk/chk/data/removebg-align/image',
    help='image data folder'
)

parser.add_argument('--log_image_every_step',
    # Without an explicit type, a CLI-supplied value would be a str and
    # break the `(idx+1) % log_image_every_step` check in the train loop.
    type=int,
    default=100,
    help='as param name shows'
)

parser.add_argument('--lr',
    # Without an explicit type, a CLI-supplied value would be a str and
    # crash torch.optim.Adam; the float default masked the bug.
    type=float,
    default=1e-5,
    help='learning rate'
)

parser.add_argument('--pretrained_model', 
    # default='./modnet_webcam_portrait_matting.ckpt',
    default='./ckpt_stable/epoch_9.pth',
    help='pretrained model path'
)

args = parser.parse_args()

# ---- experiment setup -------------------------------------------------------
tb_logger = SummaryWriter(args.log)
folder_ckpt = args.save_folder
os.makedirs(folder_ckpt, exist_ok=True)

# Hyper-parameters.
bs = 4            # batch size
lr = args.lr      # learning rate
epochs = 100      # total number of epochs
log_image_every_step = args.log_image_every_step

device = torch.device('cuda:0')

# Load the pretrained weights once; both network copies start from them.
ckpt = torch.load(args.pretrained_model, map_location='cpu')

# Two copies of MODNet: `modnet` is adapted during training, while
# `modnet_freeze` stays fixed and provides the reference (anchor) mattes.
modnet = torch.nn.DataParallel(MODNet(backbone_pretrained=False)).cuda()
modnet_freeze = torch.nn.DataParallel(MODNet(backbone_pretrained=False)).cuda()
modnet.load_state_dict(ckpt)
modnet_freeze.load_state_dict(ckpt)

modnet.module.freeze_norm()   # stop BatchNorm from updating
modnet_freeze.eval()          # the reference network is never trained

# NOTE(review): all parameters are optimized here; an earlier variant trained
# only a parameter subset via filter_parameters_by_type (BatchNorm2d).
optimizer = torch.optim.Adam(modnet.module.parameters(), lr=lr, betas=(0.9, 0.99))
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(0.25 * epochs), gamma=0.1)

# Paired-crop dataset: each sample yields two crops plus their overlap
# coordinates (see the unpacking in the training loop below).
dataset = DatasetPairCrop(root=args.data_path)
dataloader = DataLoader(dataset, bs, num_workers=4, drop_last=True, shuffle=True)

# ---- SOC adaptation loop ----------------------------------------------------
# For each batch of paired crops, predict a matte for both crops, cut out the
# overlapping region from each prediction, and minimize the L1 distance
# between them (the frozen network's prediction acts as the target).
step_scalar, step_image = 0, 0
for epoch in range(0, epochs):
    for idx, data in enumerate(dataloader):
        log_step = (idx+1) % log_image_every_step == 0
        img_anchor, overlap_anchor, img_target, overlap_target = data

        img_anchor = img_anchor.to(device)
        img_target = img_target.to(device)
        # The frozen copy only supplies the regression target; running it
        # under no_grad avoids building an unused autograd graph for it.
        with torch.no_grad():
            _, _, pred_matte_freeze = modnet_freeze(img_anchor, True)
        _, _, pred_matte = modnet(img_target, False)

        loss = 0

        # Extract the overlap ROI from both predictions and accumulate the
        # per-sample L1 consistency loss.
        h, w = pred_matte_freeze.size()[-2:]
        b = img_anchor.size(0)
        plot_roi = []
        for i in range(b):
            # overlap_* hold fractional box coordinates as four per-batch
            # tensors: (x_min, x_max, y_min, y_max) — assumed from the
            # indexing below; TODO confirm against DatasetPairCrop.
            x_anchor_min, x_anchor_max, y_anchor_min, y_anchor_max = overlap_anchor[0][i], \
                                                        overlap_anchor[1][i], \
                                                        overlap_anchor[2][i], \
                                                        overlap_anchor[3][i]
            x_target_min, x_target_max, y_target_min, y_target_max = overlap_target[0][i], \
                                                        overlap_target[1][i], \
                                                        overlap_target[2][i], \
                                                        overlap_target[3][i]
            roi_anchor = pred_matte_freeze[i:i+1, ..., int(h*y_anchor_min): int(h*y_anchor_max), int(w*x_anchor_min): int(w*x_anchor_max)]
            roi_target = pred_matte[i:i+1, ..., int(h*y_target_min): int(h*y_target_max), int(w*x_target_min): int(w*x_target_max)]

            # Resample the smaller ROI onto the larger one so both match.
            if x_anchor_max-x_anchor_min > x_target_max-x_target_min:
                roi_target = F.interpolate(roi_target, roi_anchor.size()[-2:], mode='bilinear')
            else:
                roi_anchor = F.interpolate(roi_anchor, roi_target.size()[-2:], mode='bilinear')

            if log_step:
                # BUGFIX: upsample into separate variables for visualization.
                # Previously roi_anchor/roi_target were overwritten here, so
                # the loss below was silently computed on the (h, w)-upsampled
                # ROIs on every logging step instead of the matched ROIs.
                # .detach() is needed so tensorboardX can convert to numpy.
                plot_anchor = F.interpolate(roi_anchor, (h, w))
                plot_target = F.interpolate(roi_target, (h, w))
                plot_roi.extend([torch.cat([plot_anchor.detach().cpu()]*3, 1),
                                torch.cat([plot_target.detach().cpu()]*3, 1)])

            assert roi_target.size() == roi_anchor.size()
            loss += F.l1_loss(roi_target, roi_anchor)
        loss /= b

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if log_step:
            # Tensorboard image log: anchor/target inputs (rescaled from
            # [-1, 1] to [0, 1]), both full mattes, then the per-sample ROIs.
            # BUGFIX: pred_matte still carries gradients here; without
            # .detach() the numpy conversion inside add_images raises.
            img_plot = torch.cat([(img_anchor.cpu()+1)/2, 
                            (img_target.cpu()+1)/2,
                            torch.cat([pred_matte.detach().cpu()]*3, 1),
                            torch.cat([pred_matte_freeze.cpu()]*3, 1),
                            *plot_roi
                            ], 0)
            tb_logger.add_images('anchor-target-pred-freeze-roi_anchor-roi_target', img_plot, step_image)
            step_image += 1
        # Tensorboard loss log
        if (idx+1) % 10 == 0:
            tb_logger.add_scalar('soc2_loss', loss.item(),
                                                step_scalar)
            step_scalar += 1

    # BUGFIX: the StepLR schedule was created but never advanced, so the
    # learning rate never decayed. Step it once per epoch.
    lr_scheduler.step()

    # Checkpoint once per epoch.
    torch.save(modnet.state_dict(), os.path.join(folder_ckpt, 'epoch_%d.pth'%(epoch)))
    
