import argparse
import os.path
import random
import logging
import sys

import numpy as np
import torch
from dataloaders.dataset import StickerDataset, StickerRandomCrop
from torchvision import transforms
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torch.nn.modules.loss import CrossEntropyLoss
from torch import optim
from networks.net_factory import net_factory
from copy import deepcopy
import torch.nn.functional as F
from tqdm import tqdm
import shutil
from utils import losses

# Command-line configuration for incremental change-detection training.
parser = argparse.ArgumentParser()
parser.add_argument('--exp', type=str,
                    default='SYSU_CDD/Distillation', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='fcfmnet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=2000, help='maximum number of training iterations')
parser.add_argument('--num_classes', type=int, default=7,
                    help='output channel of network')
parser.add_argument('--step', type=int,
                    default=2, help='incremental step')
parser.add_argument('--root_path', type=str,
                    default='../data/SYSU_CDD', help='root directory of the dataset')
parser.add_argument('--sub_set', type=str,
                    default="sub_set1", choices=['set0', 'sub_set1', 'sub_set2', 'sub_set3', 'val'])
# BUGFIX: `type=list` splits a CLI string into characters ("256" ->
# ['2', '5', '6']); `nargs=2, type=int` correctly parses
# "--patch_size 256 256" while keeping the same default.
parser.add_argument('--patch_size', type=int, nargs=2, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--batch_size', type=int, default=4,
                    help='batch_size per gpu')
parser.add_argument('--seed', type=int, default=1337,
                    help='random seed')
parser.add_argument('--T', type=int, default=2,
                    help='distillation temperature')
parser.add_argument('--iter_save', type=int,
                    default=2000, help='each iter_save iterations save the model and image')
# BUGFIX: help text was copy-pasted from --iter_save; this flag controls
# the evaluation interval, not checkpointing.
parser.add_argument('--iter_eval', type=int,
                    default=10000, help='each iter_eval iterations run evaluation')
parser.add_argument('--distill', type=str,
                    default='fm_soft', help='the type of distillation between teacher and student')
args = parser.parse_args()


def weighted_BCE_logits(logit_pixel, truth_pixel, weight_pos=0.25, weight_neg=0.75):
    """Class-balanced binary cross-entropy on raw logits.

    Flattens both tensors, computes per-pixel BCE-with-logits, then
    averages the positive-pixel and negative-pixel losses separately and
    combines them with the given weights.  This keeps the (typically
    rare) changed class from being drowned out by the background class.

    Args:
        logit_pixel: raw (pre-sigmoid) predictions, any shape.
        truth_pixel: binary (0/1) targets with the same number of elements.
        weight_pos: weight applied to the mean loss over positive pixels.
        weight_neg: weight applied to the mean loss over negative pixels.

    Returns:
        Scalar tensor containing the weighted loss.

    Raises:
        ValueError: if the flattened shapes of the two tensors differ.
    """
    # reshape(-1) (rather than view(-1)) also accepts non-contiguous
    # inputs, e.g. transposed or sliced tensors.
    logit = logit_pixel.reshape(-1)
    truth = truth_pixel.reshape(-1)
    # Explicit validation instead of `assert`, which is stripped under -O.
    if logit.shape != truth.shape:
        raise ValueError(
            "logit/truth element counts differ: {} vs {}".format(
                tuple(logit_pixel.shape), tuple(truth_pixel.shape)))
    loss = F.binary_cross_entropy_with_logits(logit, truth, reduction='none')

    pos = (truth > 0.5).float()
    neg = (truth < 0.5).float()
    # The epsilon guards against division by zero when one class is absent.
    pos_num = pos.sum().item() + 1e-12
    neg_num = neg.sum().item() + 1e-12
    loss = (weight_pos * pos * loss / pos_num + weight_neg * neg * loss / neg_num).sum()

    return loss


def train(args, snapshot_path):
    """Incremental training driver.

    For each of ``args.step`` incremental steps this builds a dataloader
    over that step's sub-set, then trains ``model``: step 0 uses plain
    weighted-BCE training (``stage1``); later steps additionally distill
    feature maps from a frozen copy of the previous step's model
    (``stage1_distill``).  Checkpoints and TensorBoard logs are written
    under ``snapshot_path``.

    Args:
        args: parsed command-line namespace (see the parser at module top).
        snapshot_path: directory for checkpoints and the TensorBoard log.
    """
    step = args.step
    batch_size = args.batch_size
    num_classes = args.num_classes
    T = args.T  # distillation temperature (softens teacher/student logits)
    max_iterations = args.max_iterations
    seen_cls = 0
    total_cls = 2 # Not include background
    # Index 0 is the base set; indices 1..3 are the incremental sub-sets.
    # The step loop below indexes mode[step_b + 1], so it starts at sub_set1.
    mode = ['set0', 'sub_set1', 'sub_set2', 'sub_set3', 'val']
    model = net_factory(net_type=args.model, in_chns=3, class_num=num_classes)

    def worker_init_fn(worker_id):
        # Give each dataloader worker a distinct but reproducible seed.
        random.seed(args.seed + worker_id)

    def save_model(step_b):
        """Save model weight to disk
        """
        # `iter_num` is a one-element list defined in the step loop below;
        # the list lets the nested stage functions mutate the shared
        # counter without `nonlocal`.
        save_mode_path = os.path.join(
            snapshot_path, 'step' + str(step_b+1) + '_iter_' + str(iter_num[0]) + '.pth')
        torch.save(model.state_dict(), save_mode_path)
        logging.info("save model to {}".format(save_mode_path))


    def stage1(step_b, train_data, criterion, optimizer, model_lr_scheduler):
        # One epoch of plain weighted-BCE training (used for the first
        # incremental step, where there is no teacher to distill from).
        # NOTE(review): `criterion` is accepted but never used here.
        for i_batch, sampled_batch in enumerate(train_data):
            image_A, image_B, label = sampled_batch['image_A'], sampled_batch['image_B'], sampled_batch['label']
            image_A, image_B, label = image_A.cuda(), image_B.cuda(), label.float().cuda()


            outputs_change, feature_map = model(image_A, image_B)
            optimizer.zero_grad()
            loss_bn = weighted_BCE_logits(outputs_change, label)
            loss = loss_bn
            loss.backward()
            optimizer.step()

            iter_num[0] = iter_num[0] + 1

            writer.add_scalar('info/total_loss', loss, iter_num[0])
            writer.add_scalar('info/loss_bn', loss_bn, iter_num[0])

            logging.info(
                'iteration %d : loss : %f, loss_bn: %f' %
                (iter_num[0], loss.item(), loss_bn.item())
            )

            # save the image
            if iter_num[0] % args.iter_save == 0:
                print('----- save the image -----')
                pass

            # evaluation
            # NOTE(review): model.eval() is set here but never switched
            # back to train() — subsequent batches would run with BN /
            # dropout in eval mode. Confirm whether this is intended.
            if iter_num[0] > 0 and iter_num[0] % args.iter_eval == 0:
                print('---- evaluation -------')
                model.eval()

            if iter_num[0] % args.iter_save == 0:
                save_model(step_b)

        # Decay the learning rate once per epoch (StepLR, gamma=0.95).
        model_lr_scheduler.step()



    def stage1_nodistill(step_b, train_data, criterion, optimizer, model_lr_scheduler):
        # Near-duplicate of stage1 (differs only in log wording and in
        # gating evaluation on iter_save instead of iter_eval).
        # NOTE(review): this function is never called in this file.
        for i_batch, sampled_batch in enumerate(train_data):
            image_A, image_B, label = sampled_batch['image_A'], sampled_batch['image_B'], sampled_batch['label']
            image_A, image_B, label = image_A.cuda(), image_B.cuda(), label.float().cuda()
            outputs_change, feature_map = model(image_A, image_B)

            loss_bn = weighted_BCE_logits(outputs_change, label)
            loss = loss_bn
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            iter_num[0] = iter_num[0] + 1

            writer.add_scalar('info/total_loss', loss, iter_num[0])
            writer.add_scalar('info/loss_bn', loss_bn, iter_num[0])
            logging.info(
                'iteration %d : loss : %f, bn: %f' %
                (iter_num[0], loss.item(), loss_bn.item())
            )

            # save the image
            if iter_num[0] % args.iter_save == 0:
                print('----- save the image -----')
                pass

            # evaluation
            if iter_num[0] > 0 and iter_num[0] % args.iter_save == 0:
                print('---- evaluation -------')
                model.eval()

            if iter_num[0] % args.iter_save == 0:
                save_model(step_b)

        model_lr_scheduler.step()

    def stage1_distill(step_b, train_data, criterion, optimizer, model_lr_scheduler):
        """One epoch of training with feature-map distillation.

        Combines the weighted-BCE change loss with a soft cross-entropy
        between the student's and the frozen teacher's (previous_model)
        temperature-scaled feature maps.
        """
        beta = 0.5  # NOTE(review): defined but unused below.
        for i_batch, sampled_batch in enumerate(train_data):
            image_A, image_B, label = sampled_batch['image_A'], sampled_batch['image_B'], sampled_batch['label']
            image_A, image_B, label = image_A.cuda(), image_B.cuda(), label.float().cuda()
            outputs_change, feature_map = model(image_A, image_B)
            # cat_fm = [torch.cat((feature_map[x], feature_map[x+3]), dim=0) for x in range(3)]
            # Student side uses log-softmax so the product with the
            # teacher's softmax below gives the KD cross-entropy.
            fm = [F.log_softmax(x / T, dim=1) for x in feature_map]

            '''
            distillation loss + crossentropy loss + change loss
            '''
            # Teacher forward pass: no gradients, weights frozen via
            # deepcopy at the end of the previous step.
            with torch.no_grad():
                pre_outputs_change, pre_feature_map = previous_model(image_A, image_B)
                # pre_cat_fm = [torch.cat((pre_feature_map[x], pre_feature_map[x+3]), dim=0) for x in range(3)]
                pre_fm = [F.softmax(x / T, dim=1) for x in pre_feature_map]

            # fm_loss_group = [losses.cos_similarity_loss(x, y) for x, y in zip(feature_map, pre_feature_map)]
            # loss_fm = sum(fm_loss_group)

            if args.distill == 'fm_soft':
                # Soft cross-entropy: -mean over pixels of sum_c p_t * log p_s,
                # averaged over the feature-map levels.
                loss_fm = 0.0
                for pre_x, x in zip(pre_fm, fm):
                    loss_single = -torch.mean(torch.sum(pre_x * x, dim=1))
                    loss_fm += loss_single
                loss_fm = loss_fm / len(pre_fm)

            if args.distill == 'l2_dist':
                # NOTE(review): unimplemented; with --distill=l2_dist (or
                # any value other than 'fm_soft') `loss_fm` is never
                # assigned and the line below raises NameError.
                pass

            loss_bn = weighted_BCE_logits(outputs_change, label)
            # Fixed 0.9/0.1 weighting between distillation and change loss.
            loss = 0.9 * loss_fm + 0.1 * loss_bn
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            iter_num[0] = iter_num[0] + 1

            writer.add_scalar('info/total_loss', loss, iter_num[0])
            writer.add_scalar('info/loss_bn', loss_bn, iter_num[0])
            writer.add_scalar('info/loss_fm', loss_fm, iter_num[0])
            logging.info(
                'iteration %d : loss : %f, bn: %f, distll: %f' %
                (iter_num[0], loss.item(), loss_bn.item(), loss_fm.item())
            )

            # save the image
            if iter_num[0] % args.iter_save == 0:
                print('----- save the image -----')
                pass

            # evaluation
            if iter_num[0] > 0 and iter_num[0] % args.iter_save == 0:
                print('---- evaluation -------')
                model.eval()

            if iter_num[0] % args.iter_save == 0:
                save_model(step_b)

        model_lr_scheduler.step()

    # ---- Incremental-step loop: fresh data/optimizer per step, shared model.
    for step_b in range(step):
        logging.info("Incremental Step {}".format(step_b + 1))
        train_set = StickerDataset(base_dir=args.root_path, split="train", sub_set=mode[step_b+1], transform=transforms.Compose([
            StickerRandomCrop(args.patch_size)
        ]))
        trainloader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                                 num_workers=0, pin_memory=True, worker_init_fn=worker_init_fn, drop_last=True)

        model.train()
        criterion = CrossEntropyLoss()
        optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.01,
                              weight_decay=5e-4, momentum=0.9, nesterov=True)
        model_lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.95, last_epoch=-1)

        seen_cls += total_cls // 1
        print("seen classes: ", seen_cls + 1) # Add the background

        # NOTE(review): a new SummaryWriter is opened every step over the
        # same log directory and never closed.
        writer = SummaryWriter(snapshot_path + '/log')
        logging.info("{} iterations per epoch".format(len(trainloader)))


        max_epoch = max_iterations // len(trainloader) + 1
        iterator = tqdm(range(max_epoch), ncols=70)
        # Train loop
        # One-element list so nested stage functions can mutate the counter;
        # it resets at every incremental step (checkpoint names include the
        # step, so filenames still stay unique).
        iter_num = [0]
        for epoch_num in iterator:
            if step_b >= 1:
                stage1_distill(step_b, trainloader, criterion, optimizer, model_lr_scheduler)
            else:
                stage1(step_b, trainloader, criterion, optimizer, model_lr_scheduler)

        # Snapshot the just-trained model as the frozen teacher for the
        # next step's distillation.
        # NOTE(review): the copy is not put into eval() mode; gradients
        # are suppressed at the call site via torch.no_grad() instead.
        previous_model = deepcopy(model)




if __name__ == '__main__':

    snapshot_path = "../model/{}/{}".format(
        args.exp, args.model
    )
    # Create the snapshot directory and archive a copy of the code there.
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # BUGFIX: ignore_patterns takes pattern strings as *args (not a list),
    # and its result must be passed as the `ignore=` keyword — passed
    # positionally it lands on copytree's `symlinks` parameter, so .git and
    # __pycache__ were being copied into the snapshot.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))

    # logging: record the parameters to the log file and mirror to stdout
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
