import os
import torch
import sys
import numpy as np
import torch.nn.functional as F
import torchvision.models as models
from torch.utils.data.dataloader import DataLoader
from src import utils
from src.model import Discriminator
from src.model import Generator
from src.model import Vgg
from src.ImgDataset import ImgDataset

# termcolor is optional: if it is missing, `cprint` stays None and
# log_print() falls back to a plain print().
cprint = None
try:
    from termcolor import cprint
except ImportError:
    pass


def log_print(text, color='blue', on_color=None, attrs=None):
    """Print *text*, colored via termcolor's ``cprint`` when it is available.

    Args:
        text: message to display.
        color, on_color, attrs: forwarded to ``termcolor.cprint``. ``attrs``
            defaults to ``['bold']``; a ``None`` sentinel is used instead of a
            list literal to avoid the shared mutable-default-argument pitfall.
    """
    if attrs is None:
        attrs = ['bold']
    if cprint is not None:
        cprint(text, color=color, on_color=on_color, attrs=attrs)
    else:
        # termcolor not installed: uncolored fallback.
        print(text)


if __name__ == '__main__':
    # ---------------- configuration ----------------
    dataset_name = 'shtechB'
    output_dir = './saved_modules/'
    os.makedirs(output_dir, exist_ok=True)

    train_path = './data/shtechB/train_data/images'
    train_gt_path = './data/shtechB/train_data/ground_truth_csv'
    val_path = './data/shtechB/test_data/images'
    val_gt_path = './data/shtechB/test_data/ground_truth_csv'
    vgg16_module = './vgg16.pth'

    start_step = 0
    end_step = 400
    lr = 0.00005
    is_cuda = True

    # Load VGG-16 weights from the local checkpoint (no download).
    vgg16 = models.vgg16(pretrained=False)
    vgg16.load_state_dict(torch.load(vgg16_module))

    net_D = Discriminator()
    net_G = Generator()
    net_Vgg = Vgg()
    utils.weights_init(net_D)
    utils.weights_init(net_G)

    # Copy the layers net_Vgg shares with VGG-16, then freeze them:
    # net_Vgg is used only as a fixed feature extractor for the perceptual loss.
    vgg16_dict = vgg16.state_dict()
    net_dict = net_Vgg.state_dict()
    net_dict.update({k: v for k, v in vgg16_dict.items() if k in net_dict})
    net_Vgg.load_state_dict(net_dict)
    for param in net_Vgg.features.parameters():
        param.requires_grad = False

    print('init net done!')
    if is_cuda:
        net_D.cuda()
        net_G.cuda()
        net_Vgg.cuda()
    net_D.train()
    net_G.train()
    net_Vgg.eval()

    optimizer_D = torch.optim.Adam(filter(lambda p: p.requires_grad, net_D.parameters()), lr=lr)
    optimizer_G = torch.optim.Adam(filter(lambda p: p.requires_grad, net_G.parameters()), lr=lr)

    # MSE for all three terms: adversarial (LSGAN-style), pixel-wise, perceptual.
    loss_D = torch.nn.MSELoss()
    loss_G = torch.nn.MSELoss()
    loss_Vgg = torch.nn.MSELoss()

    img_dataset = ImgDataset(train_path, train_gt_path, gt_downsample=False, is_training=True)
    data_loader = DataLoader(img_dataset, shuffle=True, num_workers=1, collate_fn=utils.collate_fn, drop_last=False)
    val_img_dataset = ImgDataset(val_path, val_gt_path, gt_downsample=False, is_training=False)
    val_data_loader = DataLoader(val_img_dataset, shuffle=False, num_workers=1, collate_fn=utils.collate_fn,
                                 drop_last=False)

    best_mae = sys.maxsize
    best_mse = sys.maxsize
    loss_list_d = []
    loss_list_g = []
    epoch_x = []
    best_G_model = '{}_{}_{}.pkl'.format('G', dataset_name, 0)
    print('begin training!')

    for epoch in range(start_step, end_step):
        epoch_x.append(epoch + 1)
        train_D_loss = 0.0
        train_G_loss = 0.0
        for blob in data_loader:
            im_data = blob['img_data']
            gt_data = blob['gt_data']
            if is_cuda:
                im_data = im_data.cuda()
                gt_data = gt_data.cuda()

            # ---- train D: real (img, gt) pairs -> +1, generated pairs -> -1 ----
            optimizer_D.zero_grad()
            out_real = net_D(im_data, gt_data)
            err_real = loss_D(out_real, torch.ones_like(out_real))

            density_map = net_G(im_data)
            # detach(): the D update must not backpropagate into G.
            out_fake = net_D(im_data, density_map.detach())
            err_fake = loss_D(out_fake, torch.full_like(out_fake, -1.0))

            err_D = err_real + err_fake
            err_D.backward()
            optimizer_D.step()
            # .item() accumulates a plain float instead of a graph-holding
            # tensor (the original `+= err_D` leaked the autograd graph).
            train_D_loss += err_D.item()

            # ---- train G: fool D + match gt density + match VGG features ----
            optimizer_G.zero_grad()
            density_map = net_G(im_data)
            out_label = net_D(im_data, density_map)
            # BUG FIX: the original detached `out_label` here, which severed
            # the adversarial gradient so D never influenced G's update.
            err_G = loss_D(out_label, torch.ones_like(out_label))

            # Pixel-wise density regression term.
            err_G = err_G + loss_G(density_map, gt_data)

            # Perceptual term: gradients must flow through the generated
            # branch (the original detached both sides, making this loss a
            # constant); the ground-truth features are a fixed target.
            ge_vgg = net_Vgg(density_map)
            gt_vgg = net_Vgg(gt_data)
            err_G = err_G + loss_Vgg(ge_vgg, gt_vgg.detach())

            err_G.backward()
            optimizer_G.step()
            train_G_loss += err_G.item()

        log_text = 'EPOCH: %d, D_loss: %.5f, G_loss: %.5f' % (epoch + 1, train_D_loss / img_dataset.size(),
                                                              train_G_loss / img_dataset.size())
        log_print(log_text)
        loss_list_d.append(train_D_loss / img_dataset.size())
        loss_list_g.append(train_G_loss / img_dataset.size())
        utils.show_loss_acc(epoch_x, loss_list_d, loss_list_g, './loss.png')

        # Checkpoint + validate every second epoch.
        if epoch % 2 == 1:
            G_save_name = os.path.join(output_dir, '{}_{}_{}.pkl'.format('G', dataset_name, epoch + 1))
            torch.save(net_G.state_dict(), G_save_name)
            D_save_name = os.path.join(output_dir, '{}_{}_{}.pkl'.format('D', dataset_name, epoch + 1))
            torch.save(net_D.state_dict(), D_save_name)
            mae, mse = utils.evaluate_model(G_save_name, val_data_loader, is_cuda)
            mae = mae / val_img_dataset.size()
            mse = np.sqrt(mse / val_img_dataset.size())
            # Weighted selection criterion that strongly favours MAE.
            if mae * 0.95 + mse * 0.05 < best_mae * 0.95 + best_mse * 0.05:
                best_mae = mae
                best_mse = mse
                best_G_model = '{}_{}_{}.pkl'.format('G', dataset_name, epoch + 1)
            log_text = 'EPOCH: %d, MAE: %.1f, MSE: %.1f' % (epoch + 1, mae, mse)
            log_print(log_text)
            log_text = 'BEST MAE: %.1f, BEST MSE: %.1f, BEST MODEL: %s' % (best_mae, best_mse, best_G_model)
            log_print(log_text)
