# -*- coding: utf-8 -*-
# @Time    : 2021/7/7 14:46
# @Author  : LuoTianHang


# #################### trainddp.py overview ##########################
# This script is used to accelerate the training procedure (multi-GPU DDP).
import  numpy as np
from otherutils import blue_print
import argparse
import os
import random
import time
from otherutils import green_print
import torch.cuda
import torch.distributed as dist
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F

from datalist import MCNNDataset2
from model import MCNN2 as MCNN

# Command-line interface for the distributed (DDP) training run.
parser = argparse.ArgumentParser(description="mcnn ddp")
# (flag, add_argument keyword arguments) — one row per CLI option.
_cli_options = [
    ("--local_rank", dict(default=-1, type=int,
                          help="node rank for distributed training")),
    ("--seed", dict(default=None, type=int,
                    help="seed for initializing training")),
    ("--batch_size", dict(default=4, type=int)),
    ("--epochs", dict(default=400, type=int)),
    ("--lr", dict(default=0.0002, type=float)),
    ("--ip", dict(default="127.0.0.1", type=str)),
    ("--port", dict(default='23456', type=str)),
]
for _flag, _kwargs in _cli_options:
    parser.add_argument(_flag, **_kwargs)


def main():
    """Entry point: parse the CLI and hand off to the per-process worker."""
    args = parser.parse_args()
    # One process per visible GPU; the launcher supplies --local_rank.
    args.nprocs = torch.cuda.device_count()
    main_worker(args.local_rank, args.nprocs, args)

def reduce_mean(tensor, nprocs):
    """Average *tensor* across all ranks in the process group.

    The input tensor is left untouched; a reduced copy is returned.
    Requires an initialized torch.distributed process group.
    """
    averaged = tensor.clone()
    dist.all_reduce(averaged, op=dist.ReduceOp.SUM)
    return averaged / nprocs

def get_lines():
    """Collect the paths of every ``.jpg`` image under the known dataset roots.

    Walks ``/workspace/mcnn/<dataset>`` for each known dataset directory.
    Roots that do not exist are silently skipped (``os.walk`` simply yields
    nothing for them).

    Returns:
        list[str]: full paths of all ``.jpg`` files found.
    """
    # Renamed from `dir`, which shadowed the builtin of the same name.
    datasets = ['UCF', 'UCF_CC', "WE", "shanghaiA", "shanghaiB"]
    image_paths = []
    for dataset in datasets:
        for root, _dirs, files in os.walk("/workspace/mcnn/" + dataset):
            image_paths.extend(os.path.join(root, name)
                               for name in files
                               if name.endswith(".jpg"))
    return image_paths


def main_worker(local_rank, nprocs, args):
    """Per-process DDP training worker.

    Initializes the NCCL process group, wraps the model in
    DistributedDataParallel, loads pretrained weights from
    ``weights/best.pth`` and trains for ``args.epochs`` epochs.
    Rank 0 logs progress and overwrites ``weights/temp/best.pth``
    after every epoch.

    Args:
        local_rank: GPU / process index on this node.
        nprocs: total number of processes (world size).
        args: parsed command-line arguments (lr, batch_size, epochs, ...).
    """
    args.local_rank = local_rank

    # Honour --seed when given (it was previously accepted but ignored).
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)

    # env:// rendezvous reads MASTER_ADDR/MASTER_PORT; fall back to the
    # --ip/--port CLI values when the launcher has not exported them
    # (setdefault keeps launcher-provided values intact).
    os.environ.setdefault("MASTER_ADDR", args.ip)
    os.environ.setdefault("MASTER_PORT", args.port)

    init_method = "env://"
    cudnn.benchmark = True
    dist.init_process_group(backend="nccl", init_method=init_method,
                            world_size=args.nprocs, rank=local_rank)
    model = MCNN()

    torch.cuda.set_device(local_rank)
    model.cuda(local_rank)
    # Replace BatchNorm with SyncBatchNorm so statistics are shared
    # across ranks, then wrap for gradient all-reduce.
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(local_rank)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])

    blue_print("load the weight from pretrained-weight file")
    # NOTE: model is already DDP-wrapped here, so its state-dict keys carry
    # a "module." prefix; the checkpoint is expected to use the same keys.
    model_dict = model.state_dict()
    pretrained_dict = torch.load("weights/best.pth")['model_state_dict']
    # Keep only weights whose key exists in the current model AND whose
    # shape matches. The previous bare `model_dict[k]` lookup raised
    # KeyError for any checkpoint key unknown to the model.
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in model_dict and np.shape(model_dict[k]) == np.shape(v)}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    green_print("Finished to load the weight")

    # Loss is an even blend of MSE and L1 on the predicted density maps.
    criterion1 = nn.MSELoss()
    criterion2 = nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters(), args.lr)

    # Per-process batch size: the global batch is split across ranks.
    batch_size = int(args.batch_size / nprocs)

    lines = get_lines()
    random.shuffle(lines)
    num_train = int(len(lines) * 0.8)

    train_dataset = MCNNDataset2(data_type="train",
                                 data=lines[:num_train])

    # DistributedSampler shards the dataset per rank and does the
    # shuffling itself, so the DataLoader must not shuffle.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)

    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True,
                                  sampler=train_sampler)

    for epoch in range(args.epochs):
        start = time.time()
        model.train()
        # Reseed the sampler so every epoch sees a different shard order.
        train_sampler.set_epoch(epoch)

        for step, (images, labels) in enumerate(train_dataloader):
            images = images.cuda(local_rank, non_blocking=True)
            labels = labels.cuda(local_rank, non_blocking=True)

            # ReLU clamps negative density predictions to zero.
            outputs = F.relu(model(images))

            loss = 0.5 * criterion1(outputs, labels) + 0.5 * criterion2(outputs, labels)

            # Average the loss across ranks purely for logging; the
            # backward pass uses the local (un-reduced) loss. all_reduce
            # is collective, so every rank must execute it.
            dist.barrier()
            reduced_loss = reduce_mean(loss, args.nprocs)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if args.local_rank == 0:
                print(
                    'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.8f}\tLR: {:0.8f}'.format(
                        reduced_loss,
                        optimizer.param_groups[0]['lr'],
                        epoch=epoch + 1,
                        trained_samples=step * args.batch_size + len(images),
                        total_samples=len(train_dataloader.dataset)
                    ))
        finish = time.time()
        if args.local_rank == 0:
            # epoch + 1 keeps this line 1-based like the step log above.
            print('epoch {} training time consumed: {:.2f}s'.format(epoch + 1, finish - start))
            # Checkpoint is overwritten every epoch (no "best" tracking);
            # make sure the target directory exists before saving.
            os.makedirs('weights/temp', exist_ok=True)
            torch.save({
                'model_state_dict': model.state_dict(),
            }, 'weights/temp/best.pth')
            green_print("model saved")
if __name__ == "__main__":
    main()