import torch
import argparse
from voc_dataset import VocDataset
from transforms import RandomFlip, Resize, ToTensor, Normalize, Compose
from torch.utils.data import DataLoader, ConcatDataset
from backbone import Resnet50
from Presnet import PResNet
from hybrid_encoder import HybridEncoder
from transformer import RTDETRTransformer
from rtdetr import RTDETR
from utils import collate_fn_voc, eval_voc
from hungarianloss import HungarianMatch
from tqdm import tqdm
# from torchvision.ops import box_convert
import os
import re
import torch.distributed as tdist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
import random
import numpy as np


class Trainer:
    """Training loop with optional DDP support, checkpointing, and periodic eval.

    NOTE(review): reads the module-level ``args`` namespace (save paths,
    num_query) -- assumes the ``__main__`` block defined it before use.
    """

    def __init__(self, model, train_dataloader, 
                 val_dataloader, optimizer, 
                 gpu_id, criterion, steplr=None):
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.optimizer = optimizer
        self.steplr = steplr  # currently unused: scheduler stepping is disabled below
        self.device = gpu_id
        self.criterion = criterion
        if tdist.is_initialized():
            # Share BatchNorm statistics across processes, then wrap in DDP
            # so gradients are averaged over ranks.
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
            self.model = DDP(model.to(gpu_id), device_ids=[gpu_id])
        else:
            self.model = model.to(gpu_id)

    def train(self, max_epoch):
        """Run ``max_epoch`` epochs, saving a checkpoint after each one."""
        os.makedirs(args.save_pth_root, exist_ok=True)
        for epoch in range(max_epoch):
            self.model.train()
            if tdist.is_initialized():
                # Re-seed the DistributedSampler so each epoch sees a
                # different shuffle of the dataset.
                self.train_dataloader.sampler.set_epoch(epoch)
            self._run_epoch(epoch)

    def _run_epoch(self, epoch):
        """One full pass over the train loader, then checkpoint (+ eval every 5 epochs)."""
        for samples, targets, _, _ in tqdm(self.train_dataloader, \
                                           desc=f'\033[1;34mtrain epoch[{epoch}]\033[0m', ncols=100):
            samples = samples.to(self.device)
            self._run_batch(samples, targets)
        # self.steplr.step()

        if tdist.is_initialized():
            tdist.barrier()
            # Only rank 0 writes the checkpoint: every rank writing the same
            # file concurrently can corrupt the .pth.
            if tdist.get_rank() == 0:
                torch.save(self.model.module.state_dict(), f'{args.save_pth_root}/epoch{epoch}.pth')
        else:
            torch.save(self.model.state_dict(), f'{args.save_pth_root}/epoch{epoch}.pth')

        if (epoch + 1) % 5 == 0:
            self.model.eval()
            eval_voc(self.val_dataloader, self.model, self.device, \
                    args.num_query, args.save_json_root, epoch)

    def _run_batch(self, samples, targets):
        """Forward pass, Hungarian-matching loss, and one SGD step."""
        dec_out = self.model(samples)
        # criterion returns a dict of named loss components; total is their sum
        loss_dict = self.criterion(dec_out, targets)
        loss = sum(loss_dict.values())
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()


def main(args):
    """Build datasets, model, optimizer, and run training.

    In distributed mode (``--dist``) each spawned process initializes NCCL,
    is pinned to its own GPU, and gets a rank-offset seed so data
    augmentation differs per rank.
    """
    if args.dist:
        rank = dist_init()
        set_seed(args.seed + rank)
    else:
        rank = "cuda"
        set_seed(args.seed)

    train_transforms = Compose([Resize((480, 480)),
                                RandomFlip(),
                                ToTensor(),
                                Normalize()])
    val_transforms = Compose([Resize((480, 480)),
                                ToTensor(),
                                Normalize()])
    train_dataset = VocDataset(args.train_root, year='2007', image_set='trainval', voc_transforms=train_transforms)
    val_dataset = VocDataset(args.val_root, year='2007', image_set='test', voc_transforms=val_transforms)

    if args.dist:
        # shuffle=False is required with a sampler; shuffling is done by
        # DistributedSampler (re-seeded per epoch in Trainer.train).
        train_dataloader = DataLoader(train_dataset, args.batch_size, shuffle=False, \
            num_workers=8, collate_fn=collate_fn_voc, pin_memory=True, sampler=DistributedSampler(train_dataset))
        val_dataloader = DataLoader(val_dataset, args.batch_size, shuffle=False, \
            num_workers=8, collate_fn=collate_fn_voc, pin_memory=True, sampler=DistributedSampler(val_dataset))
    else:
        train_dataloader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=8, collate_fn=collate_fn_voc)
        val_dataloader = DataLoader(val_dataset, args.batch_size, shuffle=False, num_workers=8, collate_fn=collate_fn_voc)

    backbone, encoder, decoder = PResNet(), HybridEncoder(), RTDETRTransformer()
    criterion = HungarianMatch()
    model = RTDETR(backbone, encoder, decoder)

    # Parameter groups: backbone gets its own LR; bias/norm parameters of the
    # encoder/decoder are excluded from weight decay (standard DETR recipe).
    param = [{'params': [p for n, p in model.named_parameters() \
                if 'backbone' in n and p.requires_grad], 'lr': args.backbone_lr},
             {'params': [p for n, p in model.named_parameters() \
                if 'encoder' in n and ('bias' in n or 'norm' in n) and p.requires_grad], 'weight_decay': 0.},
             {'params': [p for n, p in model.named_parameters() \
                if 'decoder' in n and ('bias' in n or 'norm' in n) and p.requires_grad], 'weight_decay': 0.},
             {'params': [p for n, p in model.named_parameters() \
                if 'backbone' not in n and 'norm' not in n and 'bias' not in n and p.requires_grad]}]

    optimizer = torch.optim.SGD(param, args.lr, weight_decay=0.0001)
    steplr = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=[1000], gamma=0.1)

    trainer = Trainer(model, train_dataloader, val_dataloader, \
                                optimizer, rank, criterion, steplr)
    trainer.train(max_epoch=args.epochs)
    # Only tear down the process group if one was actually created;
    # calling destroy_process_group() without init raises an error.
    if tdist.is_initialized():
        tdist.destroy_process_group()


def dist_init():
    """Join the NCCL process group and pin this process to its GPU.

    Returns the process rank (also used as the CUDA device index).
    """
    tdist.init_process_group(backend='nccl', init_method="env://")
    local_rank = tdist.get_rank()
    torch.cuda.set_device(local_rank)
    tdist.barrier()
    if local_rank == 0:
        print("Initialized distributed mode!")
    return local_rank


def set_seed(seed):
    """Seed every RNG in play (torch CPU+CUDA, numpy, stdlib) for reproducibility."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op when CUDA is unavailable
    np.random.seed(seed)
    random.seed(seed)


if __name__ == '__main__':
    cli = argparse.ArgumentParser()
    cli.add_argument("--batch_size", default=16, type=int)
    # dataset roots (VOC devkit layout expected underneath)
    cli.add_argument("--train_root", default='/mnt/sdb2/ray/rtdetr-implement/', type=str)
    cli.add_argument("--val_root", default='/mnt/sdb2/ray/rtdetr-implement/', type=str)
    # output locations for COCO-style eval JSON and checkpoints
    cli.add_argument("--save_json_root", default='/mnt/sdb2/ray/rtdetr-implement/voc/output/prediction/', type=str)
    cli.add_argument("--save_pth_root", default='/mnt/sdb2/ray/rtdetr-implement/voc/output/weight/', type=str)
    cli.add_argument("--backbone_lr", default=0.01, type=float)
    cli.add_argument("--lr", default=0.01, type=float)
    cli.add_argument("--epochs", default=100, type=int)
    cli.add_argument("--num_query", default=300, type=int)
    cli.add_argument("--dist", action='store_true', help="启用分布式训练")
    cli.add_argument("--seed", default=42, type=int, help="fix for reproduce")
    # NOTE: `args` must remain a module-level global -- Trainer reads it directly.
    args = cli.parse_args()
    main(args)