
from config import args as args_config
import time
import random
import os
os.environ["CUDA_VISIBLE_DEVICES"] = args_config.gpus
os.environ["MASTER_ADDR"] = args_config.address
os.environ["MASTER_PORT"] = args_config.port

import json
import numpy as np
from tqdm import tqdm

import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
torch.autograd.set_detect_anomaly(True)

import utility
from model.completionformer import CompletionFormer
from summary.cfsummary import CompletionFormerSummary
from metric.cfmetric import CompletionFormerMetric
from data import get as get_data
from loss.l1l2loss import L1L2Loss

# Multi-GPU and Mixed precision supports
# NOTE : Only 1 process per GPU is supported now
import torch.multiprocessing as mp
import torch.distributed as dist
import apex
from apex.parallel import DistributedDataParallel as DDP
from apex import amp

torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

# Minimize randomness
def init_seed(seed=None):
    """Seed every RNG in play (torch CPU, numpy, random, torch CUDA).

    When *seed* is omitted, the seed from the global config is used.
    Called to minimize nondeterminism between runs.
    """
    if seed is None:
        seed = args_config.seed

    # Same seeding order as before: torch CPU, numpy, random, torch CUDA.
    for seeder in (torch.manual_seed, np.random.seed,
                   random.seed, torch.cuda.manual_seed_all):
        seeder(seed)

# wkl
def count_parameters(model):
    """Return ``(total, trainable)`` parameter counts of *model*.

    *total* counts every parameter element; *trainable* counts only those
    with ``requires_grad`` set.
    """
    total = 0
    trainable = 0
    for param in model.parameters():
        n = param.numel()
        total += n
        if param.requires_grad:
            trainable += n
    return total, trainable

def check_args(args):
    """Validate the parsed arguments and echo them to stdout.

    Asserts that ``args.pretrain``, when given, points to an existing
    file, prints every argument name/value pair, and returns the
    namespace unchanged.  (The old checkpoint-based argument restore on
    ``--resume`` was removed; only the existence check remains.)
    """
    checked = args
    if args.pretrain is not None:
        assert os.path.exists(args.pretrain), \
            "file not found: {}".format(args.pretrain)

    # Echo every argument for the log.
    print("\n === Arguments === 打印 args 中的所有参数 ====")
    for name in vars(checked):
        print(f"{name}: {getattr(checked, name)}")

    return checked


def train(gpu, args):
    """Train and validate CompletionFormer in one distributed worker.

    One process per GPU; *gpu* is both the process rank and the CUDA
    device index.  Only rank 0 loads pretrained weights, logs progress,
    writes summaries and saves checkpoints.

    Args:
        gpu (int): local rank / CUDA device index of this worker.
        args: parsed configuration namespace (see config.py).
    """
    # Initialize workers
    # NOTE : the worker with gpu=0 will do logging
    dist.init_process_group(backend='nccl', init_method='env://',
                            world_size=args.num_gpus, rank=gpu)
    torch.cuda.set_device(gpu)

    # Prepare dataset
    data = get_data(args)

    data_train = data(args, 'train')
    data_val = data(args, 'val')

    # Each worker trains on a disjoint shard of the training set.
    sampler_train = DistributedSampler(
        data_train, num_replicas=args.num_gpus, rank=gpu)

    batch_size = args.batch_size

    loader_train = DataLoader(
        dataset=data_train, batch_size=batch_size, shuffle=False,
        num_workers=args.num_threads, pin_memory=True, sampler=sampler_train,
        drop_last=True)
    # Validation is not sharded: every worker iterates the full val set
    # with batch size 1 (only rank 0 records the results).
    loader_val = DataLoader(
        dataset=data_val, batch_size=1, shuffle=False,
        num_workers=4, drop_last=False)

    if gpu == 0:
        print(f'Each GPU with training data {len(loader_train)}, validation data {len(loader_val)}!')

    print("----------------building network------------------")
    # Network
    if args.model == 'CompletionFormer':
        net = CompletionFormer(args)
    else:
        raise TypeError(args.model, ['CompletionFormer',])
    net.cuda(gpu)

    print("---------------end ok -------------------")

    if gpu == 0:
        # Only rank 0 loads pretrained weights here; presumably the DDP
        # wrapper below syncs parameters to the other ranks — TODO confirm.
        if args.pretrain is not None:
            assert os.path.exists(args.pretrain), \
                "file not found: {}".format(args.pretrain)
            checkpoint = torch.load(args.pretrain)
            net.load_state_dict(checkpoint['net'])
            print('Load network parameters from : {}'.format(args.pretrain))

    # Loss
    loss = L1L2Loss(args)
    loss.cuda(gpu)

    # Optimizer
    optimizer, scheduler = utility.make_optimizer_scheduler(args, net, len(loader_train))

    # apex: sync BatchNorm stats across workers, then patch net/optimizer
    # for mixed precision at the configured opt level.
    net = apex.parallel.convert_syncbn_model(net)
    net, optimizer = amp.initialize(net, optimizer, opt_level=args.opt_level, verbosity=0)

    if gpu == 0:
        if args.pretrain is not None:
            if args.resume:
                # Resume after interruption (the printed message says
                # "resume training after interruption").
                print(f"中断后继续训练----")

                # NOTE(review): this rebuilds a *fresh* optimizer/scheduler
                # instead of restoring their state from the checkpoint, and
                # it is created after amp.initialize() already patched the
                # previous optimizer — checkpointed optimizer/scheduler/amp
                # state is effectively discarded.  The original restore code
                # is kept below for reference (this line replaced it):
                optimizer, scheduler = utility.make_optimizer_scheduler(args, net, len(loader_train))
                # try:
                #     optimizer.load_state_dict(checkpoint['optimizer'])
                #     scheduler.load_state_dict(checkpoint['scheduler'])
                #     amp.load_state_dict(checkpoint['amp'])

                #     print('Resume optimizer, scheduler and amp '
                #           'from : {}'.format(args.pretrain))
                # except KeyError:
                #     print('State dicts for resume are not saved. '
                #           'Use --save_full argument')

            # Free the loaded checkpoint (defined above whenever
            # gpu == 0 and args.pretrain is set).
            del checkpoint

    net = DDP(net)

    metric = CompletionFormerMetric(args)

    total_params, trainable_params = count_parameters(net)
    print(f"Total parameters 参数。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。: {total_params}")
    print(f"Trainable parameters: {trainable_params}")

    # Author's recorded parameter counts for various backbone variants:
    # 1 mamba downsampling + 1 mambadec nlspn: 814,266 (814,255 trainable);
    # + 2 mambadec: 847,034 (847,023); swin: 94,564,570 (94,564,559);
    # umamba: 92,149,338 (92,149,327);
    # mamba without transformer+mlp: 69,442,138;
    # transformer+mlp without mamba: 82,441,818.

    if gpu == 0:
        print('\n' + '='*40 + '\n')
        # print(net)
        print('\n' + '='*40 + '\n')
        # Snapshot the source tree next to the checkpoints for reproducibility.
        utility.backup_source_code(args.save_dir + '/code')
        try:
            os.makedirs(args.save_dir, exist_ok=True)
            os.makedirs(args.save_dir + '/train', exist_ok=True)
            os.makedirs(args.save_dir + '/val', exist_ok=True)
        except OSError:
            pass

    if gpu == 0:
        writer_train = CompletionFormerSummary(args.save_dir, 'train', args,
                            loss.loss_name, metric.metric_name)
        writer_val = CompletionFormerSummary(args.save_dir, 'val', args,
                            loss.loss_name, metric.metric_name)

        with open(args.save_dir + '/args.json', 'w') as args_json:
            json.dump(args.__dict__, args_json, indent=4)

    if args.warm_up:
        warm_up_cnt = 0.0
        warm_up_max_cnt = len(loader_train)+1.0

    for epoch in range(1, args.epochs+1):
        # Train
        net.train()

        # Re-shuffle the distributed shards each epoch.
        sampler_train.set_epoch(epoch)

        if gpu == 0:
            current_time = time.strftime('%y%m%d@%H:%M:%S')
            list_lr = []
            for g in optimizer.param_groups:
                list_lr.append(f"{g['lr']:.10f}")  # keep 10 decimal places
            print('=== Epoch {:5d} / {:5d} | Lr : {} | {} | {} ==='.format(
                epoch, args.epochs, list_lr, current_time, args.save_dir))

        num_sample = len(loader_train) * loader_train.batch_size * args.num_gpus

        if gpu == 0:
            pbar = tqdm(total=num_sample)
            log_cnt = 0.0
            log_loss = 0.0

        # Re-seed from the wall clock so augmentations differ per epoch.
        init_seed(seed=int(time.time()))
        for batch, sample in enumerate(loader_train):
            sample = {key: val.cuda(gpu) for key, val in sample.items()
                      if val is not None}

            # Linear LR warm-up over the first epoch: scale each group's
            # initial_lr by the fraction of warm-up steps completed.
            if epoch == 1 and args.warm_up:
                warm_up_cnt += 1

                for param_group in optimizer.param_groups:
                    lr_warm_up = param_group['initial_lr'] \
                                 * warm_up_cnt / warm_up_max_cnt
                    param_group['lr'] = lr_warm_up

            optimizer.zero_grad()

            output = net(sample)

            loss_sum, loss_val = loss(sample, output)

            # Divide by batch size
            loss_sum = loss_sum / loader_train.batch_size
            loss_val = loss_val / loader_train.batch_size

            # AMP: scale the loss to avoid fp16 gradient underflow.
            with amp.scale_loss(loss_sum, optimizer) as scaled_loss:
                scaled_loss.backward()

            optimizer.step()

            if gpu == 0:
                metric_val = metric.evaluate(sample, output, 'train')
                writer_train.add(loss_val, metric_val)

                log_cnt += 1
                log_loss += loss_sum.item()

                current_time = time.strftime('%y%m%d@%H:%M:%S')
                error_str = '{:<10s}| {} | Loss = {:.4f}'.format(
                    'Train', current_time, log_loss / log_cnt)

                if epoch == 1 and args.warm_up:
                    list_lr = []
                    for g in optimizer.param_groups:
                        formatted_lr = f"{g['lr']:.15f}"  # keep 15 decimal places
                        list_lr.append(formatted_lr)
                    error_str = '{} | Lr Warm Up : {}'.format(error_str, list_lr)

                else:
                    list_lr = []
                    for g in optimizer.param_groups:
                        formatted_lr = f"{g['lr']:.15f}"  # keep 15 decimal places
                        list_lr.append(formatted_lr)
                    error_str = '{} | Lr : {}'.format(error_str, list_lr)

                if batch % args.print_freq == 0:
                    pbar.set_description(error_str)
                    pbar.update(loader_train.batch_size * args.num_gpus)

        if gpu == 0:
            pbar.close()

            writer_train.update(epoch, sample, output)

            if args.save_full or epoch == args.epochs:
                # Full state (optimizer/scheduler/amp) so training can resume.
                state = {
                    'net': net.module.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'scheduler': scheduler.state_dict(),
                    'amp': amp.state_dict(),
                    'args': args
                }
            else:
                # Weights only.
                state = {
                    'net': net.module.state_dict(),
                    'args': args
                }

            torch.save(state, '{}/model_{:05d}.pt'.format(args.save_dir, epoch))

        # Val
        torch.set_grad_enabled(False)
        net.eval()

        num_sample = len(loader_val) * loader_val.batch_size
        if gpu == 0:
            pbar = tqdm(total=num_sample)
        log_cnt = 0.0
        log_loss = 0.0

        # Fixed (config) seed so validation is deterministic across epochs.
        init_seed()
        for batch, sample in enumerate(loader_val):
            sample = {key: val.cuda(gpu) for key, val in sample.items()
                      if val is not None}

            with torch.no_grad():
                output = net(sample)

            loss_sum, loss_val = loss(sample, output)

            # Divide by batch size
            loss_sum = loss_sum / loader_val.batch_size
            loss_val = loss_val / loader_val.batch_size

            if gpu == 0:
                metric_val = metric.evaluate(sample, output, 'val')
                writer_val.add(loss_val, metric_val)

                log_cnt += 1
                log_loss += loss_sum.item()

                current_time = time.strftime('%y%m%d@%H:%M:%S')
                error_str = '{:<10s}| {} | Loss = {:.4f}'.format(
                    'Val', current_time, log_loss / log_cnt)
                if batch % args.print_freq == 0:
                    pbar.set_description(error_str)
                    pbar.update(loader_val.batch_size)

        if gpu == 0:
            pbar.close()

            writer_val.update(epoch, sample, output)
            writer_val.save(epoch, batch, sample, output)

        torch.set_grad_enabled(True)

        # Step the LR scheduler once per epoch.
        scheduler.step()


def test(args):
    """Evaluate CompletionFormer on the test split.

    Builds a single-sample test loader, optionally loads weights from
    ``args.pretrain`` (non-strict: unexpected keys are reported, missing
    keys abort with KeyError), times the forward passes, accumulates
    metrics into a summary writer, and optionally saves predictions.
    """
    # Prepare dataset / loader (batch size 1, no shuffling).
    dataset_cls = get_data(args)
    test_set = dataset_cls(args, 'test')

    loader_test = DataLoader(dataset=test_set, batch_size=1,
                             shuffle=False, num_workers=args.num_threads)

    # Network (only CompletionFormer is supported).
    if args.model != 'CompletionFormer':
        raise TypeError(args.model, ['CompletionFormer',])
    net = CompletionFormer(args)
    net.cuda()

    if args.pretrain is not None:
        assert os.path.exists(args.pretrain), \
            "file not found: {}".format(args.pretrain)

        ckpt = torch.load(args.pretrain)
        missing, unexpected = net.load_state_dict(ckpt['net'], strict=False)

        if unexpected:
            print('Unexpected keys :')
            print(unexpected)

        if missing:
            # Missing weights make the evaluation meaningless — abort.
            print('Missing keys :')
            print(missing)
            raise KeyError
        print('Checkpoint loaded from {}!'.format(args.pretrain))

    net = nn.DataParallel(net)

    metric = CompletionFormerMetric(args)

    # Output directories (best effort).
    try:
        os.makedirs(args.save_dir, exist_ok=True)
        os.makedirs(args.save_dir + '/test', exist_ok=True)
    except OSError:
        pass

    writer_test = CompletionFormerSummary(args.save_dir, 'test', args, None, metric.metric_name)

    net.eval()

    num_sample = len(loader_test) * loader_test.batch_size

    pbar = tqdm(total=num_sample)

    elapsed = 0

    init_seed()
    for idx, sample in enumerate(loader_test):
        # Move every tensor in the sample dict onto the GPU.
        sample = {k: v.cuda() for k, v in sample.items()
                  if v is not None}

        tic = time.time()
        with torch.no_grad():
            output = net(sample)
        toc = time.time()

        elapsed += (toc - tic)

        metric_val = metric.evaluate(sample, output, 'test')

        writer_test.add(None, metric_val)

        # Save data for analysis
        if args.save_image:
            writer_test.save(args.epochs, idx, sample, output)

        current_time = time.strftime('%y%m%d@%H:%M:%S')
        error_str = '{} | Test'.format(current_time)
        if idx % args.print_freq == 0:
            pbar.set_description(error_str)
            pbar.update(loader_test.batch_size)
    pbar.close()

    writer_test.update(args.epochs, sample, output)
    t_avg = elapsed / num_sample
    print('Elapsed time : {} sec, '
          'Average processing time : {} sec'.format(elapsed, t_avg))
    

# def test(args):
#     # Prepare dataset
#     data = get_data(args)

#     data_test = data(args, 'test')

#     loader_test = DataLoader(dataset=data_test, batch_size=1,
#                              shuffle=False, num_workers=args.num_threads)

#     # Network
#     if args.model == 'CompletionFormer':
#         net = CompletionFormer(args)
#     else:
#         raise TypeError(args.model, ['CompletionFormer',])
#     net.cuda()

#     if args.pretrain is not None:
#         assert os.path.exists(args.pretrain), \
#             "file not found: {}".format(args.pretrain)

#         checkpoint = torch.load(args.pretrain)
#         key_m, key_u = net.load_state_dict(checkpoint['net'], strict=False)

#         if key_u:
#             print('Unexpected keys :')
#             print(key_u)

#         if key_m:
#             print('Missing keys :')
#             print(key_m)
#             raise KeyError
#         print('Checkpoint loaded from {}!'.format(args.pretrain))

#     net = nn.DataParallel(net)

#     # total_params, trainable_params = count_parameters(net)
#     # print(f"Total parameters 参数。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。: {total_params}")
#     # print(f"Trainable parameters: {trainable_params}")
#     # return 0

#     metric = CompletionFormerMetric(args)

#     try:
#         os.makedirs(args.save_dir, exist_ok=True)
#         os.makedirs(args.save_dir + '/test', exist_ok=True)
#     except OSError:
#         pass

#     writer_test = CompletionFormerSummary(args.save_dir, 'test', args, None, metric.metric_name)

#     net.eval()
#     num_sample = len(loader_test)*loader_test.batch_size
#     pbar = tqdm(total=num_sample)
#     t_total = 0
#     init_seed()

#     with torch.profiler.profile(
#         activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
#         record_shapes=True,
#         profile_memory=True,
#         with_stack=True
#     ) as prof:
#         for batch, sample in enumerate(loader_test):
#             sample = {key: val.cuda() for key, val in sample.items()
#                     if val is not None}
#             t0 = time.time()
#             with torch.no_grad():
#                 output = net(sample)
#             # wkl
#             # torch.cuda.empty_cache()
#             t1 = time.time()
#             t_total += (t1 - t0)

#             # 记录 Profiler 的步骤
#             # prof.step()

#             metric_val = metric.evaluate(sample, output, 'test')
#             writer_test.add(None, metric_val)
#             # Save data for analysis
#             if args.save_image:
#                 writer_test.save(args.epochs, batch, sample, output)

#             current_time = time.strftime('%y%m%d@%H:%M:%S')
#             error_str = '{} | Test'.format(current_time)
#             if batch % args.print_freq == 0:
#                 pbar.set_description(error_str)
#                 pbar.update(loader_test.batch_size)

#     # 打印 Profiler 的结果
#     # print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))

#     # del output
#     pbar.close()

#     writer_test.update(args.epochs, sample, output)
#     t_avg = t_total / num_sample
#     print('Elapsed time : {} sec, '
#           'Average processing time : {} sec'.format(t_total, t_avg))
    
#     # total_params, trainable_params = count_parameters(net)
#     # print(f"Total parameters 参数。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。: {total_params}")
#     # print(f"Trainable parameters: {trainable_params}")
    


def main(args):
    """Entry point: seed RNGs, run distributed training, then test.

    Unless ``args.test_only`` is set, spawns one ``train`` process per
    GPU (or runs it inline with ``args.no_multiprocessing``), then
    evaluates the final-epoch checkpoint with ``test``.
    """
    init_seed()

    if not args.test_only:
        if args.no_multiprocessing:
            # Single-process fallback: rank 0 does all the work.
            train(0, args)
        else:
            assert args.num_gpus > 0

            ctx = mp.spawn(train, nprocs=args.num_gpus, args=(args,),
                           join=False)

            # Block until every spawned worker has finished.
            while not ctx.join():
                pass

            # Reap any straggling worker processes.
            for proc in ctx.processes:
                if proc.is_alive():
                    proc.terminate()
                proc.join()

        # Test with the checkpoint written at the final epoch.
        args.pretrain = '{}/model_{:05d}.pt'.format(args.save_dir, args.epochs)

    test(args)

 
if __name__ == '__main__':
    # Validate and echo the parsed configuration, then run.
    args_main = check_args(args_config)

    print('\n\n=== Arguments ===')
    cnt = 0
    for key in sorted(vars(args_main)):
        print(key, ':',  getattr(args_main, key), end='  |  ')
        cnt += 1
        # Break the line after every 5th argument.
        # (was `(cnt + 1) % 5 == 0`, which wrapped after the 4th/9th/...)
        if cnt % 5 == 0:
            print('')
    print('\n')

    main(args_main)


































# """
#     CompletionFormer
#     ======================================================================

#     main script for training and testing.
# """


# from config import args as args_config
# import time
# import random
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = args_config.gpus
# os.environ["MASTER_ADDR"] = args_config.address
# os.environ["MASTER_PORT"] = args_config.port

# import json
# import numpy as np
# from tqdm import tqdm

# import torch
# from torch import nn
# from torch.utils.data import DataLoader
# from torch.utils.data.distributed import DistributedSampler
# torch.autograd.set_detect_anomaly(True)

# import utility
# from model.completionformer import CompletionFormer
# from summary.cfsummary import CompletionFormerSummary
# from metric.cfmetric import CompletionFormerMetric
# from data import get as get_data
# from loss.l1l2loss import L1L2Loss

# # Multi-GPU and Mixed precision supports
# # NOTE : Only 1 process per GPU is supported now
# import torch.multiprocessing as mp
# import torch.distributed as dist
# # import apex
# # from apex.parallel import DistributedDataParallel as DDP
# # from apex import amp
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False

# # Minimize randomness
# def init_seed(seed=None):
#     if seed is None:
#         seed = args_config.seed

#     torch.manual_seed(seed)
#     np.random.seed(seed)
#     random.seed(seed)
#     torch.cuda.manual_seed_all(seed)

# # wkl
# def count_parameters(model):
#     total_params = sum(p.numel() for p in model.parameters())  # 总参数量
#     trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)  # 可训练参数量
#     return total_params, trainable_params

# def check_args(args):
#     new_args = args
#     if args.pretrain is not None:
#         assert os.path.exists(args.pretrain), \
#             "file not found: {}".format(args.pretrain)

#         if args.resume:
#             checkpoint = torch.load(args.pretrain)

#             new_args = checkpoint['args']
#             new_args.test_only = args.test_only
#             new_args.pretrain = args.pretrain
#             new_args.dir_data = args.dir_data
#             new_args.resume = args.resume

#     return new_args


# def train(gpu, args):
#     # Initialize workers
#     # # NOTE : the worker with gpu=0 will do logging
#     # dist.init_process_group(backend='nccl', init_method='env://',
#     #                         world_size=args.num_gpus, rank=gpu)
#     torch.cuda.set_device(gpu)

#     # Prepare dataset
#     data = get_data(args)

#     data_train = data(args, 'train')
#     data_val = data(args, 'val')

#     sampler_train = DistributedSampler(
#         data_train, num_replicas=args.num_gpus, rank=gpu)

#     batch_size = args.batch_size

#     loader_train = DataLoader(
#         dataset=data_train, batch_size=batch_size, shuffle=False,
#         num_workers=args.num_threads, pin_memory=True, sampler=sampler_train,
#         drop_last=True)
#     loader_val = DataLoader(
#         dataset=data_val, batch_size=1, shuffle=False,
#         num_workers=4, drop_last=False)

#     if gpu == 0:
#         print(f'Each GPU with training data {len(loader_train)}, validation data {len(loader_val)}!')

#     # Network
#     if args.model == 'CompletionFormer':
#         net = CompletionFormer(args)
#     else:
#         raise TypeError(args.model, ['CompletionFormer',])
#     net.cuda(gpu)


#     if gpu == 0:
#         if args.pretrain is not None:
#             assert os.path.exists(args.pretrain), \
#                 "file not found: {}".format(args.pretrain)
#             checkpoint = torch.load(args.pretrain)
#             net.load_state_dict(checkpoint['net'])
#             print('Load network parameters from : {}'.format(args.pretrain))

#     # Loss
#     loss = L1L2Loss(args)
#     loss.cuda(gpu)

#     # Optimizer
#     optimizer, scheduler = utility.make_optimizer_scheduler(args, net, len(loader_train))
#     # net = apex.parallel.convert_syncbn_model(net)
#     # net, optimizer = amp.initialize(net, optimizer, opt_level=args.opt_level, verbosity=0)
#     # wkl
#     # scaler = torch.cuda.amp.GradScaler()

#     if gpu == 0:
#         if args.pretrain is not None:
#             if args.resume:
#                 try:
#                     optimizer.load_state_dict(checkpoint['optimizer'])
#                     scheduler.load_state_dict(checkpoint['scheduler'])
#                     # amp.load_state_dict(checkpoint['amp'])

#                     print('Resume optimizer, scheduler and amp '
#                           'from : {}'.format(args.pretrain))
#                 except KeyError:
#                     print('State dicts for resume are not saved. '
#                           'Use --save_full argument')

#             del checkpoint

#     # net = DDP(net)
#     # wkl
#     net = net.cuda()


#     metric = CompletionFormerMetric(args)

#     total_params, trainable_params = count_parameters(net)
#     print(f"Total parameters 参数。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。: {total_params}")
#     print(f"Trainable parameters: {trainable_params}")
#     # return 0

#     # wkl 1mamba 下采样 no1111  1mambadec nlspn
#     # Total parameters 参数。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。: 814266
#     # Trainable parameters: 814255


#     # wkl 1mamba 下采样 no1111  2mambadec nlspn
#     # Total parameters 参数。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。: 847034
#     # Trainable parameters: 847023

#     # swin
#     # Total parameters 参数。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。: 94564570
#     # Trainable parameters: 94564559

#     # umamba
#     # Total parameters 参数。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。: 92149338   
#     # Trainable parameters: 92149327

#     # no tranformer and mlp but have mamba
#     # 69442138  

#     # no mamba have transformer mlp
#     # 82441818
#     # return 0

#     if gpu == 0:
#         print('\n' + '='*40 + '\n')
#         print(net)
#         print('\n' + '='*40 + '\n')
#         utility.backup_source_code(args.save_dir + '/code')
#         try:
#             os.makedirs(args.save_dir, exist_ok=True)
#             os.makedirs(args.save_dir + '/train', exist_ok=True)
#             os.makedirs(args.save_dir + '/val', exist_ok=True)
#         except OSError:
#             pass

#     if gpu == 0:
#         writer_train = CompletionFormerSummary(args.save_dir, 'train', args,
#                             loss.loss_name, metric.metric_name)
#         writer_val = CompletionFormerSummary(args.save_dir, 'val', args,
#                             loss.loss_name, metric.metric_name)

#         with open(args.save_dir + '/args.json', 'w') as args_json:
#             json.dump(args.__dict__, args_json, indent=4)

#     if args.warm_up:
#         warm_up_cnt = 0.0
#         warm_up_max_cnt = len(loader_train)+1.0

#     for epoch in range(1, args.epochs+1):
#         # Train
#         net.train()

#         sampler_train.set_epoch(epoch)

#         if gpu == 0:
#             current_time = time.strftime('%y%m%d@%H:%M:%S')

#             list_lr = []
#             for g in optimizer.param_groups:
#                 list_lr.append(g['lr'])

#             print('=== Epoch {:5d} / {:5d} | Lr : {} | {} | {} ==='.format(
#                 epoch, args.epochs, list_lr, current_time, args.save_dir
#             ))

#         num_sample = len(loader_train) * loader_train.batch_size * args.num_gpus

#         if gpu == 0:
#             pbar = tqdm(total=num_sample)
#             log_cnt = 0.0
#             log_loss = 0.0

#         init_seed(seed=int(time.time()))
#         for batch, sample in enumerate(loader_train):
#             sample = {key: val.cuda(gpu) for key, val in sample.items()
#                       if val is not None}

#             if epoch == 1 and args.warm_up:
#                 warm_up_cnt += 1

#                 for param_group in optimizer.param_groups:
#                     lr_warm_up = param_group['initial_lr'] \
#                                  * warm_up_cnt / warm_up_max_cnt
#                     param_group['lr'] = lr_warm_up

#             optimizer.zero_grad()

#             output = net(sample)

#             loss_sum, loss_val = loss(sample, output)

#             # Divide by batch size
#             loss_sum = loss_sum / loader_train.batch_size
#             loss_val = loss_val / loader_train.batch_size

#             # with amp.scale_loss(loss_sum, optimizer) as scaled_loss:
#             #     scaled_loss.backward()
#             # wkl
#             # with torch.cuda.amp.autocast():
#             #     output = net(sample)
#             #     loss_sum, loss_val = loss(sample, output)
#             #     loss_sum = loss_sum / loader_train.batch_size
#             #     loss_val = loss_val / loader_train.batch_size
#             output = net(sample)
#             loss_sum, loss_val = loss(sample, output)
#             loss_sum = loss_sum / loader_train.batch_size
#             loss_val = loss_val / loader_train.batch_size

#             # scaler.scale(loss_sum).backward()
#             # scaler.step(optimizer)
#             # scaler.update()
#             # wkl
#             loss_sum.backward()
#             optimizer.step()

#             optimizer.step()

#             if gpu == 0:
#                 metric_val = metric.evaluate(sample, output, 'train')
#                 writer_train.add(loss_val, metric_val)

#                 log_cnt += 1
#                 log_loss += loss_sum.item()

#                 current_time = time.strftime('%y%m%d@%H:%M:%S')
#                 error_str = '{:<10s}| {} | Loss = {:.4f}'.format(
#                     'Train', current_time, log_loss / log_cnt)

#                 if epoch == 1 and args.warm_up:
#                     list_lr = []
#                     for g in optimizer.param_groups:
#                         list_lr.append(round(g['lr'], 6))
#                     error_str = '{} | Lr Warm Up : {}'.format(error_str,
#                                                               list_lr)
#                 else:
#                     list_lr = []
#                     for g in optimizer.param_groups:
#                         list_lr.append(round(g['lr'], 6))
#                     error_str = '{} | Lr : {}'.format(error_str, list_lr)

#                 if batch % args.print_freq == 0:
#                     pbar.set_description(error_str)
#                     pbar.update(loader_train.batch_size * args.num_gpus)

#         if gpu == 0:
#             pbar.close()

#             writer_train.update(epoch, sample, output)

#             # if args.save_full or epoch == args.epochs:
#             #     state = {
#             #         'net': net.module.state_dict(),
#             #         'optimizer': optimizer.state_dict(),
#             #         'scheduler': scheduler.state_dict(),
#             #         # 'amp': amp.state_dict(),
#             #         'args': args
#             #     }
#             # else:
#             #     state = {
#             #         'net': net.module.state_dict(),
#             #         'args': args
#             #     }
#             # Check whether net is wrapped (e.g. by DataParallel/DDP) and
#             # exposes its real model via a 'module' attribute
#             if hasattr(net, 'module'):
#                 net_state_dict = net.module.state_dict()
#             else:
#                 net_state_dict = net.state_dict()

#             if args.save_full or epoch == args.epochs:
#                 state = {
#                     'net': net_state_dict,
#                     'optimizer': optimizer.state_dict(),
#                     'scheduler': scheduler.state_dict(),
#                     'args': args
#                 }
#             else:
#                 state = {
#                     'net': net_state_dict,
#                     'args': args
#                 }

#             torch.save(state, '{}/model_{:05d}.pt'.format(args.save_dir, epoch))

#         # Val
#         # torch.set_grad_enabled(False)
#         # net.eval()

#         num_sample = len(loader_val) * loader_val.batch_size
#         if gpu == 0:
#             pbar = tqdm(total=num_sample)
#         log_cnt = 0.0
#         log_loss = 0.0

#         init_seed()
#         for batch, sample in enumerate(loader_val):
#             sample = {key: val.cuda(gpu) for key, val in sample.items()
#                       if val is not None}

#             with torch.no_grad():
#                 output = net(sample)

#             loss_sum, loss_val = loss(sample, output)

#             # Divide by batch size
#             loss_sum = loss_sum / loader_val.batch_size
#             loss_val = loss_val / loader_val.batch_size

#             if gpu == 0:
#                 metric_val = metric.evaluate(sample, output, 'val')
#                 writer_val.add(loss_val, metric_val)

#                 log_cnt += 1
#                 log_loss += loss_sum.item()

#                 current_time = time.strftime('%y%m%d@%H:%M:%S')
#                 error_str = '{:<10s}| {} | Loss = {:.4f}'.format(
#                     'Val', current_time, log_loss / log_cnt)
#                 if batch % args.print_freq == 0:
#                     pbar.set_description(error_str)
#                     pbar.update(loader_val.batch_size)

#         if gpu == 0:
#             pbar.close()

#             writer_val.update(epoch, sample, output)
#             writer_val.save(epoch, batch, sample, output)

#         torch.set_grad_enabled(True)

#         scheduler.step()

#         # print(f"Total parameters: {total_params}")
#         # print(f"Trainable parameters: {trainable_params}")


# def test(args):
#     # Prepare dataset
#     data = get_data(args)

#     data_test = data(args, 'test')

#     loader_test = DataLoader(dataset=data_test, batch_size=1,
#                              shuffle=False, num_workers=args.num_threads)

#     # Network
#     if args.model == 'CompletionFormer':
#         net = CompletionFormer(args)
#     else:
#         raise TypeError(args.model, ['CompletionFormer',])
#     net.cuda()

#     if args.pretrain is not None:
#         assert os.path.exists(args.pretrain), \
#             "file not found: {}".format(args.pretrain)

#         checkpoint = torch.load(args.pretrain)
#         key_m, key_u = net.load_state_dict(checkpoint['net'], strict=False)

#         if key_u:
#             print('Unexpected keys :')
#             print(key_u)

#         if key_m:
#             print('Missing keys :')
#             print(key_m)
#             raise KeyError
#         print('Checkpoint loaded from {}!'.format(args.pretrain))

#     net = nn.DataParallel(net)

#     # total_params, trainable_params = count_parameters(net)
#     # print(f"Total parameters: {total_params}")
#     # print(f"Trainable parameters: {trainable_params}")
#     # return 0

#     metric = CompletionFormerMetric(args)

#     try:
#         os.makedirs(args.save_dir, exist_ok=True)
#         os.makedirs(args.save_dir + '/test', exist_ok=True)
#     except OSError:
#         pass

#     writer_test = CompletionFormerSummary(args.save_dir, 'test', args, None, metric.metric_name)

#     net.eval()

#     num_sample = len(loader_test)*loader_test.batch_size

#     pbar = tqdm(total=num_sample)

#     t_total = 0

#     init_seed()
#     for batch, sample in enumerate(loader_test):
#         sample = {key: val.cuda() for key, val in sample.items()
#                   if val is not None}

#         t0 = time.time()
#         with torch.no_grad():
#             output = net(sample)
        
#         # wkl
#         # torch.cuda.empty_cache()

#         t1 = time.time()

#         t_total += (t1 - t0)

#         metric_val = metric.evaluate(sample, output, 'test')

#         writer_test.add(None, metric_val)

#         # Save data for analysis
#         if args.save_image:
#             writer_test.save(args.epochs, batch, sample, output)

#         current_time = time.strftime('%y%m%d@%H:%M:%S')
#         error_str = '{} | Test'.format(current_time)
#         if batch % args.print_freq == 0:
#             pbar.set_description(error_str)
#             pbar.update(loader_test.batch_size)
#     # del output
#     pbar.close()

#     writer_test.update(args.epochs, sample, output)
#     t_avg = t_total / num_sample
#     print('Elapsed time : {} sec, '
#           'Average processing time : {} sec'.format(t_total, t_avg))
    
#     # total_params, trainable_params = count_parameters(net)
#     # print(f"Total parameters: {total_params}")
#     # print(f"Trainable parameters: {trainable_params}")
    


# def main(args):
#     init_seed()
#     if not args.test_only:
#         if args.no_multiprocessing:
#             train(0, args)
#         else:
#             assert args.num_gpus > 0

#             spawn_context = mp.spawn(train, nprocs=args.num_gpus, args=(args,),
#                                      join=False)

#             while not spawn_context.join():
#                 pass

#             for process in spawn_context.processes:
#                 if process.is_alive():
#                     process.terminate()
#                 process.join()

#         args.pretrain = '{}/model_{:05d}.pt'.format(args.save_dir, args.epochs)

#     test(args)


# if __name__ == '__main__':
#     args_main = check_args(args_config)

#     print('\n\n=== Arguments ===')
#     cnt = 0
#     for key in sorted(vars(args_main)):
#         print(key, ':',  getattr(args_main, key), end='  |  ')
#         cnt += 1
#         if (cnt + 1) % 5 == 0:
#             print('')
#     print('\n')

#     main(args_main)





