import argparse

import config
from models import build_model, build_mtl_model
from mtl_loss_schemes import compute_dwa_coefficients
import torch
import pynvml
import torch.nn as nn
import torch.nn.functional as F

def test_DWA():
    """Smoke-test DWA (dynamic weight averaging) coefficients on fixed loss data.

    Prints the base per-task weights, the computed DWA coefficients, and the
    rescaled weights so the numbers can be eyeballed.
    """
    weights = {
        # 'depth': 1.0,
        'semseg': 1.0,
        'normals': 10.0,
        'sal': 5.0,
        'human_parts': 2.0,
        # 'edge': 50.0,
    }
    print(weights)
    # Two past epochs of per-task losses, plus the current epoch's losses.
    history = {
        "semseg": [2.9293, 2.8871],
        "normals": [0.6741, 0.7031],
        "sal": [0.1785, 0.2150],
        "human_parts": [1.9094, 1.9725],
    }
    current = {"semseg": 2.9056, "normals": 0.6488, "sal": 0.1508, "human_parts": 1.8127}
    coefficients = compute_dwa_coefficients(history, current, T=1.0)
    print(coefficients)
    # Rescale every base weight by its DWA coefficient (insertion order kept).
    weights = {task: coefficients[task] * base for task, base in weights.items()}
    print(weights)


def test_model():
    """Build the model from the default config file and print its architecture."""
    arg_parser = argparse.ArgumentParser(
        'Swin Transformer training and evaluation script', add_help=False)
    arg_parser.add_argument(
        '--cfg',
        type=str,
        default="configs/mtlora/tiny_448/mtlora_tiny_448_r16_scale4_pertask.yaml",
        metavar="FILE",
        help='')
    arg_parser.add_argument(
        "--opts",
        help="Modify config options by adding 'KEY VALUE' pairs. ",
        default=None,
        nargs='+')
    arg_parser.add_argument(
        "--local_rank",
        type=int,
        default=0,
        help='local rank for DistributedDataParallel')
    arg_parser.add_argument(
        '--tasks',
        type=str,
        default='depth',
        help='List of tasks to run in MTL setup.')
    parsed = arg_parser.parse_args()
    # Clone the default config and overlay the CLI-provided options.
    cfg = config._C.clone()
    config.update_config(cfg, parsed)
    model = build_model(cfg)
    # Wrap with the multi-task heads only when MTL mode is enabled.
    if cfg.MTL:
        model = build_mtl_model(model, cfg)
    print(model)


def get_least_used_gpu():
    """Return the index of the NVIDIA GPU with the most free memory.

    Queries NVML for every visible device and picks the device whose free
    memory is largest. On a tie, the higher device index wins (same as the
    original descending sort).

    Returns:
        int: the selected GPU's device index.

    Raises:
        RuntimeError: if NVML reports no visible GPUs.
        pynvml.NVMLError: propagated from any failing NVML call.
    """
    pynvml.nvmlInit()
    try:
        device_count = pynvml.nvmlDeviceGetCount()
        free_mem = []
        for i in range(device_count):
            handle = pynvml.nvmlDeviceGetHandleByIndex(i)
            info = pynvml.nvmlDeviceGetMemoryInfo(handle)
            free_mem.append((info.free, i))
        if not free_mem:
            raise RuntimeError("No NVIDIA GPUs visible to NVML")
        # max() over (free, index) pairs is O(n) and, on equal free memory,
        # prefers the larger index — identical to sorted(..., reverse=True)[0].
        _, best_gpu = max(free_mem)
        return best_gpu
    finally:
        # Always release NVML, even when a query above raises.
        pynvml.nvmlShutdown()


# Script entry point: build the model from the default config and print it.
if __name__ == '__main__':
    test_model()