# NOTE(review): this entire module is commented out — it appears to be a
# superseded backbone/learner factory kept for reference. Confirm nothing
# imports it, then either delete it or restore the maintained parts.
# import timm
# import importlib
# from utils.toolkit import NamespaceDict


# def get_learner(learner_name, args):
#     try:
#         module = importlib.import_module(f"learners.{learner_name}")
#         Learner = getattr(module, "Learner")
#     except ModuleNotFoundError:
#         raise ValueError(f"Unsupported learner name: {learner_name}")
#     except AttributeError:
#         raise ValueError(
#             f"Learner class not found in module: learners.{learner_name}"
#         )

#     return Learner(args)


# def get_backbone(
#     args,
# ):
#     name = args["backbone_type"]

#     tuning_config = get_tuning_config(args)
#     model = timm.create_model(
#         name,
#         num_classes=0,
#         global_pool=False,
#         drop_path_rate=0.0,
#         tuning_config=tuning_config,
#     )
#     model.out_dim = 768
#     return model.eval()


# def get_tuning_config(args, extra_params=None):
#     config = {
#         "embed_dim": 768,
#         "peft_name": args.get("peft_name", None),
#         "ffn_option": args.get("ffn_option", "none"),
#         "ffn_rank": args["ffn_rank"],
#         "ffn_adapter_scalar": args.get("ffn_adapter_scalar", 1.0),
#         "ffn_adapter_layernorm_option": args.get(
#             "ffn_adapter_layernorm_option", "none"
#         ),
#         "moe_topk": args.get("moe_topk", None),
#         "cam_visual": args["cam_visual"],
#         "_device": args["device"][0],
#     }
#     if extra_params:
#         config.update(extra_params)
#     return NamespaceDict(config)


# TODO(JOKE): the snippet below duplicates the "pretrained_vit_b16_224" /
# "vit_base_patch16_224" branch of get_backbone1 (see that function's first
# branch) — remove it or fold it into the factory instead of keeping a loose copy.
# if name in ["pretrained_vit_b16_224", "vit_base_patch16_224"]:
#     model = timm.create_model(
#         "vit_base_patch16_224", pretrained=True, num_classes=0
#     )
#     model.out_dim = 768
#     return model.eval()


# def get_backbone1(args):
#     name = args["backbone_type"]
#     peft_name = args.get("peft_name", "lora")
#     ffn_adapter_scalar = args.get("ffn_adapter_scalar", 1.0)
#     ffn_adapter_layernorm_option = args.get(
#         "ffn_adapter_layernorm_option", "none"
#     )
#     ffn_rank = args["ffn_rank"]
#     ffn_adapt = args.get("ffn_adapt", True)
#     cam_visual = args["cam_visual"]

#     if name == "pretrained_vit_b16_224" or name == "vit_base_patch16_224":
#         model = timm.create_model(
#             "vit_base_patch16_224", pretrained=True, num_classes=0
#         )
#         model.out_dim = 768
#         return model.eval()

#     elif (
#         name == "pretrained_vit_b16_224_in21k"
#         or name == "vit_base_patch16_224.augreg_in21k"
#         or name == "vit_base_patch16_224_in21k_slca"
#     ):
#         # model = timm.create_model(
#         #     "vit_base_patch16_224.augreg_in21k", pretrained=True, num_classes=0
#         # )
#         # model.out_dim = 768
#         # return model.eval()
#         from backbone import vit
#         from easydict import EasyDict

#         tuning_config = EasyDict(
#             vpt_on=False,
#             vpt_num=0,
#             _device=args["device"][0],
#             # visualization
#             cam_visual=cam_visual,
#         )
#         model_func = getattr(vit, name, None)
#         if model_func is not None:
#             model = model_func(
#                 num_classes=0,
#                 global_pool=False,
#                 drop_path_rate=0.0,
#                 tuning_config=tuning_config,
#             )
#             model.out_dim = 768
#             return model.eval()
#         else:
#             raise NotImplementedError("Unknown type {}".format(name))

#     elif "_baseSingle" in name:
#         ffn_rank = args["ffn_rank"]
#         if args["learner_name"] == "baseline":
#             from backbone import vit_baseSingle
#             from easydict import EasyDict

#             tuning_config = EasyDict(
#                 # AdaptFormer
#                 ffn_adapt=True,
#                 ffn_option="parallel",
#                 ffn_adapter_layernorm_option=ffn_adapter_layernorm_option,
#                 ffn_adapter_scalar=ffn_adapter_scalar,
#                 ffn_rank=ffn_rank,
#                 embed_dim=768,
#                 # VPT related
#                 vpt_on=False,
#                 vpt_num=0,
#                 _device=args["device"][0],
#                 # visualization
#                 cam_visual=cam_visual,
#             )
#             model_func = getattr(vit_baseSingle, name, None)
#             if model_func is not None:
#                 model = model_func(
#                     num_classes=0,
#                     global_pool=False,
#                     drop_path_rate=0.0,
#                     tuning_config=tuning_config,
#                 )
#                 model.out_dim = 768
#                 return model.eval()
#             else:
#                 raise NotImplementedError("Unknown type {}".format(name))

#     elif "_baseMulti" in name:
#         ffn_rank = args["ffn_rank"]
#         if args["learner_name"] == "baseline":
#             from backbone import vit_baseMulti
#             from easydict import EasyDict

#             tuning_config = EasyDict(
#                 # AdaptFormer
#                 ffn_adapt=True,
#                 ffn_option="parallel",
#                 ffn_adapter_layernorm_option=ffn_adapter_layernorm_option,
#                 ffn_adapter_scalar=ffn_adapter_scalar,
#                 ffn_rank=ffn_rank,
#                 embed_dim=768,
#                 # VPT related
#                 vpt_on=False,
#                 vpt_num=0,
#                 _device=args["device"][0],
#                 # visualization
#                 cam_visual=cam_visual,
#             )
#             model_func = getattr(vit_baseMulti, name, None)
#             if model_func is not None:
#                 model = model_func(
#                     num_classes=0,
#                     global_pool=False,
#                     drop_path_rate=0.0,
#                     tuning_config=tuning_config,
#                 )
#                 model.out_dim = 768
#                 return model.eval()
#             else:
#                 raise NotImplementedError("Unknown type {}".format(name))

#     elif "_sdm" in name:
#         ffn_rank = args["ffn_rank"]
#         if (
#             args["learner_name"] == "sdm"
#             or args["learner_name"] == "sdm_slca"
#             or args["learner_name"] == "sdm_pfr"
#         ):
#             from backbone import vit_sdm
#             from easydict import EasyDict

#             tuning_config = EasyDict(
#                 embed_dim=768,
#                 # adapter config
#                 ffn_adapt=True,
#                 ffn_option="parallel",
#                 ffn_adapter_layernorm_option=ffn_adapter_layernorm_option,
#                 ffn_adapter_scalar=ffn_adapter_scalar,
#                 ffn_rank=ffn_rank,
#                 # subspace merge config
#                 topk=args["topk"],
#                 # device
#                 _device=args["device"][0],
#                 # visualization
#                 cam_visual=cam_visual,
#             )
#             model_func = getattr(vit_sdm, name, None)
#             if model_func is not None:
#                 model = model_func(
#                     num_classes=0,
#                     global_pool=False,
#                     drop_path_rate=0.0,
#                     tuning_config=tuning_config,
#                 )
#                 model.out_dim = 768
#                 return model.eval()
#             else:
#                 raise NotImplementedError("Unknown type {}".format(name))

#     elif "_ssiat" in name:
#         from backbone import vit_ssiat
#         from easydict import EasyDict

#         tuning_config = EasyDict(
#             embed_dim=768,
#             # adapter config
#             ffn_adapt=ffn_adapt,
#             ffn_option="parallel",
#             ffn_adapter_layernorm_option=ffn_adapter_layernorm_option,
#             ffn_adapter_scalar=ffn_adapter_scalar,
#             ffn_rank=ffn_rank,
#             # device
#             _device=args["device"][0],
#             # visualization
#             cam_visual=cam_visual,
#         )
#         model_func = getattr(vit_ssiat, name, None)
#         if model_func is not None:
#             model = model_func(
#                 num_classes=0,
#                 global_pool=False,
#                 drop_path_rate=0.0,
#                 tuning_config=tuning_config,
#             )
#             model.out_dim = 768
#             return model.eval()
#         else:
#             raise NotImplementedError("Unknown type {}".format(name))

#     elif "_multiNet" in name:
#         from backbone import vit_multi
#         from easydict import EasyDict

#         tuning_config = EasyDict(
#             embed_dim=768,
#             # adapter config
#             peft_name=peft_name,
#             ffn_adapt=ffn_adapt,
#             ffn_option="parallel",
#             ffn_adapter_layernorm_option=ffn_adapter_layernorm_option,
#             ffn_adapter_scalar=ffn_adapter_scalar,
#             ffn_rank=ffn_rank,
#             # device
#             _device=args["device"][0],
#             # visualization
#             cam_visual=cam_visual,
#         )
#         # model = timm.create_model(
#         #     name,
#         #     num_classes=0,
#         #     global_pool=False,
#         #     drop_path_rate=0.0,
#         #     tuning_config=tuning_config,
#         # )
#         # model.out_dim = 768
#         # return model.eval()
#         model_func = getattr(vit_multi, name, None)
#         if model_func is not None:
#             model = model_func(
#                 num_classes=0,
#                 global_pool=False,
#                 drop_path_rate=0.0,
#                 tuning_config=tuning_config,
#             )
#             model.out_dim = 768
#             return model.eval()

#         else:
#             raise NotImplementedError("Unknown type {}".format(name))
#     elif "_ease" in name:
#         from backbone import vit_ease
#         from easydict import EasyDict

#         tuning_config = EasyDict(
#             embed_dim=768,
#             # adapter config
#             peft_name=peft_name,
#             ffn_adapt=ffn_adapt,
#             ffn_option="parallel",
#             ffn_adapter_layernorm_option=ffn_adapter_layernorm_option,
#             ffn_adapter_scalar=ffn_adapter_scalar,
#             ffn_rank=ffn_rank,
#             # device
#             _device=args["device"][0],
#             # visualization
#             cam_visual=cam_visual,
#         )
#         model_func = getattr(vit_ease, name, None)
#         if model_func is not None:
#             model = model_func(
#                 num_classes=0,
#                 global_pool=False,
#                 drop_path_rate=0.0,
#                 tuning_config=tuning_config,
#             )
#             model.out_dim = 768
#             return model.eval()
#         else:
#             raise NotImplementedError("Unknown type {}".format(name))

#     elif "_svdmoe" in name:
#         ffn_rank = args["ffn_rank"]
#         from backbone import vit_svdmoe
#         from easydict import EasyDict

#         tuning_config = EasyDict(
#             # AdaptFormer
#             ffn_adapt=True,
#             ffn_option="parallel",
#             ffn_adapter_layernorm_option="none",
#             ffn_adapter_init_option="lora",
#             ffn_adapter_scalar=ffn_adapter_scalar,
#             ffn_rank=ffn_rank,
#             embed_dim=768,
#             # VPT related
#             vpt_on=False,
#             vpt_num=0,
#             _device=args["device"][0],
#             # visualization
#             cam_visual=cam_visual,
#         )
#         if name == "vit_base_patch16_224_svdmoe":
#             model = vit_svdmoe.vit_base_patch16_224_svdmoe(
#                 num_classes=0,
#                 global_pool=False,
#                 drop_path_rate=0.0,
#                 tuning_config=tuning_config,
#             )
#             model.out_dim = 768
#         elif name == "vit_base_patch16_224_in21k_svdmoe":
#             model = vit_svdmoe.vit_base_patch16_224_in21k_svdmoe(
#                 num_classes=0,
#                 global_pool=False,
#                 drop_path_rate=0.0,
#                 tuning_config=tuning_config,
#             )
#             model.out_dim = 768
#         else:
#             raise NotImplementedError("Unknown type {}".format(name))
#         return model.eval()

#     elif "_moe" in name:
#         ffn_rank = args["ffn_rank"]
#         from backbone import vit_moe
#         from easydict import EasyDict

#         tuning_config = EasyDict(
#             # AdaptFormer
#             peft_name=peft_name,
#             ffn_adapt=True,
#             ffn_option="parallel",
#             ffn_adapter_layernorm_option="none",
#             ffn_adapter_init_option="lora",
#             ffn_adapter_scalar=ffn_adapter_scalar,
#             ffn_rank=ffn_rank,
#             embed_dim=768,
#             # device
#             _device=args["device"][0],
#             # visualization
#             cam_visual=cam_visual,
#         )
#         model_func = getattr(vit_moe, name, None)
#         if model_func is not None:
#             model = model_func(
#                 num_classes=0,
#                 global_pool=False,
#                 drop_path_rate=0.0,
#                 tuning_config=tuning_config,
#             )
#             model.out_dim = 768
#             return model.eval()
#         else:
#             raise NotImplementedError("Unknown type {}".format(name))
#         return model.eval()

#     elif "_svd" in name:
#         ffn_rank = args["ffn_rank"]
#         from backbone import vit_svd_bp
#         from easydict import EasyDict

#         tuning_config = EasyDict(
#             # AdaptFormer
#             ffn_adapt=True,
#             ffn_option="parallel",
#             ffn_adapter_layernorm_option="none",
#             ffn_adapter_init_option="lora",
#             ffn_adapter_scalar=ffn_adapter_scalar,
#             ffn_rank=ffn_rank,
#             embed_dim=768,
#             bias=args["bias"],
#             _device=args["device"][0],
#         )
#         if name == "vit_base_patch16_224_svd":
#             model = vit_svd_bp.vit_base_patch16_224_svd(
#                 num_classes=0,
#                 global_pool=False,
#                 drop_path_rate=0.0,
#                 tuning_config=tuning_config,
#             )
#             model.out_dim = 768
#         elif name == "vit_base_patch16_224_in21k_svd":
#             model = vit_svd_bp.vit_base_patch16_224_in21k_svd(
#                 num_classes=0,
#                 global_pool=False,
#                 drop_path_rate=0.0,
#                 tuning_config=tuning_config,
#             )
#             model.out_dim = 768
#         else:
#             raise NotImplementedError("Unknown type {}".format(name))
#         return model.eval()

#     else:
#         raise NotImplementedError("Unknown type {}".format(name))

#     raise NotImplementedError("Unknown type {}".format(name))
