import copy
import gc
import json
import logging
import math
import os
import sys
from abc import abstractmethod
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Union, cast  # noqa: F401

import torch
import torch.nn as nn
from lightning.fabric.utilities.rank_zero import rank_zero_only
from omegaconf import DictConfig
from torch import Tensor
from torch.utils.data import DataLoader
from tqdm.autonotebook import tqdm

from fusion_bench.compat.method import ModelFusionAlgorithm
from fusion_bench.compat.modelpool import ModelPool
from fusion_bench.mixins.lightning_fabric import LightningFabricMixin
from fusion_bench.mixins.simple_profiler import SimpleProfilerMixin
from fusion_bench.models.wrappers.layer_wise_fusion import (
    LayerWiseMergedModel,
    get_layer_wise_weights,
)
from fusion_bench.taskpool import CLIPVisionModelTaskPool
from fusion_bench.utils.data import load_tensor_from_file

from .entropy_loss import entropy_loss

log = logging.getLogger(__name__)


# obtain the current GPU memory usage
def get_memory_usage(desc):
    """Return a human-readable summary of the current CUDA memory usage.

    Args:
        desc: Header string prepended to the report (e.g. a description of
            the point in the program where the measurement is taken).

    Returns:
        A three-line string: the header, the allocated CUDA memory, and the
        reserved (cached) CUDA memory, both expressed in MB.
    """
    mib = 1024**2  # bytes per MB
    allocated_mb = torch.cuda.memory_allocated() / mib
    reserved_mb = torch.cuda.memory_reserved() / mib
    report_lines = [
        desc,
        f"Allocated Memory: {allocated_mb:.2f} MB",
        f"Cached Memory: {reserved_mb:.2f} MB",
    ]
    return "\n".join(report_lines)


class ModelScheduler:
    """
    Manage the storage of the pretrained and fine-tuned models, schedule the
    order in which models are merged (via a complete n-ary tree over model
    indices), and move model weights between the CPU and the GPU.
    """

    def __init__(
        self,
        modelpool: ModelPool,
        config: DictConfig,
    ):
        """
        Load every model from the pool and initialize the bookkeeping used by
        the bandit-style reward/scheduling logic.

        Args:
            modelpool (ModelPool): Pool providing the "_pretrained_" model and
                the named fine-tuned models.
            config (DictConfig): Algorithm configuration (expects at least
                `weights`, `init_values`, `clamp_weights`, `tie_weights`,
                `strict`).
        """
        self.pretrained_model = modelpool.load_model("_pretrained_")
        self.finetuned_models = [
            modelpool.load_model(name) for name in modelpool.model_names
        ]
        self.num_finetuned_models = len(self.finetuned_models)
        # working copies that receive newly merged weights (see `store_model`)
        # before being committed back via `update_models`
        self.new_finetuned_models = copy.deepcopy(self.finetuned_models)
        self.finetuned_models_name = [name for name in modelpool.model_names]

        self.config = config
        # identity mapping model-slot -> model-index; permuted by `update_btree`
        self.orig_index = [i for i in range(self.num_finetuned_models)]
        # placeholder trees; rebuilt by `construct_complete_binary_tree`
        self.btree_matrix = [0, 1]
        self.btree_matrix_rev = [0, 1]
        # per-model visit counts and reward accumulators (UCB-style bookkeeping)
        self.count_list = len(self.finetuned_models) * [1.0]
        self.count_all = len(self.finetuned_models)
        self.reward_list = len(self.finetuned_models) * [1.0]
        self.reward_part1 = len(self.finetuned_models) * [0.0]
        # per-model-name entropy scores from the previous evaluation round
        self.pareto_pre = {}
        self.name2index = {}

    # def construct_complete_binary_tree(self):
    #     num = self.num_finetuned_models
    #     result = []
    #     for i in range(num):
    #         node_list = [i]
    #         left = 2 * i + 1
    #         right = 2 * i + 2
    #         if left < num:
    #             node_list.append(left)
    #         if right < num:
    #             node_list.append(right)
    #         if len(node_list) > 1:
    #             result.append(node_list)
    #     result.sort(key=lambda x: x[0], reverse=True)
    #     self.btree_matrix = result
    #     self.btree_matrix_rev = list(reversed(result))
    #     return result
    def construct_complete_binary_tree(self, n=2):
        """
        Build a complete n-ary tree over the model indices.

        Each entry of the result is a list ``[parent, child_1, ..., child_k]``
        (only parents that have at least one child are kept). The result is
        sorted by parent index in descending order and stored on the instance
        as `btree_matrix` (and its reverse as `btree_matrix_rev`).

        Args:
            n (int): Branching factor of the tree; must be a positive integer.

        Returns:
            list[list[int]]: The parent-with-children node lists.
        """
        if not isinstance(n, int) or n <= 0:
            # NOTE(review): error message is Chinese for "n must be a positive
            # integer" — consider translating for consistency with the logs.
            raise ValueError("n必须是一个正整数。")

        num_nodes = self.num_finetuned_models
        result = []

        for parent in range(num_nodes):
            children = [parent]
            # compute the index range of this parent's children
            child_start = n * parent + 1
            child_end = n * parent + n + 1  # exclusive upper bound

            # walk over all candidate children
            for child in range(child_start, child_end):
                if child < num_nodes:
                    children.append(child)
                else:
                    break  # past the last node; stop adding children

            if len(children) > 1:
                result.append(children)

        # sort by parent index, descending
        result.sort(key=lambda x: x[0], reverse=True)

        # store on the instance
        self.btree_matrix = result
        self.btree_matrix_rev = result[::-1]
        return result

    def count_update(self, model_id):
        """Increment the visit count of `model_id` and the global visit count."""
        self.count_list[model_id] = self.count_list[model_id] + 1
        self.count_all += 1

    def update_rewards_pareto(self, scores_list, update_id):
        """
        Update the reward of model `update_id` from its new entropy scores.

        The reward increment is the summed relative change of the scores with
        respect to the previous round (stored in `pareto_pre`); `reward_list`
        is then recomputed as the accumulated reward minus an exploration
        term, and all models are moved back to the CPU.

        Args:
            scores_list: Per-task entropy scores of the updated model.
            update_id (int): Index of the model whose reward is updated.
        """
        # print("##############################update-rewards-Pareto#####################################")
        # update_models = [{'name':self.finetuned_models_name[update_id], 'model':self.finetuned_models[update_id]}] #for i in self.btree_matrix]
        pre_acc = copy.deepcopy(self.pareto_pre[self.finetuned_models_name[update_id]])
        # scores_rpt = evaluate_merged_model(taskpool, update_models)[0]
        # del pre_rpt["average"]
        # del scores_rpt['model_info']
        self.pareto_pre[self.finetuned_models_name[update_id]] = scores_list
        new_acc = scores_list
        pre_acc = torch.tensor(pre_acc)
        new_acc = torch.tensor(new_acc)
        reward = torch.sum((new_acc - pre_acc) / pre_acc).item()
        self.reward_part1[update_id] = self.reward_part1[update_id] + reward
        self.count_list[update_id] = self.count_list[update_id] + 1
        self.count_all += 1
        # NOTE(review): a standard UCB exploration bonus divides by the visit
        # count; here the term multiplies by `count_list` (and by the number of
        # models) — confirm this is the intended exploration schedule.
        self.reward_list = torch.tensor(self.reward_part1) - 0.05 * torch.sqrt(
            (torch.log(torch.tensor(self.count_all)))
            / 2
            * len(self.count_list)
            * torch.tensor(self.count_list)
        )
        self.reward_list = self.reward_list.tolist()
        self.move_to("cpu")



    """ 
    def update_rewards(self, taskpool, update_id, evaluate_merged_model: Callable):
        print("##############################update-rewards#####################################")
        update_models = [{'name':self.finetuned_models_name[update_id], 'model':self.finetuned_models[update_id]}] #for i in self.btree_matrix]
        scores_rpt = evaluate_merged_model(taskpool, update_models)
        reward = scores_rpt[0]["average"]["accuracy"]
        rewards = torch.zeros(len(self.finetuned_models))
        rewards[update_id] = reward
        temp = torch.tensor(self.reward_part1) * torch.tensor(self.count_list)
        temp[update_id] = temp[update_id]+reward
        self.count_list[update_id] = self.count_list[update_id] + 1
        self.count_all += 1
        self.reward_part1 = temp/torch.tensor(self.count_list)
        log.info("part1: {self.reward_part1}")
        log.info("part2: {torch.sqrt((torch.log(torch.tensor(self.count_all)))/2*len(self.count_list)*torch.tensor(self.count_list))}")
        #self.reward_list = self.reward_part1+torch.sqrt((2*torch.log(torch.tensor(self.count_all)))/torch.tensor(self.count_list))*100
        self.reward_list = self.reward_part1-torch.sqrt((torch.log(torch.tensor(self.count_all)))/2*len(self.count_list)*torch.tensor(self.count_list))
        self.reward_part1 = self.reward_part1.tolist()
        #self.reward_list[self.btree_matrix[0]] = (self.reward_list[self.btree_matrix[0]]*self.count_list[self.btree_matrix[0]]+reward)/(self.count_list[self.btree_matrix[0]]+1)+torch.sqrt((2*torch.log(torch.tensor(self.count_all)))/self.count_list[self.btree_matrix[0]]).item()/100
        #_, indexes = torch.topk(self.reward_list,len(self.btree_matrix))
        self.reward_list = self.reward_list.tolist()
        self.move_to("cpu")
    """

    def update_btree(self, modelpool):
        """
        Re-label the tree node indices according to the current rewards.

        Models are ranked by descending reward; every index appearing in
        `btree_matrix` is remapped through that ranking, the reversed tree is
        refreshed, and the accumulated `reward_part1` is reset to zero.

        Args:
            modelpool: Unused; kept for interface compatibility with callers.
        """
        sorted_index = (
            torch.tensor(self.reward_list).argsort(dim=0, descending=True).tolist()
        )  # indexed_list.argsorted(reverse=True)
        mapping = dict(zip(self.orig_index, sorted_index))
        self.btree_matrix = [
            [mapping[item] for item in sublist] for sublist in self.btree_matrix
        ]
        self.btree_matrix_rev = list(reversed(self.btree_matrix))
        self.orig_index = sorted_index
        self.reward_part1 = [0 for _ in self.reward_part1]

    """
    def update_rewards(self, taskpool, evaluate_merged_model: Callable):
        update_models = [{'name':self.finetuned_models_name[self.btree_matrix[0]], 'model':self.finetuned_models[self.btree_matrix[0]]}] #for i in self.btree_matrix]
        scores_rpt = evaluate_merged_model(taskpool, update_models)
        reward = scores_rpt[0]["average"]["accuracy"]
        rewards = torch.zeros(len(self.finetuned_models))
        rewards[self.btree_matrix[0]] = reward
        temp = torch.tensor(self.reward_part1) * torch.tensor(self.count_list)
        temp[self.btree_matrix[0]] = temp[self.btree_matrix[0]]+reward
        self.count_list[self.btree_matrix[0]] = self.count_list[self.btree_matrix[0]] + 1
        self.count_all += 1
        self.reward_part1 = temp/torch.tensor(self.count_list)
        self.reward_list = self.reward_part1+torch.sqrt((2*torch.log(torch.tensor(self.count_all)))/torch.tensor(self.count_list))#/100
        self.reward_part1 = self.reward_part1.tolist()
        #self.reward_list[self.btree_matrix[0]] = (self.reward_list[self.btree_matrix[0]]*self.count_list[self.btree_matrix[0]]+reward)/(self.count_list[self.btree_matrix[0]]+1)+torch.sqrt((2*torch.log(torch.tensor(self.count_all)))/self.count_list[self.btree_matrix[0]]).item()/100
        _, indexes = torch.topk(self.reward_list,len(self.btree_matrix))
        self.reward_list = self.reward_list.tolist()
        self.btree_matrix = indexes.tolist()
        self.move_to("cpu")
    """

    @torch.no_grad()  # not sure whether to use this
    def __call__(self, btree, model_id):
        """
        Build the layer-wise merged model for one tree node.

        Deep-copies the pretrained model and the fine-tuned models listed in
        ``btree[model_id]``, then wraps them in a `LayerWiseMergedModel` whose
        layer-wise weights are either freshly initialized from
        ``config.init_values`` or loaded from the file path in
        ``config.weights``.

        Args:
            btree: List of node lists (parent index followed by child indices).
            model_id (int): Index of the node within `btree`.

        Returns:
            LayerWiseMergedModel: The wrapped, mergeable model.

        Raises:
            ValueError: If ``config.weights`` is set but is not a string path.
        """
        # TODO: use a mixing matrix to determine which models to use in step idx

        # merge three models
        # pretrained_model = copy.deepcopy(self.finetuned_models[model_id])
        # finetuned_models = [
        #    copy.deepcopy(self.finetuned_models[(model_id+1)%self.num_finetuned_models]),
        #    copy.deepcopy(self.finetuned_models[(model_id-1)%self.num_finetuned_models])
        # ]
        # merge four models
        """
        pretrained_model = copy.deepcopy(self.pretrained_model)
        finetuned_models = [
            copy.deepcopy(self.finetuned_models[(model_id+1)%self.num_finetuned_models]),
            copy.deepcopy(self.finetuned_models[model_id]),
            copy.deepcopy(self.finetuned_models[(model_id-1)%self.num_finetuned_models])
        ]
        """
        pretrained_model = copy.deepcopy(self.pretrained_model)
        finetuned_models = [
            copy.deepcopy(self.finetuned_models[i % self.num_finetuned_models])
            for i in btree[model_id]
        ]


        # initialize layer-wise weights using the provided configuration `init_values` or load from file if `weights` is provided
        if self.config.weights is None:
            layer_wise_weight = get_layer_wise_weights(
                num_models=len(finetuned_models),
                num_layers=len(
                    tuple(
                        filter(lambda p: p.requires_grad, pretrained_model.parameters())
                    )
                ),
                init_values=self.config.init_values,
            )
        else:
            if isinstance(self.config.weights, str):
                # self.config.weights is a path to a saved tensor
                layer_wise_weight = load_tensor_from_file(self.config.weights)
            else:
                raise ValueError(f"Unsupported weights format: {self.config.weights}")
        module = LayerWiseMergedModel(
            layer_wise_weight=layer_wise_weight,
            pretrained_model=pretrained_model,
            finetuned_models=finetuned_models,
            clamp_weights=self.config.clamp_weights,
            tie_weights=self.config.tie_weights,
            strict=self.config.strict,
        )
        print(f"{layer_wise_weight.size()=}, {layer_wise_weight.numel()=}")
        return module

    def store_model(self, new_finetuned_model_dict, model_id):
        """
        Store a newly merged state dict into the staging copy of model
        `model_id` (committed later by `update_models`).

        Args:
            new_finetuned_model_dict: State dict produced by a merge step.
            model_id (int): Index of the model slot to overwrite.
        """
        self.new_finetuned_models[model_id].load_state_dict(new_finetuned_model_dict)

    def update_models(self):
        """Commit the staged models as the current fine-tuned models."""
        self.finetuned_models = copy.deepcopy(self.new_finetuned_models)

    def update_pretrain(self, model_id):
        """Replace the pretrained model with a copy of fine-tuned model `model_id`."""
        self.pretrained_model = copy.deepcopy(self.finetuned_models[model_id])

    def get_final_models(self):
        """
        Return all fine-tuned models plus their uniform parameter average.

        Returns:
            list[dict]: One ``{"name", "model"}`` entry per fine-tuned model,
            followed by an entry named "average model" holding the uniform
            average of all fine-tuned models' parameters.
        """
        # need a check
        final_models = [
            {"name": name, "model": model}
            for name, model in zip(self.finetuned_models_name, self.finetuned_models)
        ]
        num_finetuned_models = len(self.finetuned_models)

        # build the uniform average: zero the trainable entries, then
        # accumulate 1/N of every model's parameters
        average_model = copy.deepcopy(self.pretrained_model)
        state_dict = average_model.state_dict(keep_vars=True)
        for name, _ in self.finetuned_models[0].named_parameters():
            state_dict[name].data.zero_()
        for model in self.finetuned_models:
            for name, param in model.named_parameters():
                state_dict[name] = state_dict[name] + 1 / num_finetuned_models * param

        average_model.load_state_dict(state_dict)
        final_models += [{"name": "average model", "model": average_model}]

        return final_models

    def move_to(self, device):
        """Move every fine-tuned model to `device` (the pretrained model stays put)."""
        # self.pretrained_model.to(device=device)
        for model in self.finetuned_models:
            model.to(device=device)

class LayerWiseBtreemergeAlgorithm(
    ModelFusionAlgorithm,
    LightningFabricMixin,
    SimpleProfilerMixin,
):
    """
    Implements the layer-wise btree-merge algorithm (a tree-scheduled variant
    of Layer-Wise AdaMerging).

    This class merges the layers of a pretrained model with those of several fine-tuned models.
    The merging is controlled by layer-wise weights, which can be initialized based on a provided configuration or loaded from a file.
    """

    def __init__(self, algorithm_config: DictConfig):
        """
        Initialize the algorithm with the given configuration.

        Args:
            algorithm_config (DictConfig): The configuration for the algorithm.
        """
        super().__init__(algorithm_config)

    @rank_zero_only
    def save_merging_weights(self, file_path: str, merging_weights: torch.Tensor):
        """
        Save the merging weights to a file.

        Only runs on the global-zero rank and only when the
        `save_merging_weights` config flag is set.

        Args:
            file_path (str): The path to save the merging weights.
            merging_weights (torch.Tensor): The merging weights to save.
        """
        if self.fabric.is_global_zero and self.config.get(
            "save_merging_weights", False
        ):
            if isinstance(file_path, str) and not file_path.startswith(("/", ".")):
                # if the file path is not absolute or relative to current working directory, save it in the log directory
                save_path = os.path.join(self.log_dir, file_path)
            else:
                save_path = file_path
            log.info(f"saving merging weights to {save_path}.")
            if os.path.dirname(save_path):
                os.makedirs(os.path.dirname(save_path), exist_ok=True)
            torch.save(merging_weights.detach().cpu(), save_path)

    def free_gpu_memory(self, module: LayerWiseMergedModel):
        """
        Move the wrapped model's submodels to the CPU, drop the wrapper, and
        release cached CUDA memory.

        Args:
            module (LayerWiseMergedModel): The wrapper to dispose of.
        """
        module.pretrained_model.to("cpu")
        for model in module.task_vectors:
            model.to("cpu")
        del module
        gc.collect()
        torch.cuda.empty_cache()
        log.info(get_memory_usage("after freeing memory, the memory usage of GPU is:"))

    def update_datasets(self, datasets):
        """
        For every epoch of local adamerging, only use the datasets
        corresponding to the models involved in the fusion (each entry is
        unioned with its ring neighbors).

        Args:
            datasets: A list of sets of dataset names, one per model.

        Returns:
            The same list, updated in place.
        """
        num_datasets = len(datasets)
        datasets_copy = datasets.copy()
        for i in range(num_datasets):
            datasets[i] = (
                datasets_copy[i]
                .union(datasets_copy[(i + 1) % num_datasets])
                .union(datasets_copy[(i - 1) % num_datasets])
            )
        return datasets

    def run(self, modelpool: ModelPool):
        """
        Run the layer-wise btree-merge algorithm.

        Builds a `ModelScheduler` over the pool, initializes per-model entropy
        scores, then alternates tree-ordered local AdaMerging passes (forward
        over `btree_matrix`, then backward over `btree_matrix_rev`) for
        `btreemerge_max_steps` rounds, updating rewards after each local merge.

        Args:
            modelpool (ModelPool): The model pool containing the pretrained and fine-tuned models.

        Returns:
            The list of final models (plus their average) from
            `ModelScheduler.get_final_models`.
        """
        log.info("Fusing models using layer-wise adaptive merging.")
        self.modelpool = modelpool
        self.log_hyperparams(self.config)
        self.num_finetuned_models = len(modelpool.model_names)
        datasets = set([dataset for dataset in modelpool.model_names])

        with self.profile("construct the wrapped model"):
            model_scheduler = ModelScheduler(self.modelpool, self.config)
        model_scheduler.construct_complete_binary_tree(n=2)
        tree_loglist = []
        datasets = set([item for item in self.modelpool.model_names])
        scores_list = self.init_pareto_pre(model_scheduler.finetuned_models, datasets)
        for i, name in enumerate(modelpool.model_names):
            model_scheduler.pareto_pre[name] = scores_list[i]
        datasets = set([dataset for dataset in modelpool.model_names])
        if self.config.weights is not None:
            # skip the test-time adaptation
            # NOTE(review): `module` is not defined in this scope, so taking
            # this branch raises NameError — confirm the intended behavior
            # when preset weights are provided.
            return module.merge_and_unload()
        else:
            for step_idx in tqdm(
                range(self.config.btreemerge_max_steps),
                "Btreemerge merging",
                dynamic_ncols=True,
            ):
                # datasets = self.update_datasets(datasets)

                log.info(f"Btree merging step:, {step_idx}")
                model_scheduler.update_btree(self.modelpool)

                # NOTE(review): this appends the bound method object itself,
                # not the current tree; `copy.deepcopy(model_scheduler.btree_matrix)`
                # was probably intended for logging.
                tree_loglist.append(model_scheduler.update_btree)

                log.info(f"Btree:{model_scheduler.btree_matrix}")
                print(100 * "###")
                print(model_scheduler.reward_list)
                print(model_scheduler.btree_matrix)
                print(100 * "###")
                # forward pass over the tree: merge each parent with its
                # children and store the result back into the parent slot
                for model_id in tqdm(
                    range(len(model_scheduler.btree_matrix)),
                    "local admerging",
                    dynamic_ncols=True,
                ):
                    with self.profile("construct the local wrapped model"):
                        module = model_scheduler(model_scheduler.btree_matrix, model_id)
                    datasets = set(
                        [
                            modelpool.model_names[i]
                            for i in model_scheduler.btree_matrix[model_id]
                        ]
                    )
                    log.info(f"the datasets used in this local merging is {datasets}")
                    with self.profile("test-time adaptation"):
                        module = self.test_time_adaptation(module, datasets)
                    # if self.config.get("save_merging_weights", False):
                    #     self.save_merging_weights(
                    #         self.config.save_merging_weights, module.merge_weight
                    #     )
                    model_scheduler.store_model(
                        module.merge_weights(),
                        model_scheduler.btree_matrix[model_id][0],
                    )
                    model_scheduler.update_models()
                    log.info(
                        get_memory_usage(
                            f"after local merging ({modelpool.model_names[model_id]}), the memory usage of GPU is:"
                        )
                    )

                    datasets = set([item for item in self.modelpool.model_names])
                    scores_list = self.compute_entropy(
                        model_scheduler.finetuned_models[
                            model_scheduler.btree_matrix[model_id][0]
                        ],
                        datasets,
                    )
                    model_scheduler.update_rewards_pareto(
                        scores_list, model_scheduler.btree_matrix[model_id][0]
                    )
                    self.free_gpu_memory(
                        module
                    )  # simulate distributed GPU memory usage as much as possible

                # backward pass over the reversed tree: merge again and store
                # the result into the child with the larger accumulated reward
                for model_id in tqdm(
                    range(len(model_scheduler.btree_matrix_rev)),
                    "local admerging",
                    dynamic_ncols=True,
                ):
                    with self.profile("construct the local wrapped model"):
                        module = model_scheduler(
                            model_scheduler.btree_matrix_rev, model_id
                        )
                    datasets = set(
                        [
                            modelpool.model_names[i]
                            for i in model_scheduler.btree_matrix_rev[model_id]
                        ]
                    )
                    log.info(f"the datasets used in this local merging is {datasets}")
                    with self.profile("test-time adaptation"):
                        module = self.test_time_adaptation(module, datasets)
                    # if self.config.get("save_merging_weights", False):
                    #     self.save_merging_weights(
                    #         self.config.save_merging_weights, module.merge_weight
                    #     )
                    # model_scheduler.store_model(module.merge_weights(), model_scheduler.btree_matrix_rev[model_id][0])
                    # datasets = set([item for item in self.modelpool.model_names])
                    # scores_list=self.compute_entropy(model_scheduler.new_finetuned_models[model_scheduler.btree_matrix_rev[model_id][0]],datasets)
                    # datasets = set([item for item in self.modelpool.model_names])
                    if len(model_scheduler.btree_matrix_rev[model_id]) > 2:
                        # two children: store the merge into the child with the
                        # HIGHER accumulated reward (the other keeps its weights)
                        reward_1 = model_scheduler.reward_part1[
                            model_scheduler.btree_matrix_rev[model_id][-1]
                        ]
                        reward_2 = model_scheduler.reward_part1[
                            model_scheduler.btree_matrix_rev[model_id][-2]
                        ]
                        if reward_1 > reward_2:
                            model_scheduler.store_model(
                                module.merge_weights(),
                                model_scheduler.btree_matrix_rev[model_id][-2],
                            )  # , model_id)
                            model_scheduler.update_models()
                            log.info(
                                get_memory_usage(
                                    f"after local merging ({modelpool.model_names[model_id]}), the memory usage of GPU is:"
                                )
                            )
                            model_scheduler.count_update(
                                model_scheduler.btree_matrix_rev[model_id][-2]
                            )
                            # model_scheduler.update_rewards_pareto(scores_list, model_scheduler.btree_matrix_rev[model_id][-2])
                        else:
                            model_scheduler.store_model(
                                module.merge_weights(),
                                model_scheduler.btree_matrix_rev[model_id][-1],
                            )  # , model_id)
                            model_scheduler.update_models()
                            log.info(
                                get_memory_usage(
                                    f"after local merging ({modelpool.model_names[model_id]}), the memory usage of GPU is:"
                                )
                            )
                            model_scheduler.count_update(
                                model_scheduler.btree_matrix_rev[model_id][-1]
                            )
                            # model_scheduler.update_rewards_pareto(scores_list, model_scheduler.btree_matrix_rev[model_id][-1])
                    else:
                        # single child: it always receives the merged weights
                        model_scheduler.store_model(
                            module.merge_weights(),
                            model_scheduler.btree_matrix_rev[model_id][-1],
                        )  # , model_id)
                        model_scheduler.update_models()
                        log.info(
                            get_memory_usage(
                                f"after local merging ({modelpool.model_names[model_id]}), the memory usage of GPU is:"
                            )
                        )
                        model_scheduler.count_update(
                            model_scheduler.btree_matrix_rev[model_id][-1]
                        )
                        # model_scheduler.update_rewards_pareto(scores_list, model_scheduler.btree_matrix_rev[model_id][-1])
                    self.free_gpu_memory(
                        module
                    )  # simulate distributed GPU memory usage as much as possible

                model_scheduler.update_models()
                do_evaluation = False  # whether to do evaluation after each Gossip step
                if isinstance(self.config.accuracy_test_interval, list):
                    if (step_idx+1) in self.config.accuracy_test_interval:
                        do_evaluation = True
                elif isinstance(self.config.accuracy_test_interval, int):
                    if ((step_idx+1) % self.config.accuracy_test_interval == 0):
                        do_evaluation = True
                if do_evaluation:
                    # NOTE(review): `self._program` is not set anywhere in this
                    # class — presumably injected by the surrounding program;
                    # verify before enabling periodic evaluation.
                    report = self._program.evaluate_merged_model(self._program.taskpool,  model_scheduler.get_final_models())
                    model_scheduler.move_to('cpu')
            log.info(f"tree map: {tree_loglist}")
        return model_scheduler.get_final_models()

    def evaluate_merged_model(self, taskpool, merged_model, model_names):
        """
        Evaluates the merged model using the provided task pool.

        Accepts a single module, a dict of modules (metadata entries are
        passed through unchanged), or an iterable of modules (evaluated
        recursively, zipped with `model_names`).
        """
        if isinstance(merged_model, nn.Module):
            report = taskpool.evaluate(merged_model, model_names)
            return report
        elif isinstance(merged_model, Dict):
            report = {}
            for model_name,(key, item) in zip(model_names, merged_model.items()):
                if isinstance(item, nn.Module):
                    report[key] = taskpool.evaluate(item,model_name)
                else:
                    # metadata
                    report[key] = item
            return report
        elif isinstance(merged_model, Iterable):
            return [
                self.evaluate_merged_model(taskpool, m, model_name)
                for m,model_name in tqdm(zip(merged_model,model_names), desc="Evaluating models")
            ]
        else:
            raise ValueError(f"Invalid type for merged model: {type(merged_model)}")

    def compute_cosine_similarity_sum(self, module):
        """
        Return the mean L2 norm of the module's trainable matrices.

        Despite the name, the cosine-similarity computation is currently
        commented out; only the norm penalty is returned.
        """
        task_vectors = module.task_vectors
        trainable_matrices = module.trainable_matrices
        penalty = 0
        for trainable_matrix in trainable_matrices:
            penalty = penalty + torch.norm(trainable_matrix, p=2)
        penalty = penalty / len(trainable_matrices)
        # def get_param_vector(task_vector):
        #    param_diff = []
        #    for param, matrix in zip(task_vector.parameters(), trainable_matrices):
        #        param_diff.append((param.data - matrix.to(module.device)).flatten())
        #    return torch.cat(param_diff)
        # param_vectors = [get_param_vector(task_vector) for task_vector in task_vectors]
        # param_matrix = torch.stack(param_vectors)  # Shape: (n_models, vector_size)
        # param_matrix_norm = param_matrix / param_matrix.norm(dim=1, keepdim=True)
        # cosine_similarity_matrix = torch.mm(param_matrix_norm, param_matrix_norm.T)  # Shape: (n_models, n_models)
        # cosine_similarity_sum = cosine_similarity_matrix.triu(diagonal=1).sum().item()
        return penalty  # cosine_similarity_sum, penalty

    def compute_entropy(self, model, datasets):
        """
        Compute one-batch entropy scores of `model` on each selected task.

        Args:
            model: The model to score.
            datasets: Collection of task names to score on (tasks of the pool
                not in `datasets` are skipped).

        Returns:
            list[float]: One entropy value per selected task, in pool order.
        """
        self.on_test_time_adaptation_start()
        entropy_list = []
        for task in self.modelpool.model_names:
            if task not in datasets:
                continue
            with self.profile("data loading"):
                batch = next(self.get_shuffled_test_loader_iter(task))
            with self.profile("forward pass"):
                model.to(batch[0].device)
                logits = self.compute_logits(model, batch[0], task)
                model.to("cpu")
                entropy = entropy_loss(logits)
            # entropy = entropy.item()
            entropy_list.append(entropy.item())
        return entropy_list

    def init_pareto_pre(self, models, datasets):
        """Compute the initial per-task entropy scores for every model."""
        scores_list = []
        for model in models:
            scores = self.compute_entropy(model, datasets)
            scores_list.append(scores)
        print(scores_list)
        return scores_list

    def on_test_time_adaptation_start(self):
        """
        Something to do before the test-time adaptation starts. Such as setting up the task-specific heads.
        """
        pass

    @abstractmethod
    def get_shuffled_test_loader_iter(self, task: str) -> DataLoader:
        """
        Loader of test dataset for test-time adaptation. labels are not needed.

        Args:
            task (str): The name of the task.

        Returns:
            DataLoader: The data loader for the test dataset.
        """
        pass

    @abstractmethod
    def compute_logits(self, module, images: Tensor, task: str) -> Tensor:
        """
        Compute the logits for the given images and task.

        Args:
            module: The model module.
            images (Tensor): The input images.
            task (str): The name of the task.

        Returns:
            Tensor: The computed logits.
        """
        pass

    def compute_logits_singlemodel(
        self,
        module,
        images: torch.Tensor,
        task: str,
        index: int,
        image_embeds: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        Compute CLIP-style logits for a single submodel of the wrapper.

        NOTE(review): relies on `self.zeroshot_weights`, `self.visual_projection`
        and `self.logit_scale_exp`, which are not defined in this class —
        presumably set up by a subclass / `on_test_time_adaptation_start`.

        Args:
            module: Wrapper exposing `forward_single_model_loss`.
            images (torch.Tensor): Batch of input images.
            task (str): Task name used to select the zero-shot text embeddings.
            index (int): Index of the submodel within the wrapper.
            image_embeds (Optional[torch.Tensor]): Precomputed image embeddings;
                computed from `images` when None.

        Returns:
            torch.Tensor: Logits per image.
        """
        text_embeds = self.zeroshot_weights[task]

        if image_embeds is None:
            image_embeds = module.forward_single_model_loss(index, images)[1]
        assert isinstance(
            image_embeds, torch.Tensor
        ), f"`image_embeds` must be a tensor, but got {type(image_embeds)}"
        image_embeds = self.visual_projection(image_embeds)

        # normalize embeddings
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)

        # cosine similarity
        logits_per_text = (
            torch.matmul(text_embeds, image_embeds.t()) * self.logit_scale_exp
        )
        logits_per_image = logits_per_text.t()

        return logits_per_image


    def test_time_adaptation(self, module: LayerWiseMergedModel, datasets):
        """
        Perform test-time adaptation on the merged model.

        This method adapts the merging weights during test-time to improve
        performance: for `config.max_steps` iterations, one batch per selected
        task is drawn, the entropy loss is back-propagated into
        `module.merge_weight`, and the model is re-merged after each
        optimizer step.

        Args:
            module (LayerWiseMergedModel): The merged model.
            datasets: Task names to adapt on (other pool tasks are skipped).

        Returns:
            LayerWiseMergedModel: The adapted merged model.

        Raises:
            ValueError: If `config.optimizer` is not "adam".
        """

        self.on_test_time_adaptation_start()

        # configure optimizer
        if self.config.optimizer == "adam":
            optimizer = torch.optim.Adam(
                [module.merge_weight], lr=self.config.lr
            )
            print(f"{optimizer=}")
            # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,T_max=10)  #cos scheduler
            module, optimizer = self.fabric.setup(module, optimizer)
            log.info(
                get_memory_usage(
                    "after loading models and optimizer, the memory usage of GPU is:"
                )
            )
        else:
            raise ValueError(f"Unsupported optimizer: {self.config.optimizer}")
        module.train()
        module.merge_weights()
        for step_idx in (
            pbar := tqdm(
                range(self.config.max_steps if not self.is_debug_mode else 1),
                ("[DEBUG MODE] " if self.is_debug_mode else "")
                + "AdaMerging Test-time adaptation",
                dynamic_ncols=True,
            )
        ):
            # default behavior for first-order optimizers
            #index = 0
            for task in self.modelpool.model_names:
                # penalty = self.compute_cosine_similarity_sum(module)
                # cosloss, penalty = self.compute_cosine_similarity_sum(module)
                if task not in datasets:
                    continue
                with self.profile("data loading"):
                    batch = next(self.get_shuffled_test_loader_iter(task))
                with self.profile("forward pass"):
                    #module.merge_weights()
                    logits = self.compute_logits(module, batch[0], task)
                    #logits_singlemodel = self.compute_logits_singlemodel(module, batch[0], task, index)
                    #index += 1
                    entropyloss = entropy_loss(logits)
                    #entropyloss_singlemodel = entropy_loss(logits_singlemodel)
                    #print("entropyloss: ", entropyloss)
                    #print("entropyloss_singlemodel: ", entropyloss_singlemodel)
                    # loss = entropyloss + cosloss + 1e-3*penalty
                    #loss = entropyloss+torch.exp(entropyloss - entropyloss_singlemodel)  # + 1e-3*penalty
                    loss = entropyloss
                    # print("#########################################")
                    # print("entropy: ",penalty.device)
                    # print("cosloss: ",cosloss)
                    # print("penalty: ",penalty)
                    # print("#########################################")
                    # sys.exit()
                with self.profile("backward pass"):
                    # gradients accumulate over tasks; retain_graph keeps the
                    # shared merge graph alive for the remaining tasks
                    self.fabric.backward(loss, retain_graph=True)
            with self.profile("optimizer step"):
                optimizer.step()
                # scheduler.step() #cos scheduler
                optimizer.zero_grad()
            with self.profile("merging weights"):
                module.merge_weights()

            metrics = {
                "train/loss": loss.item(),
                "train/weight_max": module.merge_weight.max().item(),
                "train/weight_min": module.merge_weight.min().item(),
                "train/weight_mean": module.merge_weight.mean().item(),
            }
            self.fabric.log_dict(metrics, step=step_idx)
            pbar.set_postfix(metrics)

        self.print_profile_summary()
        del optimizer
        gc.collect()
        torch.cuda.empty_cache()
        return module
