import copy
import logging
import os
import time
from copy import deepcopy
from re import U
from typing import List

import numpy as np
import torch
import torch.nn.functional as F
from omegaconf import OmegaConf
from torch import Tensor, nn
from tqdm.auto import tqdm

from fusion_bench.method import BaseAlgorithm
from fusion_bench.method.s2_moe.utils import TSVC_utils
from fusion_bench.method.simple_average import simple_average
from fusion_bench.utils.state_dict_arithmetic import state_dict_add, state_dict_div, state_dict_mul
from fusion_bench.mixins.simple_profiler import SimpleProfilerMixin
from fusion_bench.modelpool import BaseModelPool
from fusion_bench.models.smile_moe.linear_from_module import ExpertNotTrainedError
from fusion_bench.models.smile_moe.utils import _is_all_zeros
from fusion_bench.models.utils import get_attr, set_attr
from fusion_bench.utils.parameters import print_parameters
from fusion_bench.models.s2_moe.s2moelinear import S2MoELinear

from transformers import AutoTokenizer, AutoModel

# Module-level logger for this algorithm.
log = logging.getLogger(__name__)


class S2MoEUpscalingAlgorithm(
    SimpleProfilerMixin,
    BaseAlgorithm,
):
    """Upscale a pretrained model into an S2-MoE model by merging fine-tuned experts.

    Linear layers of the pretrained model are replaced by `S2MoELinear`
    mixture-of-experts modules; routing factors are derived from the task
    singular vectors (SVD of the weight differences between each fine-tuned
    model and the pretrained backbone).
    """

    # Layer types that get upscaled into MoE modules.
    _linear_layer_cls = (nn.Linear,)
    # Maps constructor arguments to config keys (extends the base mapping).
    _config_mapping = BaseAlgorithm._config_mapping | {
        "device": "device",
        "upscaling_accelerator": "upscaling_accelerator",
        "full_matrices": "full_matrices",
        "gate_k": "gate_k",
        "k": "k",
        "top_k": "top_k",
        "routing_use_diff": "routing_use_diff",
        "average_experts": "average_experts",
        "model_path": "model_path",
        "threshold": "threshold",  # activation threshold for task subspaces
        "sv_reduction": "sv_reduction",  # SVD compression ratio
        "save_model_path": "save_model_path",  # directory to save the merged model
    }

    def __init__(
        self,
        *,
        device: str = "cuda",
        upscaling_accelerator: str = None,
        full_matrices: bool = True,
        gate_k: int = 256,
        k: int = 256,
        top_k: int = 2,
        threshold: float = 0.1,  # activation threshold for task subspaces
        sv_reduction: float = 0.05,  # SVD compression ratio
        routing_use_diff: bool = True,
        average_experts: bool = False,
        model_path: str = None,
        save_model_path: str = None,  # directory to save the merged model
        **kwargs,
    ):
        """
        Initialize the S2MoEUpscalingAlgorithm.

        Args:
            device (str): Device used for computation.
            upscaling_accelerator (str): Device used for SVD computation.
            full_matrices (bool): Whether to compute full-sized U and V matrices.
            gate_k (int): Number of singular values kept for the gating network.
            k (int): Number of singular values kept per expert.
            top_k (int): Number of top experts selected.
            threshold (float): Threshold for activating a task subspace.
            sv_reduction (float): SVD compression ratio.
            routing_use_diff (bool): Whether to route on weight differences.
            average_experts (bool): Whether to average the experts.
            model_path (str): Path for saving/loading the model.
            save_model_path (str): Directory where the merged model is saved.
            **kwargs: Extra arguments; each is logged as a warning and then
                set as an attribute on the instance.
        """
        super().__init__()
        self.device = device
        self.upscaling_accelerator = upscaling_accelerator
        self.full_matrices = full_matrices
        self.gate_k = gate_k
        self.k = k
        self.top_k = top_k
        self.threshold = threshold  # activation threshold for task subspaces
        self.sv_reduction = sv_reduction  # SVD compression ratio
        self.routing_use_diff = routing_use_diff
        self.average_experts = average_experts
        self.model_path = model_path
        self.save_model_path = save_model_path  # directory to save the merged model
        self.rout_svd_cache = None  # reserved cache for routing SVD results
        self.is_continue = False  # set True when merging into an existing MoE ("continue_cache")
        for key, value in kwargs.items():
            log.warning(f"Unrecognized argument: {key}")
            setattr(self, key, value)

        # Print the resolved configuration.
        print(f"=== Config for `{type(self).__name__}` ===")
        print(OmegaConf.to_yaml(self.config))
        print(f"=== Config for `{type(self).__name__}` ===")

    @torch.no_grad()
    def run(self, modelpool: BaseModelPool):
        """
        Executes the upscaling process.

        Loads the pretrained and fine-tuned models, merges them via TSV
        (`tsv_continue` when the pool starts with a "continue_cache" entry,
        otherwise `tsv_m`), upscales linear layers into MoE modules, and
        optionally saves the resulting model.

        Args:
            modelpool (ModelPool): The pool of models to be used for upscaling.

        Returns:
            nn.Module: The upscaled model.
        """
        if not isinstance(modelpool, BaseModelPool):
            modelpool = BaseModelPool(modelpool)

        # Shortcut: reload a previously saved merged model.
        # NOTE(review): torch.load on a full model unpickles arbitrary code;
        # only load trusted checkpoints.
        if self.config.model_path is not None and os.path.exists(
            self.config.model_path
        ):
            log.info(f"Loading model from {self.config.model_path}")
            model = torch.load(self.config.model_path)
            print_parameters(model)
            return model
        
        with self.profile("loading model"):
            # load models and move to GPU if available
            with self.profile("load pretrained model"):
                pretrained_model = modelpool.load_model("_pretrained_")

                # helper: detect experts count by scanning modules that have 'experts'
                def _detect_experts_count(model: nn.Module):
                    for _, module in model.named_modules():
                        if hasattr(module, 'experts'):
                            experts = getattr(module, 'experts')
                            try:
                                return len(experts)
                            except TypeError:
                                # experts may not support len(); fallback to children count
                                return len(list(experts.children()))
                    return None

                
                
                
                with self.profile("load fine-tuned model"):
                    finetuned_models = [
                        m
                        for m in tqdm(modelpool.models(), total=len(modelpool.model_names))
                    ]



                # Continual mode: prepend the previously merged MoE model so it
                # becomes finetuned_models[0].
                continue_flag = False
                if modelpool.model_names[0] == "continue_cache":
                    self.is_continue = True
                    # continue_cache_model = [torch.load("/sharedspace/hfh/fusionbench_0517/save_models/clip-vit-base-patch32_eight_tasks/model_full.pth", weights_only=False)]
                    continue_cache_model = [torch.load(modelpool._models["continue_cache"]+"/model_full.pth", weights_only=False)]
                    # compute experts count from continue_cache_model
                    # continue_cache_experts_count = _detect_experts_count(continue_cache_model)
                    # print(f"experts_count={continue_cache_experts_count}")
                    finetuned_models = continue_cache_model+finetuned_models

            
            # Restore the S2-MoE model back into multiple fine-tuned models (disabled).
            # if continue_flag:
            #     print("还原finetuned model!!!!!!!!!!!!!!!")
            #     pre_finetuned_models = [copy.deepcopy(pretrained_model) for _ in range(continue_cache_experts_count)]
            #     for name,module in pretrained_model.named_modules():
            #         name = name.split(".")
            #         if isinstance(module, self._linear_layer_cls):
            #             # shared linear under MoE container
            #             shared_linear = get_attr(continue_cache_model, name + ['shared_linear'])
            #             for i in range(continue_cache_experts_count):
            #                 expert_linear = get_attr(continue_cache_model, name + ['experts', str(i)])
            #                 target_linear = get_attr(pre_finetuned_models[i], name)
            #                 # sum weights
            #                 merged_weight = shared_linear.weight.data + expert_linear.weight.data
            #                 # merged_weight = shared_linear.weight.data + expert_linear.Aweight.data@expert_linear.Bweight.data
            #                 target_linear.weight.data.copy_(merged_weight)

            #                 # sum bias if present
            #                 if target_linear.bias is not None:
            #                     # print("bias available")
            #                     shared_bias = getattr(shared_linear, 'bias', None)
            #                     expert_bias = getattr(expert_linear, 'bias', None)
            #                     sb = shared_bias.data if shared_bias is not None else torch.zeros_like(target_linear.bias.data)
            #                     eb = expert_bias.data if expert_bias is not None else torch.zeros_like(target_linear.bias.data)
            #                     target_linear.bias.data.copy_(sb + eb) #.to(target_linear.bias.dtype))
            #                 set_attr(pre_finetuned_models[i], name, target_linear)

            #     finetuned_models = pre_finetuned_models + finetuned_models
            #     continue_flag = False
            
            if self.config.device == "cuda" and torch.cuda.is_available():
                pretrained_model = pretrained_model.cuda()
                finetuned_models = [m.cuda() for m in finetuned_models]
            
        # Merge the task vectors into the pretrained weights and collect the
        # per-layer SVD factors used later to build the MoE gates.
        # pretrained_model_orig = copy.deepcopy(pretrained_model)
        if modelpool.model_names[0] == "continue_cache":
            pretrained_model, orig_v, concat_u_list, concat_v_list = self.tsv_continue(pretrained_model, finetuned_models, continue_flag)
        else:
            pretrained_model, orig_v, concat_u_list, concat_v_list = self.tsv_m(pretrained_model, finetuned_models)

        with self.profile("merge model"): 
            if modelpool.model_names[0] == "continue_cache":
                model = self.construct_continue_moe(pretrained_model, finetuned_models, concat_u_list, concat_v_list, orig_v)
            else:
                model = self.merge(pretrained_model, finetuned_models, orig_v, concat_u_list, concat_v_list)
        
        # if modelpool.model_names[0] == "continue_cache":
        #     continue_flag = True
        #     continue_cache_model = torch.load("/sharedspace/hfh/fusionbench_0517/save_models/clip-vit-base-patch32_eight_tasks/model_full.pth", weights_only=False)
        #     # Replace experts in model with those from continue_cache_model, preserve extras not in continue_cache_model
        #     replaced_count = 0
        #     for name,_ in continue_cache_model.named_modules():
        #         if ".experts." in name:
        #             path_list = name.split(".")
        #             # only replace if destination path exists in current model
        #             src_module = get_attr(continue_cache_model, path_list)
        #             set_attr(model, path_list, copy.deepcopy(src_module))
        #             replaced_count += 1

            # for name,_ in continue_cache_model.named_modules():
            #     if "experts" in name:
            #         print(name)
            # for name,_ in model.named_modules():
            #     if "experts" in name:
            #         print(name)
            # import sys
            # sys.exit()

        
        # Add or update the task_num attribute.
        
        if hasattr(model, 'task_num'):
            model.task_num += 1
        else:
            model.task_num = len(finetuned_models)
        log.info(f"Model task_num set to {model.task_num}")

        self.print_profile_summary()
        
        # for name,_ in pretrained_model.named_modules():
        #     print(name)
        
        if self.config.save_model_path is not None:
            os.makedirs(self.config.save_model_path, exist_ok=True)
            # Save the full model (structure + weights).
            model_save_path = os.path.join(self.config.save_model_path, "model_full.pth")
            log.info(f"Saving full model (structure + weights) to {model_save_path}")
            torch.save(model, model_save_path)
        
            # # Also save the model parameters (weights only).
            # weights_save_path = os.path.join(self.config.save_model_path, "model_weights.pth")
            # log.info(f"Saving model weights to {weights_save_path}")
            # torch.save(model.state_dict(), weights_save_path)
        

        print_parameters(model)
        return model

    def merge(
        self,
        pretrained_model: nn.Module,
        finetuned_models: List[nn.Module],
        orig_v,
        concat_u_list,
        concat_v_list,
        in_place: bool = True,
        continue_flag: bool = False,
    ):
        """
        Merge the pretrained model with the fine-tuned models into an upscaled model.

        Args:
            pretrained_model (nn.Module): The pretrained model.
            finetuned_models (List[nn.Module]): A list of fine-tuned models.
            orig_v: Per-layer routing factors produced by the TSV step.
            concat_u_list: Per-layer concatenated U matrices.
            concat_v_list: Per-layer concatenated V matrices.
            in_place (bool): If True, modifies the pretrained model in place.
                Otherwise, operates on a deep copy.
            continue_flag (bool): Use the continual-merge upscaling path.

        Returns:
            nn.Module: The merged (upscaled) model.
        """
        target = pretrained_model if in_place else deepcopy(pretrained_model)
        self._upscale_submodules(
            target,
            finetuned_models,
            orig_v,
            concat_u_list,
            concat_v_list,
            continue_flag=continue_flag,
        )
        return target

    def _upscale_linear_layer(
        self,
        pretrained_model,
        finetuned_models,
        orig_v,
        concat_u=None,
        concat_v=None,
        name: str=None,
    ):
        """
        Replace the named linear layer with an `S2MoELinear` built from the
        corresponding layers of the fine-tuned models.

        Args:
            pretrained_model (nn.Module): The pretrained model (modified in place).
            finetuned_models (List[nn.Module]): The fine-tuned models providing experts.
            orig_v: Right singular vectors used by the gate for routing.
            concat_u: Concatenated U matrix for this layer (optional).
            concat_v: Concatenated V matrix for this layer (optional).
            name (str): Dotted name of the linear layer to upscale.
        """
        cfg = self.config

        path = name.split(".")
        base_linear = get_attr(pretrained_model, path)
        expert_linears = [get_attr(model, path) for model in finetuned_models]
        try:
            upscaled = S2MoELinear(
                base_linear,
                expert_linears,
                gate_k=cfg.gate_k,
                k=cfg.k,
                top_k=cfg.top_k,
                threshold=cfg.threshold,
                routing_use_diff=self.routing_use_diff,
                full_matrices=self.full_matrices,
                orig_v=orig_v,
                concat_u=concat_u,
                concat_v=concat_v,
                upscaling_accelerator=self.upscaling_accelerator,
            )
        except ExpertNotTrainedError:
            print(f"skip {name} because the experts are not trained.")
            return
        set_attr(pretrained_model, path, upscaled)
        # Detach the consumed layers from the fine-tuned models to free memory.
        for model in finetuned_models:
            set_attr(model, path, None)
            
    def _upscale_linear_layer_lora(
        self,
        pretrained_model,
        finetuned_models,
        orig_v,
        name: str,
    ):
        """
        Replace the named linear layer with an `S2MoELinear` (LoRA variant).

        Identical to `_upscale_linear_layer` except that no concatenated
        U/V factors are passed to the MoE module.

        Args:
            pretrained_model (nn.Module): The pretrained model (modified in place).
            finetuned_models (List[nn.Module]): The fine-tuned models providing experts.
            orig_v: Right singular vectors used by the gate for routing.
            name (str): Dotted name of the linear layer to upscale.
        """
        cfg = self.config

        path = name.split(".")
        base_linear = get_attr(pretrained_model, path)
        expert_linears = [get_attr(model, path) for model in finetuned_models]
        try:
            upscaled = S2MoELinear(
                base_linear,
                expert_linears,
                gate_k=cfg.gate_k,
                k=cfg.k,
                top_k=cfg.top_k,
                threshold=cfg.threshold,
                routing_use_diff=self.routing_use_diff,
                full_matrices=self.full_matrices,
                orig_v=orig_v,
                upscaling_accelerator=self.upscaling_accelerator,
            )
        except ExpertNotTrainedError:
            print(f"skip {name} because the experts are not trained.")
            return
        set_attr(pretrained_model, path, upscaled)
        # Detach the consumed layers from the fine-tuned models to free memory.
        for model in finetuned_models:
            set_attr(model, path, None)
            
    def _upscale_linear_layer_continue(
        self,
        pretrained_model,
        finetuned_models,
        orig_v,
        name: str,
    ):
        """
        Append a new expert for the named linear layer of an existing MoE model.

        ``finetuned_models[0]`` is the previously merged MoE model and
        ``finetuned_models[1]`` is the newly fine-tuned model. The gate's
        ``orig_v`` parameter list is extended, the gate's expert count is
        incremented, and a new expert holding the weight difference
        (fine-tuned minus pretrained) is added. The resulting MoE submodule is
        then copied into ``pretrained_model`` in place of the linear layer.

        Args:
            pretrained_model (nn.Module): The pretrained model (modified in place).
            finetuned_models (List[nn.Module]): [existing MoE model, new fine-tuned model].
            orig_v: Right singular vectors for this layer, one entry per task.
            name (str): Dotted name of the linear layer to upscale.
        """
        config = self.config

        name_list = name.split(".")
        moe_gate = name_list + ["gate"] + ["orig_v"]
        
        # Dynamically determine the number of existing experts by scanning
        # module names under "<name>.experts.<idx>".
        expert_count = 0
        base_expert_path = ".".join(name_list + ["experts"])
        for module_name, _ in finetuned_models[0].named_modules():
            if module_name.startswith(base_expert_path) and module_name != base_expert_path:
                parts = module_name[len(base_expert_path)+1:].split(".")
                if parts and parts[0].isdigit() and len(parts) == 1:
                    expert_idx = int(parts[0])
                    expert_count = max(expert_count, expert_idx + 1)
                    
        # Build the attribute paths of all existing experts.
        moe_experts = []
        for i in range(expert_count):
            moe_experts.append(name_list + ["experts", str(i)])
        
        
        shared_module = get_attr(pretrained_model, name_list)
        gate_weight = [item for item in get_attr(finetuned_models[0], moe_gate)]
        expert_weights = [get_attr(finetuned_models[0], moe_expert) for moe_expert in moe_experts]
       
        # Truncate each new V factor to the gate's existing column width.
        orig_v = [item[:,:gate_weight[0].shape[1]] for item in orig_v]
        # print(orig_v.shape)
        
        # Keep the existing gate factors and append the new task's factors.
        orig_v0 = [item for item in gate_weight]
        orig_v1 = [nn.Parameter(v, requires_grad=False) for v in orig_v[1:]]
        orig_v = orig_v0 + orig_v1
        orig_v = nn.ParameterList(
            [nn.Parameter(v, requires_grad=False) for v in orig_v]
        )

        set_attr(finetuned_models[0], moe_gate, orig_v)
        expert_num = get_attr(finetuned_models[0], name_list + ["gate", "num_experts"])
        set_attr(finetuned_models[0], name_list + ["gate", "num_experts"], expert_num + 1)
        # Do not overwrite the whole container with a Linear, otherwise the
        # experts would become unreachable afterwards.
        set_attr(finetuned_models[0], name_list + ["shared_linear"], shared_module)
        
        new_expert_path = name_list + ["experts", str(len(expert_weights))]
        # print(new_expert_path)
        new_expert = copy.deepcopy(expert_weights[0])
        
        # print(new_expert.weight)
        
        # The new expert stores the task vector (fine-tuned minus pretrained).
        diff_weight = get_attr(finetuned_models[1],name_list).weight - get_attr(pretrained_model, name_list).weight
        diff_module = get_attr(finetuned_models[1], name_list)
        diff_module.weight.data = diff_weight
        
        new_expert.set_parameters(diff_module)
        set_attr(finetuned_models[0], new_expert_path, new_expert)
        
        src_submodule = get_attr(finetuned_models[0], name_list)
        set_attr(pretrained_model, name_list, copy.deepcopy(src_submodule))
        

    def _average_experts(
        self,
        pretarined_model: nn.Module,
        finetuned_models: List[nn.Module],
        name: str,
        moe_model: nn.Module=None,
        continue_task_num: int = 1,
    ):
        """
        Average the experts for a given layer.

        In continual mode (``self.is_continue``) the parameters of the running
        MoE module are blended with the new expert as a weighted running mean:
        ``(moe * continue_task_num + expert) / (continue_task_num + 1)``.
        Otherwise a plain parameter average over all fine-tuned modules is used.

        Args:
            pretarined_model (nn.Module): The pretrained model (updated in place).
            finetuned_models: In continual mode a single fine-tuned model
                (see ``tsv_continue``); otherwise a list of fine-tuned models.
            name (str): Dotted name of the layer to average.
            moe_model (nn.Module): Existing merged model; required in continual mode.
            continue_task_num (int): Number of tasks already merged into ``moe_model``.
        """
        name_list = name.split(".")
        if self.is_continue:
            # NOTE(review): this branch expects `finetuned_models` to be a single
            # module and `moe_model` to be non-None — confirm callers.
            experts = get_attr(finetuned_models, name_list)
            moe_module = get_attr(moe_model, name_list)
            average_module = copy.deepcopy(experts)
            sd_moe = moe_module.state_dict(keep_vars=True)
            sd_exp = experts.state_dict(keep_vars=True)
            # Weighted running mean: the merged model counts `continue_task_num` times.
            sd_sum = state_dict_add(state_dict_mul(sd_moe, continue_task_num), sd_exp)
            new_sd = state_dict_div(sd_sum, continue_task_num + 1)
            average_module.load_state_dict(new_sd)
            set_attr(pretarined_model, name_list, average_module)
        else:
            experts = [get_attr(m, name_list) for m in finetuned_models]
            averaged_module = simple_average(experts)
            set_attr(pretarined_model, name_list, averaged_module)
            
        

    def _upscale_submodules(
        self,
        pretrained_model: nn.Module,
        finetuned_models: List[nn.Module],
        orig_v,
        concat_u_list=None,
        concat_v_list=None,
        continue_flag: bool = False,
        tqdm_desc: str = "Upscaling Linear Modules",
    ):
        """
        Upscales the submodules of the pretrained model by merging them with the corresponding submodules from the fine-tuned models.

        Linear layers are replaced by MoE modules; other leaf modules are
        parameter-averaged when ``config.average_experts`` is set.

        Args:
            pretrained_model (nn.Module): The pretrained model.
            finetuned_models (List[nn.Module]): A list of fine-tuned models.
            orig_v: Per-linear-layer routing factors, indexed in traversal order.
            concat_u_list: Per-layer concatenated U matrices (optional).
            concat_v_list: Per-layer concatenated V matrices (optional).
            continue_flag (bool): Use the continual-merge layer upscaling path.
            tqdm_desc (str): Description for the tqdm progress bar.
        """
        config = self.config
        # `i` indexes linear layers in the same named_modules() traversal order
        # used by tsv_m/tsv_continue to build orig_v and the concat lists.
        i = 0
        for name, module in tqdm(
            tuple(pretrained_model.named_modules()),
            tqdm_desc,
            leave=False,
            dynamic_ncols=True,
        ):
            if isinstance(module, self._linear_layer_cls):
                if continue_flag:
                    self._upscale_linear_layer_continue(
                        pretrained_model=pretrained_model,
                        orig_v=orig_v[i],
                        finetuned_models=finetuned_models,
                        name=name,
                    )
                else:
                    self._upscale_linear_layer(
                        pretrained_model=pretrained_model,
                        orig_v=orig_v[i],
                        finetuned_models=finetuned_models,
                        concat_u=concat_u_list[i] if concat_u_list is not None else None,
                        concat_v=concat_v_list[i] if concat_v_list is not None else None,
                        name=name,
                    )
                i += 1
                # print("is linear layer")
            elif config.average_experts and len(tuple(module.named_modules())) == 1:
                # if the module is a leaf module, we perform a parameter average
                # NOTE(review): when self.is_continue is True, _average_experts
                # dereferences moe_model=None here — confirm this path is unused
                # in continual mode.
                print("not linear layer, average experts")
                self._average_experts(pretrained_model, finetuned_models, name)
        
    def tsv_m(self, pretrained_model: nn.Module, finetuned_models: List[nn.Module], continue_flag: bool = False):
        """
        Create a merged model using the Task Singular Vector (TSV) merge method.

        For every linear layer the weight differences (fine-tuned minus
        pretrained) are SVD-decomposed, compressed by ``sv_reduction``,
        concatenated across tasks, re-orthogonalized, and the reconstructed
        update is added onto the pretrained weights. Non-linear leaf modules
        are parameter-averaged (or blended in continual mode).

        Args:
            pretrained_model: The pretrained model (modified in place).
            finetuned_models: The list of fine-tuned models.
            continue_flag: If True, treat ``finetuned_models[0]`` as an already
                merged model and blend the remaining models into it.

        Returns:
            Tuple ``(pretrained_model, orig_v, concat_u_list, concat_v_list)``:
            the merged model plus per-layer V factors for routing and the
            per-layer concatenated U/V matrices, in traversal order.
        """
        
        # if continue_flag:
        #     print("#####################################################")
        #     print("finetuned 0 task num: ", finetuned_models[0].task_num)
        #     sv_reduction = 1.0 / (finetuned_models[0].task_num + len(finetuned_models) - 1)  # ratio from the number of tasks
        # else:
        #     ft_model_length = len(finetuned_models)
        #     sv_reduction = 1.0 / ( 2 * ft_model_length )  # ratio from the number of models
        # Read the SVD compression ratio from the config.
        sv_reduction = self.config.sv_reduction if hasattr(self, "config") else self.sv_reduction
        
        # Collect all linear-layer modules up front to avoid re-traversal.
        linear_modules = [
            (name, module)
            for name, module in pretrained_model.named_modules()
            if isinstance(module, self._linear_layer_cls)
        ]

        # Non-linear leaf modules (handled by averaging / blending below).
        non_linear_modules = [
            (name, module)
            for name, module in pretrained_model.named_modules()
            if not isinstance(module, self._linear_layer_cls)
            and len(tuple(module.named_modules())) == 1
        ]
        if continue_flag:
            for name, _ in tqdm(non_linear_modules, desc="处理非线性层"):
                name_list = name.split(".")
                experts = [get_attr(m, name_list) for m in finetuned_models]
                assert len(experts) > 0, "modules must be a non-empty list"
                new_module = deepcopy(experts[0])
                # Merge the two state_dicts entry by entry.
                state_dict_0 = experts[0].state_dict()
                state_dict_1 = experts[1].state_dict()
                merged_state_dict = {}
                
                # Weighted average of each parameter.
                for key in state_dict_0.keys():
                    if key in state_dict_1:
                        merged_state_dict[key] = (1-sv_reduction) * state_dict_0[key] + sv_reduction * state_dict_1[key]
                    else:
                        merged_state_dict[key] = state_dict_0[key]
                new_module.load_state_dict(merged_state_dict)
                set_attr(pretrained_model, name_list, new_module)
        else:
            for name, _ in tqdm(non_linear_modules, desc="处理非线性层"):
                self._average_experts(pretrained_model, finetuned_models, name)

        # Per-layer outputs, in linear-module traversal order.
        orig_v = []
        concat_u_list = []
        concat_v_list = []
        
        for name, module in tqdm(linear_modules, desc="使用TSV合并线性层"):
            name_list = name.split(".")
            pretrained_module = get_attr(pretrained_model, name_list)
            
            if continue_flag:
                expert_modules = [get_attr(m, name_list) for m in finetuned_models[1:]]
                shared_linear = [get_attr(finetuned_models[0], name_list+["shared_linear"])]
                expert_modules = shared_linear + expert_modules
                # experts = get_attr(finetuned_models[0], name_list+["experts"])

                # experts_list = [getattr(experts, str(i)) for i in range(len(experts))]
                # experts0 = get_attr(finetuned_models[0], name_list+["experts"])
                # for item in expert_modules:
                #     print("expert module shape: ",item.weight.shape)
                # print("shared_linear shape: ", shared_linear.weight.shape)
                # for item in experts_list:
                #     print("expert shape: ",item.weight.shape)
                # print("experts0 shape: ", experts0.weight.shape)
                # import sys
                # sys.exit()
                # continue_modules = [get_attr(m, name_list+["shared_linear"]) for m in finetuned_models[0]]
                # continue_modules = [get_attr(m, name_list+["shared_linear"]) for m in finetuned_models[0]]                
            else:
                expert_modules = [get_attr(m, name_list) for m in finetuned_models]
            
            
            # Weight differences against the pretrained layer.
            weight_diffs = [
                expert.weight - pretrained_module.weight for expert in expert_modules
            ]
            # Skip the layer when every difference is zero.
            if _is_all_zeros(weight_diffs):
                continue

            with torch.no_grad():
                # Run compute_svd_and_compress on each difference matrix.
                svd_results = []
                svd_orig = []
                for i, diff in enumerate(weight_diffs):
                    # Move the difference matrix to the accelerator device.
                    device = self.upscaling_accelerator or diff.device
                    diff = diff.to(device)

                        
                    # Compute the SVD and compress it with TSVC_utils.
                    _, u, s, v, U, S, V = TSVC_utils.compute_svd_and_compress(
                        None, diff, sv_reduction
                    )
                    # Keep both the compressed (u, s, v) and full (U, S, V) factors.
                    svd_results.append((u, s, v))
                    svd_orig.append((U, S, V))

                # Combine the SVD results following TSVM_utils.
                
                if continue_flag:
                    # First entry: full factors of the existing merged model;
                    # remaining entries: compressed factors of the new tasks.
                    all_u = [result[0] for result in svd_orig[0:1]]
                    all_s = [result[1] for result in svd_orig[0:1]]
                    all_v = [result[2] for result in svd_orig[0:1]]
                    
                    
                    all_u = all_u + [result[0] for result in svd_results[1:]]
                    all_s = all_s + [result[1] for result in svd_results[1:]]
                    all_v = all_v + [result[2] for result in svd_results[1:]]
                    
                else:
                    all_u = [result[0] for result in svd_results]
                    all_s = [result[1] for result in svd_results]
                    all_v = [result[2] for result in svd_results]
                
                if continue_flag:
                    # orig_v.append(all_u)
                    orig_v.append(all_v)
                    
                    
                    concat_u = torch.cat([all_u[0],all_u[1]],dim=1)

                    concat_s = torch.cat([all_s[0],all_s[1]],dim=0)

                    concat_v = torch.cat([all_v[0],all_v[1]],dim=1)
                else:
                    # orig_v.append(all_u)
                    orig_v.append(all_v)
                    # svd_results = [all_u,all_s,all_v]
                    # Zero tensor shaped like the full U matrix.
                    # NOTE: U/S/V here come from the last loop iteration; this
                    # assumes all weight differences share the same shape.

                    concat_u = torch.zeros_like(U, device=all_u[0].device)

                    reduced_index_u = int(svd_orig[0][0].shape[1] * sv_reduction)

                    # Place each expert's U block into its own column range.
                    for i, u_tensor in enumerate(all_u):
                        concat_u[:, i * reduced_index_u : (i + 1) * reduced_index_u] = (
                            u_tensor[:, :reduced_index_u]
                        )

                    # Build the S vector following TSVM_utils.py:
                    # uniform singular values instead of the per-task ones.

                    # concat_s = torch.zeros_like(S, device=all_s[0].device)
                    concat_s = torch.ones_like(S, device=all_s[0].device)/S.shape[0]
                    
                    # reduced_index_s = int(svd_orig[0][1].shape[0] * sv_reduction)

                    # # Place each expert's S block into its own index range
                    # for i, s_tensor in enumerate(all_s):
                    #     concat_s[i * reduced_index_s : (i + 1) * reduced_index_s] = (
                    #         s_tensor[:reduced_index_s]
                    #     )

                    # Build the V matrix following TSVM_utils.py:
                    # zero tensor shaped like the full V matrix.
                    concat_v = torch.zeros_like(V, device=all_v[0].device)
                    reduced_index_v = int(svd_orig[0][2].shape[1] * sv_reduction)
                    # Place each expert's V block into its own column range.
                    for i, v_tensor in enumerate(all_v):
                        concat_v[:, i * reduced_index_v : (i + 1) * reduced_index_v] = (
                            v_tensor[:, :reduced_index_v]
                        )
                        
                concat_u_list.append(concat_u)
                concat_v_list.append(concat_v)  
                # Re-orthogonalize via SVD, as in TSVM_utils, for a better merge.
                u_u, s_u, v_u = torch.linalg.svd(concat_u, full_matrices=False)
                u_v, s_v, v_v = torch.linalg.svd(concat_v.T, full_matrices=False)
                # Assemble the final weight update with multi_dot.
                reconstructed_weight = torch.linalg.multi_dot(
                    (
                        u_u,
                        v_u,
                        torch.diag(concat_s),
                        u_v,
                        v_v,
                    )
                )


                # Add the update onto the pretrained weights.
                pretrained_module.weight.data.add_(reconstructed_weight)
                

                # Logging (disabled):
                # if logging.getLogger().level <= logging.DEBUG:
                #     log.debug(f"Updated module {name}; update norm: {torch.norm(reconstructed_weight)}")
                # else:
                #     log.info(f"Updated module {name}")

        # If requested, average the experts for non-linear leaf modules.
        if self.average_experts:
            print("average_experts!!!!!!!!!!!!!!")
            non_linear_modules = [
                (name, module)
                for name, module in pretrained_model.named_modules()
                if not isinstance(module, self._linear_layer_cls)
                and len(tuple(module.named_modules())) == 1
            ]

            for name, _ in tqdm(non_linear_modules, desc="处理非线性层"):
                self._average_experts(pretrained_model, finetuned_models, name)

        return pretrained_model, orig_v, concat_u_list, concat_v_list


    def tsv_continue(self, pretrained_model: nn.Module, finetuned_models: List[nn.Module], continue_flag: bool = False):
        """
        Continue a Task Singular Vector (TSV) merge: fold one newly fine-tuned
        model into an existing MoE model's shared (pre-trained) weights.

        Args:
            pretrained_model: the pre-trained base model; its linear weights are
                updated in place with the reconstructed task vector.
            finetuned_models: two-element list ``[moe_model, finetuned_model]`` —
                the existing MoE model (carrying ``task_num``, per-layer
                ``concat_u``/``concat_v`` buffers and gate state) and the new
                fine-tuned expert model.
            continue_flag: currently unused; kept for interface compatibility.

        Returns:
            Tuple ``(pretrained_model, orig_v_list, concat_u_list, concat_v_list)``:
            the merged model, the per-layer gate ``orig_v`` lists, and the
            per-layer concatenated U/V buffers.
        """
        # SVD compression ratio, read from the config when available.
        sv_reduction = self.config.sv_reduction if hasattr(self, "config") else self.sv_reduction

        moe_model = finetuned_models[0]
        finetuned_model = finetuned_models[1]

        # Collect all linear layers once to avoid repeated traversal.
        linear_modules = [
            (name, module)
            for name, module in pretrained_model.named_modules()
            if isinstance(module, self._linear_layer_cls)
        ]

        # Leaf modules that are not linear layers are handled by expert averaging.
        non_linear_modules = [
            (name, module)
            for name, module in pretrained_model.named_modules()
            if not isinstance(module, self._linear_layer_cls)
            and len(tuple(module.named_modules())) == 1
        ]

        for name, _ in tqdm(non_linear_modules, desc="处理非线性层"):
            self._average_experts(pretrained_model, finetuned_model, name, moe_model, moe_model.task_num)

        orig_v_list = []
        concat_u_list = []
        concat_v_list = []
        for name, _module in tqdm(linear_modules, desc="使用TSV合并线性层"):
            name_list = name.split(".")
            pretrained_module = get_attr(pretrained_model, name_list)
            expert_module = get_attr(finetuned_model, name_list)

            # Task vector for this layer.
            weight_diff = expert_module.weight - pretrained_module.weight

            # Nothing to merge if the fine-tuned weights are identical.
            if _is_all_zeros(weight_diff):
                continue

            with torch.no_grad():
                # Move the difference to the accelerator (if configured) to speed
                # up the SVD. Bug fix: the original read `diff.device`, but `diff`
                # was never defined (its binding loop was commented out), which
                # raised NameError whenever `upscaling_accelerator` was falsy.
                device = self.upscaling_accelerator or weight_diff.device
                weight_diff = weight_diff.to(device)

                # Compute the SVD of the task vector and compress it.
                _, u, s, v, U, S, V = TSVC_utils.compute_svd_and_compress(
                    None, weight_diff, sv_reduction
                )

                # Append the new expert's right singular vectors to the gate's
                # stored `orig_v` list (frozen — no gradients).
                orig_v = get_attr(moe_model, name_list + ["gate"] + ["orig_v"])
                orig_v.append(v.requires_grad_(False))
                orig_v_list.append(orig_v)

                # Place the new expert's compressed U/V into the column slot
                # reserved for task index `task_num` in the pre-allocated buffers.
                concat_u = get_attr(moe_model, name_list + ["concat_u"])
                concat_v = get_attr(moe_model, name_list + ["concat_v"])
                reduced_index_u = int(concat_u.shape[1] * sv_reduction)
                concat_u[:, moe_model.task_num * reduced_index_u : (moe_model.task_num + 1) * reduced_index_u] = u

                # Uniform singular values (1/rank) instead of the raw spectrum,
                # following the TSVM_utils construction.
                concat_s = torch.ones_like(S, device=S.device) / S.shape[0]

                reduced_index_v = int(concat_v.shape[1] * sv_reduction)
                concat_v[:, moe_model.task_num * reduced_index_v : (moe_model.task_num + 1) * reduced_index_v] = v

                concat_u_list.append(concat_u)
                concat_v_list.append(concat_v)

                # Re-orthogonalize the concatenated bases via SVD for a better merge.
                u_u, s_u, v_u = torch.linalg.svd(concat_u, full_matrices=False)
                u_v, s_v, v_v = torch.linalg.svd(concat_v.T, full_matrices=False)
                # Rebuild the merged task vector with a single multi_dot chain.
                reconstructed_weight = torch.linalg.multi_dot(
                    (
                        u_u,
                        v_u,
                        torch.diag(concat_s),
                        u_v,
                        v_v,
                    )
                )

                # Fold the reconstructed task vector into the shared weights in place.
                pretrained_module.weight.data.add_(reconstructed_weight)

        return pretrained_model, orig_v_list, concat_u_list, concat_v_list
    
    def construct_continue_moe(self, pretrained_model, finetuned_models, concat_u_list, concat_v_list, orig_v_list):
        """
        Rebuild the MoE model after a continual TSV merge: copy the merged
        shared modules back into the MoE model and register the newly
        fine-tuned model as one additional expert on every linear layer.

        Args:
            pretrained_model: the merged shared (pre-trained) model.
            finetuned_models: two-element list ``[moe_model, finetuned_model]``.
            concat_u_list: per-layer concatenated U matrices (currently unused).
            concat_v_list: per-layer concatenated V matrices (currently unused).
            orig_v_list: per-layer ``orig_v`` lists to install on each gate.

        Returns:
            The updated MoE model.
        """
        moe_model, finetuned_model = finetuned_models[0], finetuned_models[1]

        linear_modules = [
            (name, module)
            for name, module in pretrained_model.named_modules()
            if isinstance(module, self._linear_layer_cls)
        ]
        non_linear_modules = [
            (name, module)
            for name, module in pretrained_model.named_modules()
            if not isinstance(module, self._linear_layer_cls)
            and len(tuple(module.named_modules())) == 1
        ]

        # Copy every non-linear leaf module from the merged model into the MoE model.
        for module_name, _ in tqdm(non_linear_modules, desc="constructing....处理非线性层"):
            path = module_name.split(".")
            set_attr(moe_model, path, get_attr(pretrained_model, path))

        # For each linear layer: refresh the shared weight and append a new expert.
        for layer_idx, (module_name, _) in enumerate(tqdm(linear_modules, desc="constructing....处理线性层")):
            path = module_name.split(".")

            # NOTE(review): this stores a deep copy of the whole merged *module*
            # under the `weight` attribute, not its weight tensor — confirm that
            # S2MoELinear actually expects a module here.
            set_attr(moe_model, path + ["weight"], copy.deepcopy(get_attr(pretrained_model, path)))

            # The new expert holds the task vector of the newly fine-tuned model,
            # installed at index `task_num` (one past the existing experts).
            new_expert_index = moe_model.task_num
            new_expert = copy.deepcopy(get_attr(moe_model, path + ["experts"] + ["0"]))
            new_expert.weight.data = (
                get_attr(finetuned_model, path + ["weight"]).data
                - get_attr(pretrained_model, path + ["weight"]).data
            )
            new_expert.apply_pruning_init()
            set_attr(moe_model, path + ["experts"] + [str(new_expert_index)], new_expert)

            # Update the gate: install the new singular vectors and bump the expert count.
            set_attr(moe_model, path + ["gate"] + ["orig_v"], orig_v_list[layer_idx])
            set_attr(moe_model, path + ["gate"] + ["num_experts"], get_attr(moe_model, path + ["gate"] + ["num_experts"]) + 1)

        return moe_model
            
            