import argparse
import os
import torch
from tqdm import tqdm

def get_attr(root, name_list):
    """Walk a dotted attribute path (pre-split into *name_list*) from *root*.

    Purely-numeric path components are treated as integer indices (e.g. for
    ``nn.Sequential`` / ``nn.ModuleList`` children); everything else is a
    plain attribute lookup.
    """
    node = root
    for part in name_list:
        node = node[int(part)] if part.isdigit() else getattr(node, part)
    return node

def _is_all_zeros(diffs):
    for d in diffs:
        if d.abs().sum().item() != 0:
            return False
    return True

def compute_svd_and_compress(_, diff, sv_reduction):
    """Thin SVD of *diff* plus a rank-truncated copy.

    Keeps the top ``max(1, int(k * sv_reduction))`` singular triplets, where
    ``k`` is the full thin rank. The first positional argument is unused
    (kept for interface compatibility).

    Returns a 7-tuple:
        (None, U_trunc, S_trunc, V_trunc, U_full, S_full, V_full)
    where V matrices are returned transposed (column-vector convention).
    """
    U, S, Vh = torch.linalg.svd(diff, full_matrices=False)
    rank = max(1, int(S.shape[0] * sv_reduction))
    return (
        None,
        U[:, :rank].contiguous(),
        S[:rank].contiguous(),
        Vh[:rank, :].T.contiguous(),
        U.contiguous(),
        S.contiguous(),
        Vh.T.contiguous(),
    )

class TSVM_Merger:
    def __init__(self, linear_layer_cls=torch.nn.Linear, upscaling_accelerator=None):
        self._linear_layer_cls = linear_layer_cls
        self.upscaling_accelerator = upscaling_accelerator

    def _average_experts(self, pretrained_model, finetuned_models, name):
        name_list = name.split(".")
        base_mod = get_attr(pretrained_model, name_list)
        expert_mods = [get_attr(m, name_list) for m in finetuned_models]
        if hasattr(base_mod, "weight"):
            ws = [em.weight.data.to(base_mod.weight.device) for em in expert_mods if hasattr(em, "weight")]
            if ws:
                base_mod.weight.data.copy_(torch.stack(ws, dim=0).mean(dim=0))
        if hasattr(base_mod, "bias") and base_mod.bias is not None:
            bs = [em.bias.data.to(base_mod.bias.device) for em in expert_mods if hasattr(em, "bias") and em.bias is not None]
            if bs:
                base_mod.bias.data.copy_(torch.stack(bs, dim=0).mean(dim=0))

    def tsv_m(self, pretrained_model, finetuned_models):
        ft_model_length = len(finetuned_models)
        sv_reduction = 1.0 / (4 * ft_model_length)
        linear_modules = [(name, module) for name, module in list(pretrained_model.named_modules())[1:-1] if isinstance(module, self._linear_layer_cls)]
        non_linear_modules = [(name, module) for name, module in list(pretrained_model.named_modules()) if not isinstance(module, self._linear_layer_cls) and len(tuple(module.named_modules())) == 1]
        for name, _ in tqdm(non_linear_modules, desc="处理非线性层"):
            if name == "model.embed_tokens":
                continue
            self._average_experts(pretrained_model, finetuned_models, name)
        orig_v = []
        for name, module in tqdm(linear_modules, desc="使用TSV合并线性层"):
            name_list = name.split(".")
            pretrained_module = get_attr(pretrained_model, name_list)
            expert_modules = [get_attr(m, name_list) for m in finetuned_models]
            weight_diffs = [expert.weight - pretrained_module.weight for expert in expert_modules]
            if _is_all_zeros(weight_diffs):
                continue
            with torch.no_grad():
                svd_results = []
                svd_orig = []
                for diff in weight_diffs:
                    device = self.upscaling_accelerator or diff.device
                    diff = diff.to(device)
                    _, u, s, v, U, S, V = compute_svd_and_compress(None, diff, sv_reduction)
                    svd_results.append((u, s, v))
                    svd_orig.append((U, S, V))
                all_u = [r[0] for r in svd_results]
                all_s = [r[1] for r in svd_results]
                all_v = [r[2] for r in svd_results]
                orig_v.append(torch.stack(all_v, dim=0))
                U0, S0, V0 = svd_orig[0]
                concat_u = torch.zeros_like(U0, device=all_u[0].device)
                reduced_index_u = max(1, int(U0.shape[1] * sv_reduction))
                for i, u_tensor in enumerate(all_u):
                    end = min((i + 1) * reduced_index_u, concat_u.shape[1])
                    width = end - i * reduced_index_u
                    if width <= 0:
                        break
                    concat_u[:, i * reduced_index_u:end] = u_tensor[:, :width]
                concat_s = torch.zeros_like(S0, device=all_s[0].device)
                reduced_index_s = max(1, int(S0.shape[0] * sv_reduction))
                for i, s_tensor in enumerate(all_s):
                    end = min((i + 1) * reduced_index_s, concat_s.shape[0])
                    width = end - i * reduced_index_s
                    if width <= 0:
                        break
                    concat_s[i * reduced_index_s:end] = s_tensor[:width]
                concat_v = torch.zeros_like(V0, device=all_v[0].device)
                reduced_index_v = max(1, int(V0.shape[1] * sv_reduction))
                for i, v_tensor in enumerate(all_v):
                    end = min((i + 1) * reduced_index_v, concat_v.shape[1])
                    width = end - i * reduced_index_v
                    if width <= 0:
                        break
                    concat_v[:, i * reduced_index_v:end] = v_tensor[:, :width]
                u_u, s_u, vhu = torch.linalg.svd(concat_u, full_matrices=False)
                u_v, s_v, vhv = torch.linalg.svd(concat_v.T, full_matrices=False)
                reconstructed_weight = torch.linalg.multi_dot((u_u, vhu, torch.diag(concat_s), u_v, vhv))
                reconstructed_weight = reconstructed_weight.to(pretrained_module.weight.device, dtype=pretrained_module.weight.dtype)
                pretrained_module.weight.data.add_(reconstructed_weight)
        return pretrained_model

def load_sd(path):
    """Load a checkpoint from *path* (CPU-mapped) and return its state dict.

    Checkpoints saved as ``{"state_dict": ...}`` wrappers are unwrapped;
    anything else is returned as-is.

    NOTE(review): torch.load can unpickle arbitrary objects — only call
    this on trusted checkpoint files.
    """
    payload = torch.load(path, map_location="cpu")
    if isinstance(payload, dict):
        payload = payload.get("state_dict", payload)
    return payload

def _is_linear_weight(name, tensor):
    if not isinstance(tensor, torch.Tensor):
        return False
    if tensor.ndim != 2:
        return False
    if name.endswith(".bias"):
        return False
    if "embed_tokens" in name and name.endswith(".weight"):
        return False
    return True

def merge_tsvm(base_sd, task_sds, rank_ratio, cap_ratio):
    """State-dict-level TSV-M merge of *task_sds* onto *base_sd*.

    For each 2-D linear weight (per ``_is_linear_weight``): compute each
    task's delta from the base, truncate its SVD to a *rank_ratio* fraction,
    pack the truncated factors side by side (total packed width capped at a
    *cap_ratio* fraction of the full rank), re-orthogonalize with a second
    SVD, and add the reconstruction onto the base weight. All other matching
    tensors are element-wise averaged; ``model.embed_tokens.weight`` keeps
    the base value. Returns a new state dict; inputs are not mutated.
    """
    out = {}
    keys = list(base_sd.keys())
    for k in keys:
        vb = base_sd[k]
        if _is_linear_weight(k, vb):
            diffs = []
            # Only tasks that actually contain this key with a matching
            # shape contribute a delta.
            for sd in task_sds:
                if k in sd and isinstance(sd[k], torch.Tensor) and sd[k].shape == vb.shape:
                    diffs.append(sd[k] - vb)
            if not diffs or _is_all_zeros(diffs):
                # No usable deltas: keep the base weight untouched.
                out[k] = vb
                continue
            with torch.no_grad():
                svd_results = []
                svd_orig = []
                for diff in diffs:
                    # SVD in float32 regardless of the checkpoint dtype.
                    diff = diff.to(vb.device).float()
                    U, S, Vh = torch.linalg.svd(diff, full_matrices=False)
                    kmin = S.shape[0]
                    r = max(1, int(kmin * rank_ratio))
                    u = U[:, :r].contiguous()
                    s = S[:r].contiguous()
                    v = Vh[:r, :].T.contiguous()
                    svd_results.append((u, s, v))
                    # NOTE(review): full factors are stored for every task
                    # but only index [0] is read below (as a shape template).
                    svd_orig.append((U.contiguous(), S.contiguous(), Vh.T.contiguous()))
                all_u = [r[0] for r in svd_results]
                all_s = [r[1] for r in svd_results]
                all_v = [r[2] for r in svd_results]
                U0, S0, V0 = svd_orig[0]
                # Zero-padded buffer at full thin-SVD width; each task gets
                # its own column slot, truncated by cap_ratio overall.
                concat_u = torch.zeros_like(U0, device=vb.device)
                cap_u = max(1, int(U0.shape[1] * cap_ratio))
                red_u = max(1, int(U0.shape[1] * rank_ratio))
                for i, u_tensor in enumerate(all_u):
                    start = i * red_u
                    end = min(start + red_u, cap_u, concat_u.shape[1])
                    w = end - start
                    if w <= 0:
                        # Cap reached: later tasks contribute nothing.
                        break
                    concat_u[:, start:end] = u_tensor[:, :w]
                concat_s = torch.zeros_like(S0, device=vb.device)
                cap_s = max(1, int(S0.shape[0] * cap_ratio))
                red_s = max(1, int(S0.shape[0] * rank_ratio))
                for i, s_tensor in enumerate(all_s):
                    start = i * red_s
                    end = min(start + red_s, cap_s, concat_s.shape[0])
                    w = end - start
                    if w <= 0:
                        break
                    concat_s[start:end] = s_tensor[:w]
                concat_v = torch.zeros_like(V0, device=vb.device)
                cap_v = max(1, int(V0.shape[1] * cap_ratio))
                red_v = max(1, int(V0.shape[1] * rank_ratio))
                for i, v_tensor in enumerate(all_v):
                    start = i * red_v
                    end = min(start + red_v, cap_v, concat_v.shape[1])
                    w = end - start
                    if w <= 0:
                        break
                    concat_v[:, start:end] = v_tensor[:, :w]
                # Re-orthogonalize the stacked bases: u_u @ vhu (and
                # u_v @ vhv) are orthogonal polar factors; the middle
                # singular values s_u / s_v are intentionally unused.
                u_u, s_u, vhu = torch.linalg.svd(concat_u, full_matrices=False)
                u_v, s_v, vhv = torch.linalg.svd(concat_v.T, full_matrices=False)
                recon = torch.linalg.multi_dot((u_u, vhu, torch.diag(concat_s), u_v, vhv))
                recon = recon.to(vb.dtype)
                out[k] = vb + recon
        else:
            # Embeddings keep the base values; everything else is averaged
            # across the tasks that carry a shape-compatible tensor.
            if k == "model.embed_tokens.weight":
                out[k] = vb
                continue
            vals = []
            for sd in task_sds:
                if k in sd and isinstance(sd[k], torch.Tensor) and sd[k].shape == vb.shape:
                    vals.append(sd[k].to(vb.device))
            if vals:
                out[k] = torch.stack(vals, dim=0).mean(dim=0).to(vb.dtype)
            else:
                out[k] = vb
    return out

def main():
    """CLI entry point: TSV-merge task checkpoints onto a base checkpoint.

    Expects --base / --output paths and --models as a comma-separated list
    of checkpoint paths; writes the merged state dict with torch.save.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--base", type=str, required=True)
    parser.add_argument("--models", type=str, required=True)
    parser.add_argument("--output", type=str, required=True)
    parser.add_argument("--rank_ratio", type=float, default=0.25)
    parser.add_argument("--cap_ratio", type=float, default=0.50)
    args = parser.parse_args()

    base_sd = load_sd(args.base)
    # Empty entries from stray commas are dropped.
    task_sds = [load_sd(path) for path in args.models.split(",") if path]
    merged_sd = merge_tsvm(base_sd, task_sds, args.rank_ratio, args.cap_ratio)

    out_dir = os.path.dirname(args.output)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    torch.save(merged_sd, args.output)
    print(f"Saved merged state dict to {args.output}")

# Script entry point: run the state-dict-level TSV-M merge CLI.
if __name__ == "__main__":
    main()
