#!/usr/bin/env python
import argparse
import os
import torch
import mindspeed.megatron_adaptor

# Padding count, kept consistent with the corresponding split/conversion script.
# NOTE(review): PADDING_NUM is not referenced anywhere in this file — presumably
# it documents the vocab padding added at split time; confirm against the
# conversion script before removing.
PADDING_NUM = 64
TP_NUM = 4  # This example only handles tp_size=4.

def merge_splits(split_tensors, key):
    """Merge per-TP-rank weight shards into a single full tensor.

    The concatenation axis (or the need for a special gate/up reshuffle)
    is inferred from the parameter name, mirroring the splitting rules of
    the corresponding conversion script.

    Args:
        split_tensors: List of torch.Tensor shards, one per TP rank,
            ordered by rank.
        key: Parameter name from the checkpoint state dict.

    Returns:
        torch.Tensor: the merged weight. Parameters that were never split
        are returned as rank 0's copy unchanged.
    """
    # Special case: the text decoder's linear_fc1 stores a fused gate/up
    # projection. At split time the full tensor was first halved along
    # dim 0 (gate half, up half) and each TP rank saved its slice of both
    # halves concatenated. Undo that here: split every shard back into its
    # gate and up parts, concatenate each group across ranks, then stack
    # the merged gate on top of the merged up.
    # NOTE(review): the text decoder's linear_fc1.bias takes the plain
    # dim-0 concat below instead of this reshuffle; if the bias is also
    # gate/up-fused it would need the same treatment — confirm against
    # the split script.
    if key.startswith('text_decoder.decoder.layer') and 'linear_fc1.weight' in key:
        gate_parts = []
        up_parts = []
        for t in split_tensors:
            half = t.shape[0] // 2
            gate_parts.append(t[:half])
            up_parts.append(t[half:])
        return torch.cat(gate_parts + up_parts, dim=0)

    # Column-parallel layers (qkv and fc1 weights/biases): split along
    # dim 0. The image-encoder projector's fc1 is excluded — it is not
    # TP-split in this scheme.
    is_split_fc1 = (
        ('linear_fc1.weight' in key or 'linear_fc1.bias' in key)
        and 'image_encoder.projector' not in key
    )
    if 'linear_qkv.weight' in key or 'linear_qkv.bias' in key or is_split_fc1:
        return torch.cat(split_tensors, dim=0)

    # Row-parallel layers (attention output proj, fc2 weight): split along
    # dim 1. The projector's fc2 is again excluded.
    if ('linear_proj.weight' in key) or (
            'linear_fc2.weight' in key and 'image_encoder.projector' not in key):
        return torch.cat(split_tensors, dim=1)

    # Output layer and word embeddings: vocab-parallel, split along dim 0.
    # (These may carry padding rows added at split time — presumably
    # PADDING_NUM; confirm against the conversion script.)
    if 'output_layer' in key or 'word_embeddings' in key:
        return torch.cat(split_tensors, dim=0)

    # Anything else was replicated across ranks; take rank 0's copy.
    return split_tensors[0]

def main(args):
    """Load all TP shards, merge them, and save a single checkpoint file.

    Expects ``args.input_dir`` to contain ``mp_rank_00`` .. ``mp_rank_0{TP_NUM-1}``
    subdirectories, each holding a ``model_optim_rng.pt`` with a ``'model'``
    entry. Writes ``{'model': merged_state}`` to ``args.output_path``.

    Raises:
        FileNotFoundError: if a shard file is missing.
        KeyError: if a shard lacks the 'model' entry or the shards'
            key sets disagree.
    """
    tp_state_dicts = []

    # Load every TP shard; the layout follows the Megatron convention
    # <input_dir>/mp_rank_XX/model_optim_rng.pt.
    for tp_rank in range(TP_NUM):
        rank_dir = os.path.join(args.input_dir, f'mp_rank_{tp_rank:02d}')
        model_file = os.path.join(rank_dir, 'model_optim_rng.pt')
        if not os.path.exists(model_file):
            raise FileNotFoundError(f"Model file not found: {model_file}")
        checkpoint = torch.load(model_file, map_location='cpu')
        if 'model' not in checkpoint:
            raise KeyError(f"Key 'model' not found in {model_file}")
        tp_state_dicts.append(checkpoint['model'])

    # All shards must expose the same parameter names; fail early with a
    # clear message instead of an opaque KeyError inside the merge loop.
    keys = tp_state_dicts[0].keys()
    for tp_rank, tp_sd in enumerate(tp_state_dicts[1:], start=1):
        if tp_sd.keys() != keys:
            raise KeyError(
                f"mp_rank_{tp_rank:02d} has a different key set than mp_rank_00")

    merged_state = {}
    for key in keys:
        # Skip None placeholders (judged by rank 0, matching original logic).
        if tp_state_dicts[0][key] is None:
            continue
        # Gather this parameter's shard from every TP rank and merge.
        split_tensors = [tp_sd[key] for tp_sd in tp_state_dicts]
        merged_state[key] = merge_splits(split_tensors, key)

    # Save as a single file in MindSpeed-MM format.
    torch.save({'model': merged_state}, args.output_path)
    print(f"Merged model saved to {args.output_path}")

if __name__ == '__main__':
    # CLI entry point: parse the shard directory and output path, then merge.
    arg_parser = argparse.ArgumentParser(
        description="Merge TP-split MindSpeed-MM model weights into a single file")
    arg_parser.add_argument(
        '--input-dir', type=str, required=True,
        help="Directory containing the split model (e.g., InternVL2_5-8B-tp4)")
    arg_parser.add_argument(
        '--output-path', type=str, required=True,
        help="Output file path for the merged model (e.g., merged_model.pt)")
    main(arg_parser.parse_args())
