import argparse
import os
import torch
import pdb

def _map_param_name(name):
    """Map one HF Whisper-encoder parameter name to its Megatron name.

    Returns (new_name, chunk_dim) where chunk_dim is the dimension along
    which the tensor must be split across tensor-parallel ranks, or None
    when the tensor is replicated on every rank. Returns (None, None) for
    names not handled here (e.g. q/k/v projections, which are merged
    separately). Match order mirrors the original if/elif chain.
    """
    # Convolutional feature-extractor layers (replicated).
    if 'conv1' in name:
        return name.replace('model.encoder.conv1', 'conv1'), None
    if 'conv2' in name:
        return name.replace('model.encoder.conv2', 'conv2'), None
    # Positional embeddings (replicated).
    if 'embed_positions' in name:
        return name.replace('model.encoder.embed_positions', 'position_embeddings'), None
    # Attention output projection: row-parallel, so the weight is split on
    # the input dimension (-1) while the bias is replicated.
    if 'self_attn.out_proj' in name:
        new_name = name.replace('self_attn.out_proj', 'self_attention.linear_proj')
        return new_name, (-1 if 'weight' in name else None)
    # Layer norms are fused into the following linear layer in Megatron.
    if 'self_attn_layer_norm' in name:
        return name.replace('self_attn_layer_norm.', 'self_attention.linear_qkv.layer_norm_'), None
    if 'final_layer_norm' in name:
        return name.replace('final_layer_norm.', 'mlp.linear_fc1.layer_norm_'), None
    # MLP: fc1 is column-parallel (split dim 0, bias included); fc2 is
    # row-parallel (weight split on dim -1, bias replicated).
    if 'fc1' in name:
        return name.replace('fc1', 'mlp.linear_fc1'), 0
    if 'fc2' in name:
        return name.replace('fc2', 'mlp.linear_fc2'), (-1 if 'weight' in name else None)
    # Encoder-final layer norm (replicated).
    if 'model.encoder.layer_norm' in name:
        return name.replace('model.encoder.layer_norm', 'final_layer_norm'), None
    return None, None


def _collect_qkv(name, param, qkv_weights, qkv_biases):
    """Stash a q/k/v projection tensor keyed by layer index.

    Returns True when the parameter was consumed (it will be merged into a
    fused linear_qkv tensor later), False otherwise.
    """
    for kind, store in (('weight', qkv_weights), ('bias', qkv_biases)):
        for proj in ('q', 'k', 'v'):
            if f'self_attn.{proj}_proj.{kind}' in name:
                # e.g. model.encoder.layers.<idx>.self_attn.q_proj.weight
                layer_idx = name.split('.')[3]
                store.setdefault(layer_idx, {})[proj] = param
                return True
    return False


def _assign(megatron_ckp, new_name, param, chunk_dim, tensor_parallel_size):
    """Write a parameter into every rank's state dict.

    Replicates the tensor when chunk_dim is None; otherwise splits it into
    tensor_parallel_size chunks along chunk_dim, one chunk per rank.
    """
    if chunk_dim is None:
        for i in range(tensor_parallel_size):
            megatron_ckp[i]['model'][new_name] = param.clone()
    else:
        param_chunks = param.chunk(tensor_parallel_size, dim=chunk_dim)
        for i in range(tensor_parallel_size):
            megatron_ckp[i]['model'][new_name] = param_chunks[i].clone()


def _merge_qkv(megatron_ckp, qkv_weights, qkv_biases, tensor_parallel_size):
    """Fuse per-layer q/k/v tensors into Megatron's interleaved linear_qkv.

    Each projection is chunked across ranks on dim 0 first, then the rank's
    q/k/v chunks are concatenated, so every rank holds its own [q|k|v] slab.
    """
    for layer_idx, weights in qkv_weights.items():
        chunks = {p: weights[p].chunk(tensor_parallel_size, dim=0) for p in ('q', 'k', 'v')}
        prefix = f'encoder.layers.{layer_idx}.'
        for i in range(tensor_parallel_size):
            fused = torch.cat([chunks['q'][i], chunks['k'][i], chunks['v'][i]], dim=0)
            megatron_ckp[i]['model'][prefix + 'self_attention.linear_qkv.weight'] = fused
            # Transformer-engine modules expect these keys to exist even
            # when there is no serialized extra state.
            for module in ('self_attention.core_attention', 'self_attention.linear_qkv',
                           'self_attention.linear_proj', 'mlp.linear_fc1', 'mlp.linear_fc2'):
                megatron_ckp[i]['model'][prefix + module + '._extra_state'] = None

    for layer_idx, biases in qkv_biases.items():
        q_bias = biases['q']
        # Original Whisper's k_proj has no bias; substitute zeros so the
        # fused bias keeps the q/k/v layout. Use the real k bias if one
        # was actually present in the checkpoint.
        k_bias = biases.get('k', torch.zeros_like(q_bias))
        v_bias = biases['v']
        chunks = {p: b.chunk(tensor_parallel_size, dim=0)
                  for p, b in (('q', q_bias), ('k', k_bias), ('v', v_bias))}
        for i in range(tensor_parallel_size):
            fused = torch.cat([chunks['q'][i], chunks['k'][i], chunks['v'][i]], dim=0)
            megatron_ckp[i]['model'][f'encoder.layers.{layer_idx}.self_attention.linear_qkv.bias'] = fused


def _save_checkpoint(megatron_ckp, megatron_checkpoint_path, tensor_parallel_size):
    """Write per-rank state dicts in Megatron's on-disk layout.

    Creates <path>/iter_0000001/mp_rank_XX/model_optim_rng.pt for each rank
    plus the latest_checkpointed_iteration.txt marker file.
    """
    for i in range(tensor_parallel_size):
        # Zero-padded rank directory (mp_rank_00, ..., mp_rank_10, ...);
        # the old f"mp_rank_0{i}" broke for tensor_parallel_size > 10.
        output_dir_tp = os.path.join(megatron_checkpoint_path, "iter_0000001", f"mp_rank_{i:02d}")
        os.makedirs(output_dir_tp, exist_ok=True)
        megatron_ckp[i]['iteration'] = 1
        torch.save(megatron_ckp[i], os.path.join(output_dir_tp, "model_optim_rng.pt"))
    marker = os.path.join(megatron_checkpoint_path, "latest_checkpointed_iteration.txt")
    with open(marker, "w") as f:
        f.write("1")
    print(f"Checkpoint saved to {megatron_checkpoint_path}")


def convert_hf_to_megatron(hf_checkpoint_path, megatron_checkpoint_path, tensor_parallel_size):
    """Convert a Hugging Face Whisper checkpoint to Megatron format.

    Only the encoder is converted; every decoder parameter is skipped.
    Tensors are renamed, split across tensor-parallel ranks where required,
    and q/k/v projections are fused into Megatron's linear_qkv layout.

    Args:
        hf_checkpoint_path: path to the HF state-dict file (torch.load-able).
        megatron_checkpoint_path: output directory for the Megatron layout.
        tensor_parallel_size: number of tensor-parallel ranks to shard for.
    """
    # NOTE(review): torch.load of an external file — prefer weights_only=True
    # (default in torch >= 2.6) if the checkpoint is a plain tensor dict.
    hf_ckp = torch.load(hf_checkpoint_path, map_location='cpu')
    megatron_ckp = [{'model': {}} for _ in range(tensor_parallel_size)]

    # Q, K, V projections are gathered per layer and fused afterwards.
    qkv_weights = {}
    qkv_biases = {}

    for name, param in hf_ckp.items():
        if 'decoder' in name:
            continue  # decoder is skipped
        if _collect_qkv(name, param, qkv_weights, qkv_biases):
            continue
        new_name, chunk_dim = _map_param_name(name)
        if new_name is None:
            # Unmatched q/k/v variants (e.g. cross-attention) are silently
            # ignored, matching the original behavior; warn on anything else.
            if not ('q_proj' in name or 'k_proj' in name or 'v_proj' in name):
                print(f"Warning: Unhandled parameter {name}")
            continue
        # Strip only the leading 'model.' prefix; str.replace would also
        # delete any later occurrence of the substring.
        if new_name.startswith('model.'):
            new_name = new_name[len('model.'):]
        _assign(megatron_ckp, new_name, param, chunk_dim, tensor_parallel_size)

    _merge_qkv(megatron_ckp, qkv_weights, qkv_biases, tensor_parallel_size)
    _save_checkpoint(megatron_ckp, megatron_checkpoint_path, tensor_parallel_size)

if __name__ == "__main__":
    # Command-line entry point: parse paths and tensor-parallel degree,
    # then run the conversion.
    cli = argparse.ArgumentParser(
        description="Convert Hugging Face checkpoint to Megatron checkpoint format.")
    cli.add_argument(
        '--load', type=str, required=True,
        help="Path to the Hugging Face checkpoint to load.")
    cli.add_argument(
        '--save', type=str, required=True,
        help="Path to save the converted Megatron checkpoint.")
    cli.add_argument(
        '--tensor-parallel-size', type=int, default=1,
        help="Tensor parallel size for the Megatron model.")

    opts = cli.parse_args()
    convert_hf_to_megatron(opts.load, opts.save, opts.tensor_parallel_size)