import torch
import megatron
import pdb
import sys
from safetensors import safe_open

def load_tensor_file(file):
    if file.endswith('.safetensors'):
        with safe_open(file, framework="torch") as f:
            tensors = {key: f.get_tensor(key) for key in f.keys()}
    else:
        tensors = torch.load(file)
    return tensors

def compare_tensors(file_list, file2, merge=False):
    """Compare tensors stored in the files of ``file_list`` against ``file2``.

    ``file_list`` typically holds per-rank shards of a model-parallel dump;
    ``file2`` is the reference dump. When ``merge`` is True the shard dicts
    are merged key-wise into a single dict first. For each key present in
    both sides, prints whether the shapes match and the max / mean absolute
    difference. Keys present on only one side are reported and skipped.

    Returns None; all output goes to stdout.
    """
    # Load the reference tensors from file2.
    tensors2 = load_tensor_file(file2)

    # Load every file in file_list.
    # file_list = sorted(file_list, key=lambda x: int(x.split(".")[-2]))
    # file_list = file_list[:8]
    print(file_list)
    print('vs')
    print(file2)
    tensors_list = [load_tensor_file(f) for f in file_list]
    tensors1 = tensors_list[0]
    if merge:
        empty_ckp = {}
        for t in tensors_list:
            empty_ckp.update(t)
        tensors_list = [empty_ckp]
        tensors1 = empty_ckp
    # Compare the key sets of both sides.
    keys1 = set(tensors1.keys())
    keys2 = set(tensors2.keys())

    # Report (but do not abort on) key-set differences.
    if keys1 != keys2:
        print("Keys are different between the files.")
        print("Keys in file1 not in file2:", keys1 - keys2)
        print("Keys in file2 not in file1:", keys2 - keys1)
#        return

    # Compare shape and numeric difference for every shared key.
    keys1 = sorted(keys1)
    for key in keys1:
        if key not in tensors2:
            print(f'skip incompatible key {key}')
            #print(key + f" is None, skip, shape {tensors1[key].shape}")
            continue
        tensor2 = tensors2[key]
        if tensor2 is None:
            continue
        if len(tensors_list) > 1 and tensors_list[0][key].shape != tensor2.shape:
            # Shards disagree with the reference shape: concatenate the
            # per-rank tensors. The concat dimension depends on how each
            # activation is partitioned (sequence vs tensor parallel) —
            # assumed from the Megatron dump layout, TODO confirm.
            if key in [
                'embeddings',
                'embedding_bef_dropout',
                'output',
                'mlp_output',
                'second_residual',
                'second_lnop',
                'layernorm_input',
                'hidden_states',
                'first_lnop',
                'second_lnip',
                'first_residual',
                'attention_output',
                'input_parallel',
                'input_',
                'output_tensor'
            ] or 'hidden_states' in key:  # sequence parallel
                dim = 0
            elif key == 'words_embeddings' or key == 'embeddings_after_position' or key == 'output_parallel':
                dim = 1
            else:
                dim = -1
            # Skip shards that lack the key entirely (the original indexed
            # unconditionally and could raise KeyError on ragged shards).
            cat_tensor1 = torch.cat(
                [tensors[key].cpu() for tensors in tensors_list
                 if key in tensors and tensors[key] is not None],
                dim=dim,
            )
        else:
            cat_tensor1 = tensors_list[0][key].cpu()

        # Non-tensor payloads (scalars, python objects) are compared directly.
        # Fixed: the original referenced `tensor1` here, which is not assigned
        # until later in the loop body -> NameError on first hit.
        if type(tensor2) != torch.Tensor:
            if tensor2 != cat_tensor1:
                print(f"key {key} not the same for {cat_tensor1} != {tensor2}")
            else:
                print(f"key {key} are the same for {cat_tensor1}")
            continue

        if tensors1[key] is None or tensor2 is None:
            print(f'{key} in t1 is None, skip')
            continue

        if tensors1[key].shape == tensor2.shape:
            tensor1 = tensors1[key]
        else:
            tensor1 = cat_tensor1

        if tensor1.shape != tensor2.shape:
            #print(f"Shape mismatch for key '{key}': {tensor1.shape} vs {tensor2.shape}")

            # Last-resort retry: many dumps differ only by a leading-dim
            # transpose. Assumes tensor1 has >= 2 dims here — TODO confirm.
            tensor1 = tensor1.transpose(1,0)
            if key in  ['adjusted_key', 'adjusted_value']:
                pass
#                tensor1 = tensor1.transpose(1,2)
            if key == 'core_attention_output':
                pass
#                tensor2 = tensor2.transpose(1,2).reshape(1,1314,-1)
            if key in  ['key', 'value', 'query']:
                pass
#                tensor1 = tensor1.transpose(2,3)
                #tensor1 = tensor1.reshape(1,1314,-1)
            if key in  ['key_after_rotary', 'value_after_rotary']:
                pass
#                tensor1 = tensor1.transpose(1,2)
            if tensor1.shape != tensor2.shape:
                print(f"Shape mismatch for key '{key}': {tensor1.shape} vs {tensor2.shape}")
                continue
            else:
                print(f"Shapes for key '{key}' are the same: {tensor1.shape} after permute 0,1")

        else:
            print(f"Shapes for key '{key}' are the same: {tensor1.shape}")
        if key == 'input_attention_mask':
            # Masks are stored as additive biases (large negatives mask);
            # convert to boolean to match the reference representation.
            tensor1 = tensor1 < 0
        # Compute element-wise difference statistics.
        diff = tensor1.cpu().float() - tensor2.cpu().float()
        max_diff = torch.max(torch.abs(diff.float())).item()
        mean_diff = torch.mean(torch.abs(diff.float())).item()
        print(f"Max difference for key '{key}': {max_diff}")
        print(f"Mean difference for key '{key}': {mean_diff}")

if __name__ == "__main__":
    file_list = sys.argv[1:-1]
    file2 = sys.argv[-1]
    compare_tensors(file_list, file2, merge=True)
