#!/usr/bin/python3
# coding=utf-8
#
# Copyright (C) 2023-2024. Huawei Technologies Co., Ltd. All rights reserved.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# ===============================================================================

import numpy as np
import os
import torch
import torch_npu
import struct

def binary(num):
    """Return the 32-bit big-endian IEEE-754 bit pattern of *num* as a string of '0'/'1'."""
    packed = struct.pack('!f', num)
    return ''.join(format(byte, '08b') for byte in packed)

def read_from_bin(filePath, dtype, shape):
    """Load a raw binary dump from *filePath* and rebuild it as a torch tensor.

    *dtype* is the NumPy dtype the bytes were written with; *shape* is the
    original tensor shape to restore.
    """
    # np.fromfile reads the whole file and decodes it in one step.
    flat = np.fromfile(filePath, dtype=dtype)
    return torch.tensor(flat.reshape(shape))

def gen_golden_data():
    """Generate random int8 matmul inputs and per-rank golden outputs.

    Reads PROMPT_LEN, RANK_SIZE, USED_CORE_NUM and P_VALUE from the
    environment (raises if any is unset), writes the input tensors under
    ./input/ and one golden file per rank under ./output/.

    NOTE(review): requires an Ascend NPU — uses torch_npu format casts and
    npu_quant_matmul; cannot run on a CPU-only host.
    """
    M = int(os.getenv('PROMPT_LEN'))
    N = 4096
    K = 1024
    rank_size = int(os.getenv('RANK_SIZE'))
    # Read so a missing env var fails fast here; the values themselves are
    # not used by golden generation (presumably consumed by the kernel side).
    used_core_num = int(os.getenv('USED_CORE_NUM'))
    p_value = int(os.getenv('P_VALUE'))

    mat_a = torch.randint(-128, 127, (M, K)).to(dtype=torch.int8)
    mat_b = torch.randint(-128, 127, (K, N)).to(dtype=torch.int8)
    # 29 — assumed to be the FRACTAL_NZ format id (TODO confirm against the
    # torch_npu format table): cast B to NZ layout on device, then back to host.
    mat_b = torch_npu.npu_format_cast_(mat_b.npu(), 29).cpu()
    scale = torch.randn(N).to(dtype=torch.float32)
    bias = None
    pertoken_scale = torch.randn(M).to(dtype=torch.float32)

    # os.makedirs replaces the former `os.system("mkdir -p ...")` shell-out.
    os.makedirs("input", exist_ok=True)
    os.makedirs("output", exist_ok=True)

    mat_a.numpy().tofile("./input/input_a.bin")

    # Re-tile B for the NZ dump: stack the 32-column slices vertically.
    mat_b0 = mat_b.numpy()
    mat_b_nz = np.concatenate(
        [mat_b0[:, x:x + 32] for x in range(0, mat_b0.shape[1], 32)], axis=0)
    mat_b_nz.tofile("./input/input_b.bin")

    pertoken_scale.numpy().tofile("./input/input_pertoken_scale.bin")

    # Debug aid: compare the first two stored bytes with the tensor values.
    with open("./input/input_a.bin", 'rb') as file:
        tmp = file.read(2)
        print('mat_a[0] ' + str(mat_a[0, 0]) + 'binary' + str(bin(tmp[0])))
        print('mat_a[1] ' + str(mat_a[0, 1]) + 'binary' + str(bin(tmp[1])))

    scale.numpy().tofile("./input/input_scale_ft32.bin")
    # For each little-endian float32 the high two bytes are its bfloat16
    # truncation.  Collect the chunks in lists and join once, instead of the
    # original quadratic `bytes + bytes` accumulation.
    bf16_parts = []
    padded_parts = []
    with open("./input/input_scale_ft32.bin", 'rb') as file:
        while True:
            word = file.read(4)
            if not word:  # end of file
                break
            bf16_parts.append(word[2:])
            padded_parts.append(b'\x00\x00' + word[2:])
    with open("./input/input_scale.bin", 'wb') as file:
        file.write(b''.join(bf16_parts))
    with open("./input/input_scale_ft32TObf16.bin", 'wb') as file:
        file.write(b''.join(padded_parts))

    # Reload the zero-padded copy as float32 and downcast, so `scale` holds
    # exactly the bf16 values the kernel will see.
    np_array = np.fromfile("./input/input_scale_ft32TObf16.bin", dtype=np.float32)
    scale = torch.tensor(np_array).to(dtype=torch.bfloat16)
    print("gen_data, scale[0]" + str(binary(scale[0])))

    expect = torch_npu.npu_quant_matmul(
        mat_a.npu(), mat_b.npu(), scale.npu(), offset=None, bias=bias,
        pertoken_scale=pertoken_scale.npu(), output_dtype=torch.bfloat16)
    # Normalise signed zeros; note -0.0 == 0.0 also matches +0.0 entries.
    expect[torch.where(expect == -0.0)] = +0.0

    for rank in range(rank_size):
        lo = int(rank * M / rank_size)
        hi = int((rank + 1) * M / rank_size)
        shard = expect[lo:hi, :]
        # Presumably emulates an 8-way accumulation (TODO confirm against the
        # kernel).  Left-associated fold reproduces the original copy-pasted
        # 8-term sum exactly, including its bf16 rounding order.
        expect_rank = shard
        for _ in range(7):
            expect_rank = expect_rank + shard
        expect_rank = expect_rank.cpu()
        expect_rank.to(dtype=torch.float32).numpy().tofile(
            "./output/golden_" + str(rank) + ".bin")



# Script entry point: generate inputs and per-rank golden outputs.
if __name__ == "__main__":
    gen_golden_data()
