#!/usr/bin/python3
# coding=utf-8
#
# Copyright (C) 2023-2024. Huawei Technologies Co., Ltd. All rights reserved.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# ===============================================================================

import sys
import numpy as np
import torch
import time
import os
# Tolerances for float32 element-wise comparison via np.isclose:
# an element matches when |output - golden| <= absolute_tol + relative_tol * |golden|.
relative_tol = 1e-6
absolute_tol = 1e-9
# Maximum acceptable fraction of mismatched elements for a run to pass.
error_tol = 1e-5

import struct

def binary(num):
    """Return *num* as its 32-character IEEE-754 big-endian float32 bit string."""
    packed = struct.pack('!f', num)
    bits = []
    for byte in packed:
        bits.append(format(byte, '08b'))
    return ''.join(bits)

def verify_result_ft32():
    rank_size = 8
    call_times = 1

    for j in range(call_times):
        expects = []
        outputs = []
        error_ratio = 0
        for i in range(rank_size):
            filename_gd = './output/golden_'+str(i)+'.bin'
            expect = np.fromfile(filename_gd, dtype=np.float32).reshape(-1)
            expects.append(expect)
            filename_op = './output/output_'+str(i)+'_ft32_'+str(j)+'.bin'
            output = np.fromfile(filename_op, dtype=np.float32).reshape(-1)
            outputs.append(output)

            different_element_results = np.isclose(output,
                                                expect,
                                                rtol=relative_tol,
                                                atol=absolute_tol,
                                                equal_nan=True)
            
            different_element_indexes = np.where(different_element_results == False)[0]
            # for index in range(len(different_element_indexes)):
            #     real_index = different_element_indexes[index]
            #     golden_data = expect[real_index]
            #     output_data = output[real_index]
            #     print(
            #         "data index: %06d, expected: %-.9f, actual: %-.9f, rdiff: %-.6f" %
            #         (real_index, golden_data, output_data,
            #         abs(output_data - golden_data) / golden_data))
            #     if index == 100:
            #         break
            error_ratio += float(different_element_indexes.size) / expect.size
            print("%dnd, average error ratio of ranks: %.4f, tolrence: %.4f" % (i,float(different_element_indexes.size) / expect.size, error_tol))
            np.save("./result.npy", output!=expect)
        print("%dnd, average error ratio of ranks: %.4f, tolrence: %.4f" % (j,error_ratio/8, error_tol))


    
def verify_result():
    #output = np.fromfile(output, dtype=np.float32).reshape(-1)
    rank_size = int(os.getenv("RANK_SIZE"))
    prompt_len = int(os.getenv("PROMPT_LEN"))
    # buff2 = np.fromfile('/mnt/qjl/ascendc_dev/samples/operator/ascendc/0_introduction/quantmatmulv2_gmem/AclNNInvocation/input/buff_tmp_2_ft32.bin', dtype=np.float32)
    # buff2bin = b''
    # with open('/mnt/qjl/ascendc_dev/samples/operator/ascendc/0_introduction/quantmatmulv2_gmem/AclNNInvocation/input/buff_tmp_2.bin', 'rb') as file:
    #     buff2bin = file.read()

    outputbin = b''
    outputs = []
    goldens = []
    for rank in range(rank_size):
        outputfile = './output/output_' + str(rank) + '.bin'
        with open(outputfile, 'rb') as file:
            outputs.append(file.read())

        goldenfile = './output/golden_' + str(rank) + '.bin'
        with open(goldenfile, 'rb') as file:
            goldens.append(file.read())
    
    
    # mat_a = np.fromfile('./AclNNInvocation/input/input_a.bin',dtype=np.int8).reshape(prompt_len,1024)
    # filename_gd = '/mnt/qjl/ascendc_dev/samples/operator/ascendc/0_introduction/quantmatmulv2_gmem/AclNNInvocation/output/golden_0.bin'
    # expect = np.fromfile(filename_gd, dtype=np.float32).reshape(-1)
    # scale = np.fromfile('./AclNNInvocation/input/input_scale_ft32.bin',dtype=np.float32).reshape(4096)
    # pertoken_scale = np.fromfile('./AclNNInvocation/input/input_pertoken_scale.bin',dtype=np.float32).reshape(prompt_len)

    # scale_expect = torch.tensor(scale).to(dtype = torch.bfloat16)

    # print(np.sum(mat_a[0])*scale[0])
    # print(expect[0])
    # print(np.frombuffer(b'\x00\x00'+output[:2], dtype=np.float32))

    different_element_results1 = []
    different_element_results = []
    error_ratio = []
    for rank in range(rank_size):
        print("rank:" + str(rank))
        golden = goldens[rank]
        output = outputs[rank]
        golden_d2 = np.array([golden[x] for x in range(2, len(golden), 4)],dtype=np.int32)
        golden_d3 = np.array([golden[x] for x in range(3, len(golden), 4)],dtype=np.int32)
        output_d0 = np.array([output[x] for x in range(0, len(output), 2)],dtype=np.int32)
        output_d1 = np.array([output[x] for x in range(1, len(output), 2)],dtype=np.int32)
        

        different_element_results1_tmp = np.isclose(output_d0,
                                            golden_d2,
                                            rtol=relative_tol,
                                            atol=absolute_tol,
                                            equal_nan=True)
        print(np.mean(different_element_results1_tmp))
        
        different_element_results_tmp = np.isclose(output_d1,
                                            golden_d3,
                                            rtol=relative_tol,
                                            atol=absolute_tol,
                                            equal_nan=True)
        print(np.mean(different_element_results_tmp))

        error_ratio1 = 1 - np.mean(different_element_results1_tmp)
        error_ratio2 = 1 - np.mean(different_element_results_tmp)
        
        different_element_results1.append(different_element_results1_tmp)
        different_element_results.append(different_element_results_tmp)
        error_ratio.append(error_ratio1)
        error_ratio.append(error_ratio2)
    
    return np.mean(np.array(error_ratio)) <= error_tol


if __name__ == '__main__':
    # Run the float32 verification pass; verify_result() is the raw-byte
    # variant and is currently not invoked.
    verify_result_ft32()
