import hashlib
import os
import subprocess
import numpy as np
from datetime import datetime

def assert_close(gold: np.ndarray, act: np.ndarray, eval_type: str = 'DEFAULT'):
    """Compare an actual tensor against a golden reference tensor.

    :param gold: reference values (any shape; compared flattened).
    :param act: values under test; must have the same number of elements.
    :param eval_type: 'DEFAULT' for tolerance-based comparison, 'ABS' for
        bit-exact equality after casting ``act`` to ``gold``'s dtype.
    :return: ``False`` when every element is close (usable as a success
        exit code), ``1`` when mismatches exist but stay under the tolerated
        ratio.
    :raises AssertionError: on shape mismatch, too many mismatches, binary
        inequality in 'ABS' mode, or an unknown ``eval_type``.
    """
    # flatten() returns copies, so the caller's arrays are never modified.
    gold = gold.flatten()
    act = act.flatten()

    assert gold.shape == act.shape, "shape not match"

    if act.dtype in [np.float16, np.float32, np.float64]:
        def compare_special_value(f, name):
            # Report whether NaN/inf values appear at exactly the same positions.
            f_act = f(act)
            f_gold = f(gold)
            if f_act.any() or f_gold.any():
                if np.array_equal(f_act, f_gold):
                    print(f"Both tensor contain {name} at same location.")
                    return True
                else:
                    print(f"Tensor contain {name} but location not match!")
                    return False
            return True

        # NOTE(review): these results are only reported, never enforced — a
        # NaN/inf location mismatch does not fail the comparison.
        compare_special_value(np.isnan, "NaN")
        compare_special_value(np.isposinf, "+inf")
        compare_special_value(np.isneginf, "-inf")

    eps = 1e-4   # near-zero threshold; also reused as tolerated mismatch ratio
    rtol = 1e-4
    atol = 1e-4

    if eval_type == 'DEFAULT':
        if gold.size == 0:
            # Empty tensors are trivially close; argmax below would raise.
            return False
        ae = np.abs(act - gold)
        # Positions where |gold| < eps are judged by absolute error below, so
        # inf/NaN produced by the division there is harmless — suppress the
        # divide-by-zero / invalid-value warnings it would otherwise emit.
        with np.errstate(divide='ignore', invalid='ignore'):
            re = ae / np.abs(gold)
        mask = np.abs(gold) < eps

        ae_pos = np.argmax(ae)
        # nanargmax raises on an all-NaN slice (e.g. gold and act both all
        # zero); fall back to the absolute-error position in that case.
        re_pos = np.nanargmax(re) if not np.all(np.isnan(re)) else ae_pos
        print(f"count ae > {atol}: {(ae > atol).sum()}, max diff: {ae[ae_pos]}, where relative diff is {re[ae_pos]}")
        print(f"count re > {rtol}: {(re > rtol).sum()}, max relative diff: {re[re_pos]}, where diff is {ae[re_pos]}")

        # Near-zero gold entries use the absolute criterion, others the relative one.
        not_close = np.where(mask, ae > atol, re > rtol)
        print(f"count not_close = {np.sum(not_close).item()}")
        print(f"not_close.size = {not_close.size}, gold.size = {gold.size}")
        print(f"not close ratio = {np.sum(not_close).item() / not_close.size}")

        if not np.any(not_close):
            return False

        assert np.sum(not_close).item() < not_close.size * eps, "actual tensor are not close enough with golden tensor, you can use 'benchmark_compare_close' function to compare again!"
        return 1
    elif eval_type == 'ABS':
        act = act.astype(gold.dtype)
        assert np.array_equal(gold, act), "actual tensor and golden tensor are not binary equal!"
        return 1
    else:
        # raise explicitly: a bare `assert 0` would be stripped under -O and
        # fall through to a spurious success return.
        raise AssertionError("ERROR! invalid eval_type")

def precision_test(filename: str, seed: int, varlen: bool, hashed_output: bool, rerun_interpret: bool):
    """Run ``filename`` once on NPU and once under the Triton interpreter
    and compare their dumped outputs with ``assert_close``.

    :param filename: test script executed via ``python3 <filename>``.
    :param seed: random seed; forwarded as ``--seed`` only when != 42 (the
        script's own default).
    :param varlen: forward ``--varlen`` to the test script.
    :param hashed_output: embed a time-based hash in the output file names so
        repeated runs do not clobber each other.
    :param rerun_interpret: when False, reuse the interpret output of a
        previous run instead of re-executing the interpreter.
    :return: the result of ``assert_close(interpret, npu)``.
    """

    def read_output_file(output):
        # Output files contain whitespace-separated floats.
        with open(output, 'r') as file:
            data = file.read().strip()
            return np.array([float(x) for x in data.split()])

    def clear_prev_output(output):
        # Best-effort removal of a stale output file.
        try:
            os.remove(output)
            print(f"The file '{output}' has been removed.")
        except FileNotFoundError:
            print(f"The file '{output}' was not found.")
        except PermissionError:
            print(f"Error: Insufficient permissions to remove '{output}'.")
        except Exception as e:
            print(f"An unexpected error occurred: {e}")

    # Decide the output file names first so the *actual* files get cleared.
    # (Previously the hard-coded "npu.output.txt" was cleared even when a
    # hashed name was about to be used.)
    if hashed_output:
        current_time = datetime.now().isoformat()
        sha256_hash = hashlib.sha256()
        sha256_hash.update(current_time.encode('utf-8'))
        hash_code = sha256_hash.hexdigest()
        npu_output_name = f"npu.output-{hash_code}.txt"
        interpret_output_name = f"interpret.output-{hash_code}.txt"
    else:
        npu_output_name = "npu.output.txt"
        interpret_output_name = "interpret.output.txt"

    clear_prev_output(npu_output_name)
    if rerun_interpret:
        clear_prev_output(interpret_output_name)
    else:
        # A hashed interpret name from a previous run cannot be reconstructed
        # (the hash is time-based), so fall back to the stable file name.
        if hashed_output:
            interpret_output_name = "interpret.output.txt"
        print(f"using previous interpret result {interpret_output_name}")

    cmd_list = ['python3', filename]
    if varlen:
        cmd_list += ["--varlen"]
    if seed != 42:
        cmd_list += [f"--seed={seed}"]

    subprocess.run(
        cmd_list + [f'--output={npu_output_name}'],
        env={**os.environ, 'TRITON_DEBUG': '1'},
        check=True
    )
    print(f"{npu_output_name} is generated.")

    if rerun_interpret:
        subprocess.run(
            cmd_list + [f'--output={interpret_output_name}'],
            env={**os.environ, 'TRITON_INTERPRET': '1', 'TRITON_DEBUG': '1'},
            check=True
        )
        print(f"{interpret_output_name} is generated.")

    npu_output = read_output_file(npu_output_name)
    interpret_output = read_output_file(interpret_output_name)

    return assert_close(interpret_output, npu_output)

def precision_test_bwd(test_filename):
    """
    Backward-kernel precision test: compare the gradients produced by NPU
    execution against those produced by the Triton interpreter.
    :param test_filename: path of the test file under test
        (e.g. 'test_chunk_bwd_kernel_dqkwg.py')
    :raises AssertionError: when any compared gradient fails the check.
    """

    npu_prefix = "npu_bwd"
    interpret_prefix = "interpret_bwd"
    atol = 1e-5
    rtol = 1e-5
    required_outputs = ["dq", "dk"]
    # Optional gradients; set a value to False to skip its comparison.
    optional_outputs = {"dg": True, "dw": True}

    def read_output(prefix, output_name):
        # One float per line; blank lines and '#' comment lines are skipped.
        file_path = f"{prefix}_{output_name}.output.txt"
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"Output file not found: {file_path}")
        with open(file_path, 'r') as f:
            lines = [line.strip() for line in f if line.strip() and not line.startswith('#')]
        return np.array([float(line) for line in lines], dtype=np.float32)

    def compare_pair(output_name, npu_data, interpret_data, required):
        # Compare one gradient pair; return True when all finite values are close.
        assert len(npu_data) == len(interpret_data), \
            f"{output_name} length mismatch: NPU={len(npu_data)}, Interpret={len(interpret_data)}"

        # Only positions finite in both outputs take part in the comparison.
        valid_mask = np.isfinite(npu_data) & np.isfinite(interpret_data)
        if not np.any(valid_mask):
            if required:
                raise ValueError(f"All values of {output_name} are NaN/Inf!")
            print(f"FAIL: All values of {output_name} are NaN/Inf!")
            return False
        npu_valid = npu_data[valid_mask]
        interpret_valid = interpret_data[valid_mask]
        close_mask = np.isclose(npu_valid, interpret_valid, atol=atol, rtol=rtol)
        match_rate = np.mean(close_mask) * 100
        abs_diff = np.abs(npu_valid - interpret_valid)
        # Fix: restrict *both* operands to the mismatching positions.  The
        # previous code subtracted the filtered interpret array from the full
        # npu array, which raised a broadcast error whenever any mismatch
        # existed — exactly when this diagnostic was needed.
        max_diff = np.max(abs_diff[~close_mask]) if np.any(~close_mask) else 0.0

        print(f"Match rate: {match_rate:.2f}%")
        if required:
            # Index is relative to the finite-filtered arrays.
            max_diff_idx = np.argmax(abs_diff) if len(npu_valid) > 0 else -1
            print(f"Largest difference: {max_diff:.8f} (at index {max_diff_idx})")
            print(f"Number of mismatched values: {np.sum(~close_mask)} / {len(close_mask)}")
        else:
            print(f"Largest difference: {max_diff:.8f}")

        if not np.all(close_mask):
            print(f"FAIL: {output_name} has mismatched values!")
            return False
        return True

    def run_mode(label, prefix, extra_env):
        # NOTE(review): the child's stdout/stderr are captured and discarded;
        # on failure check=True raises without showing the child's output.
        print(f"Running {label} mode...")
        subprocess.run(
            ['python3', test_filename, '--output-prefix', prefix],
            env={**os.environ, **extra_env},
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True
        )

    run_mode("NPU", npu_prefix, {'TRITON_DEBUG': '1'})
    run_mode("Interpret", interpret_prefix, {'TRITON_INTERPRET': '1', 'TRITON_DEBUG': '1'})

    all_pass = True
    for output_name in required_outputs:
        print(f"\n=== Comparing required output: {output_name} ===")
        npu_data = read_output(npu_prefix, output_name)
        interpret_data = read_output(interpret_prefix, output_name)
        if not compare_pair(output_name, npu_data, interpret_data, required=True):
            all_pass = False

    for output_name, is_enabled in optional_outputs.items():
        if not is_enabled:
            print(f"\nSkipping optional output: {output_name} (disabled)")
            continue
        print(f"\n=== Comparing optional output: {output_name} ===")
        try:
            npu_data = read_output(npu_prefix, output_name)
            interpret_data = read_output(interpret_prefix, output_name)
        except FileNotFoundError:
            print(f"FAIL: {output_name} is enabled but output file not found!")
            all_pass = False
            continue
        if not compare_pair(output_name, npu_data, interpret_data, required=False):
            all_pass = False

    assert all_pass, "Some outputs failed precision check! See details above."
    print("\n=== All outputs passed precision check! ===")
    

import sys
import argparse

if __name__ == "__main__":
    # CLI wrapper: python3 <this file> <kernel_name> [--seed N] [--varlen] [--hash] [-q]
    parser = argparse.ArgumentParser()
    parser.add_argument('filename', help='The file to process.')
    parser.add_argument('--seed', '-seed', type=int, default=42, help='The random seed (default: 42)')
    # store_true: each flag defaults to off and is enabled when passed.  The
    # original combined store_false with a second negation at the read site,
    # which had the same net effect but obscured the intent.
    parser.add_argument('--varlen', '-varlen', action='store_true', help='Enable var-length input.')
    parser.add_argument('--hash', '-hash', action='store_true', help='Hashed output file name to avoid duplicated output file.')
    parser.add_argument('-q', action='store_true', help='Not rerun interpreter.')
    args = parser.parse_args()

    # Convention: the test script lives at <name>/test_<name>.py under cwd.
    target_path = f"{args.filename}/test_{args.filename}.py"
    print(f"running: precision_test({target_path})")
    # precision_test returns False (exit code 0) when the outputs are close.
    sys.exit(precision_test(target_path, args.seed, args.varlen, args.hash, not args.q))





