import time
import random
from tqdm import tqdm
import torch
import torch_npu
import curr_op_py


def call_curr_op(input_, output_):
    """Run the custom operator, writing into ``output_`` in place.

    Returns True on success; on any failure, logs the error and returns
    False instead of raising (best-effort wrapper around the C extension).
    """
    try:
        curr_op_py.run_curr_op(input_, output_)
    except Exception as e:  # broad on purpose: any failure means "not runnable"
        print(f"curr_op 运行失败: {e}")
        return False
    return True


def golden_curr_op(input_, output_):
    """Reference implementation: store sinh(input_) into ``output_`` in place."""
    output_.copy_(torch.sinh(input_))


def measure_avg_runtime(func, input_, output_, num_repeat):
    """Return the average wall-clock time (seconds) of ``func(input_, output_)``.

    Uses ``time.perf_counter()`` rather than ``time.time()``: perf_counter is
    monotonic and has the highest available resolution, whereas time.time()
    can be too coarse for short operations (reading 0.0 elapsed) and may jump
    on system clock adjustments.

    Args:
        func: callable invoked as ``func(input_, output_)``.
        input_, output_: arguments forwarded unchanged on every repetition.
        num_repeat: number of timed invocations; must be > 0.

    Returns:
        Mean elapsed seconds per invocation.
    """
    total_time = 0.0
    for _ in range(num_repeat):
        start_time = time.perf_counter()
        func(input_, output_)
        total_time += time.perf_counter() - start_time
    return total_time / num_repeat


def get_final_reward(batch_input, num_repeat=10, perf_rw_coef=0.75):
    """Score each input in ``batch_input``; every reward lies in [-1, 1].

    Per-input scoring:
    - operator fails to run: -1
    - runs, but result does not match the golden reference: -0.5
    - runs and matches: min(1.0, perf_rw_coef * golden_time / curr_op_time)

    Args:
        batch_input: iterable of input tensors.
        num_repeat: timing repetitions used for the performance measurement.
        perf_rw_coef: coefficient applied to the golden/current speed ratio.

    Returns:
        (average reward, list of per-input rewards). An empty batch yields
        (0.0, []) instead of raising ZeroDivisionError.
    """
    final_rewards = []

    for idx, input_ in enumerate(batch_input):
        print(f"    -- {idx} input:")

        output_curr_op = torch.zeros_like(input_)
        output_golden = torch.zeros_like(input_)

        if not call_curr_op(input_, output_curr_op):
            final_rewards.append(-1.0)  # does not run: score -1
            continue

        golden_curr_op(input_, output_golden)
        print(f"    output_golden={output_golden}")
        print(f"    output_curr_op={output_curr_op}")
        result_match = torch.allclose(output_golden, output_curr_op, rtol=1e-4)
        print(f"        result_match = {result_match}")
        if not result_match:
            final_rewards.append(-0.5)  # runs, but wrong result: score -0.5
            continue

        # Runs and is correct: time both implementations over num_repeat
        # calls and score by the speed ratio, capped at 1.0.
        golden_time = measure_avg_runtime(golden_curr_op, input_, output_golden, num_repeat=num_repeat)
        curr_op_time = measure_avg_runtime(call_curr_op, input_, output_curr_op, num_repeat=num_repeat)
        print(f"        golden_time = {golden_time}s")
        print(f"        curr_op_time = {curr_op_time}s")
        # Guard against a 0.0 measurement (fast ops can fall below timer
        # resolution); treat that as hitting the performance cap.
        if curr_op_time > 0.0:
            performance_reward = min(1.0, perf_rw_coef * golden_time / curr_op_time)
        else:
            performance_reward = 1.0
        final_rewards.append(performance_reward)

    # Empty batch: avoid ZeroDivisionError when averaging.
    if not final_rewards:
        return 0.0, final_rewards
    return sum(final_rewards) / len(final_rewards), final_rewards


def generate_random_inputs(batch_size, dtype=torch.float16, size_lower_limit=1, size_upper_limit=65536, shape=(8, 2048)):
    """Generate a batch of random input tensors.

    Args:
        batch_size: number of tensors to generate.
        dtype: dtype of each generated tensor.
        size_lower_limit, size_upper_limit: retained for interface
            compatibility; currently unused because every tensor is created
            with the fixed ``shape``. NOTE(review): looks like random sizing
            from these limits was intended at some point — confirm before
            wiring them in.
        shape: shape of every generated tensor (default (8, 2048), matching
            the previously hard-coded size).

    Returns:
        A list of ``batch_size`` random tensors.
    """
    return [torch.randn(size=shape, dtype=dtype) for _ in tqdm(range(batch_size))]


def test_curr_op():
    """Smoke test: build random inputs and print the computed rewards."""
    print(">>> Begin generating random inputs...")
    inputs = generate_random_inputs(batch_size=2, dtype=torch.float16, size_lower_limit=1024, size_upper_limit=2048)
    print(">>> Begin getting final rewards...")
    avg_reward, reward_list = get_final_reward(batch_input=inputs)
    print(f"final_reward: {avg_reward}")
    print(f"final_rewards: {reward_list}")


# Script entry point: run the smoke test when executed directly.
if __name__ == "__main__":
    test_curr_op()
