import os

import numpy as np
import tvm
from tvm import te, auto_scheduler, topi

@auto_scheduler.register_workload
def cast_cast_reducesum_cast(n, m, k):  # default args: 64, 2, 0
    """Workload: float32 input, cast to fp16 and back, then reduce-sum and cast.

    Returns [input, reduced_sum, fp16_copy]:
      - reduced_sum: sum over axis *k* of the fp32->fp16->fp32 round-trip,
        with keepdims=False (shape drops axis k).
      - fp16_copy: the round-tripped tensor cast to float16.
        NOTE(review): despite the workload name, this casts the round-tripped
        input rather than the reduction result — confirm this is intentional.
    """
    inp = te.placeholder((n, m), name="A", dtype="float32")
    as_half = topi.cast(inp, "float16")
    restored = topi.cast(as_half, "float32")
    reduced = topi.sum(restored, axis=k, keepdims=False)
    half_out = topi.cast(restored, "float16")
    return [inp, reduced, half_out]


# Build the auto-scheduler search task for the registered workload on CUDA.
target = tvm.target.Target("cuda")

# Workload dimensions: a 64x2 input matrix, reduced over axis 0.
n, m, k = 64, 2, 0
task = auto_scheduler.SearchTask(
    func=cast_cast_reducesum_cast,
    args=(n, m, k),
    target=target,
)

# Inspect the computational graph
print("Computational DAG:")
print(task.compute_dag)


# Tuning records (measured schedules) are appended to this JSON log file;
# apply_best/print_best below read the best entry back out of it.
log_file = "cast_cast_reducesum_cast.json"
# Local RPC measurement context: runs candidate kernels on this machine.
# min_repeat_ms=300 forces enough repeats for stable GPU timings.
measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
tune_option = auto_scheduler.TuningOptions(
    num_measure_trials=1000,  # 1000 trials — enough to reach good performance
    runner=measure_ctx.runner,
    measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    verbose=2,
)

# Tuning is disabled here; uncomment to (re)populate log_file with records.
# Without an existing log_file, task.apply_best below will fail.
# task.tune(tune_option)


# Apply the best schedule found in log_file (requires a prior tuning run
# to have written records there) and echo it for inspection.
sch, args = task.apply_best(log_file)
task.print_best(log_file)
# Kill the measurement process (frees the RPC tracker/server started above)
del measure_ctx

# Show the lowered TIR for the chosen schedule before compiling it.
print("Lowered TIR:")
print(tvm.lower(sch, args, simple_mode=True))

# Compile the scheduled kernel for the CUDA target.
func = tvm.build(sch, args, target)

# Host-side buffers.  A is the input; B receives the reduction result
# (shape (m,) when k == 0, i.e. np.sum(A, axis=k)); C receives the
# float16-cast output of shape (n, m).
A_np = np.random.uniform(size=(n, m)).astype(np.float32)
B_np = np.random.uniform(size=(m,)).astype(np.float32)  # placeholder; overwritten by the kernel
C_np = np.random.uniform(size=(n, m)).astype(np.float16)

ctx = tvm.gpu()

A_tvm = tvm.nd.array(A_np, ctx=ctx)
B_tvm = tvm.nd.empty(B_np.shape, ctx=ctx, dtype="float32")
C_tvm = tvm.nd.array(C_np, ctx=ctx)

func(A_tvm, B_tvm, C_tvm)

# Correctness check against a NumPy reference that mimics the
# float32 -> float16 -> float32 round trip before reducing.
# B uses a tolerance because the GPU may sum in a different order;
# C (a pure fp16 cast of the round-tripped input) should match exactly.
ref = A_np.astype(np.float16).astype(np.float32)
np.testing.assert_allclose(B_tvm.asnumpy(), ref.sum(axis=k), rtol=1e-3)
np.testing.assert_allclose(C_tvm.asnumpy(), ref.astype(np.float16))

# Evaluate execution time.  evaluator(...) reports seconds, so * 1e6
# yields microseconds (the original label said "ns", mislabeling the unit).
evaluator = func.time_evaluator(func.entry_name, ctx, min_repeat_ms=500)
print(
    "Execution time of this operator: %.3f us"
    % (np.median(evaluator(A_tvm, B_tvm, C_tvm).results) * 1000000)
)

print("Equivalent python schedule:")
print(task.print_best(log_file, print_mode="schedule"))

print("CUDA source code:")
print(task.print_best(log_file, print_mode="cuda"))
