import os
import numpy as np
import tvm
from tvm import te, auto_scheduler, topi

@auto_scheduler.register_workload
def reshape_cast_reducesum(d0, d1, d2, k):
    """Workload: fp16 tensor (d0, d1, d2) -> cast to fp32 -> reshape to
    (d0*d1, d2) -> sum-reduce over axis k (keepdims=False).

    Returns the [input, output] tensor list expected by auto_scheduler.
    """
    data = te.placeholder((d0, d1, d2), dtype="float16")
    upcast = topi.cast(data, "float32")
    flat = topi.reshape(upcast, (d0 * d1, d2))
    reduced = topi.sum(flat, axis=k, keepdims=False)
    return [data, reduced]

# Compile/tune for an NVIDIA GPU through the CUDA backend.
target = tvm.target.Target("cuda")

# Workload dimensions: input is (64, 128, 768); k=0 reduces over axis 0 of
# the reshaped (d0*d1, d2) tensor, so the output shape is (d2,).
d0,d1,d2,k=64,128,768,0
task = auto_scheduler.SearchTask(
    func=reshape_cast_reducesum, args=(d0,d1,d2,k), target=target
)
# Inspect the computational graph
print("Computational DAG:")
print(task.compute_dag)


# Tuning records (one JSON line per measured candidate) accumulate here.
log_file = "reshape_cast_reducesum.json"
# Local RPC measurement context: each candidate kernel is repeated until at
# least 300 ms of run time has accumulated, for stable timings.
measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
tune_option = auto_scheduler.TuningOptions(
    num_measure_trials=1000,  # 1000 trials for best performance; lower for a quicker run
    runner=measure_ctx.runner,
    measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    verbose=2,
)

# Run the auto-scheduler search; the best records are written to log_file.
task.tune(tune_option)


# Apply the best schedule found during tuning.
sch, args = task.apply_best(log_file)
# NOTE: the original code called task.print_best(log_file) here without
# print() — print_best RETURNS the code as a string, so the call was a
# silent no-op. The best schedule is printed at the end of the script.
# Kill the measurement process
del measure_ctx

print("Lowered TIR:")
print(tvm.lower(sch, args, simple_mode=True))

func = tvm.build(sch, args, target)

# Random fp16 input; the expected output is computed in numpy so the tuned
# kernel can be validated (the original filled o0_np with random values and
# used it only for its shape, never checking correctness).
i0_np = np.random.uniform(size=(d0,d1,d2)).astype(np.float16)
# Reference result: cast -> reshape -> sum over axis k, mirroring the workload.
o0_np = i0_np.astype(np.float32).reshape(d0 * d1, d2).sum(axis=k)

ctx = tvm.gpu()

i0_tvm = tvm.nd.array(i0_np,ctx=ctx)
o0_tvm = tvm.nd.empty(o0_np.shape, ctx=ctx,dtype="float32")

func(i0_tvm,o0_tvm)

# Validate the GPU result against the numpy reference (loose tolerance:
# inputs are fp16 and the reduction order differs from numpy's).
np.testing.assert_allclose(o0_tvm.asnumpy(), o0_np, rtol=1e-2, atol=1e-2)

# Evaluate execution time.
evaluator = func.time_evaluator(func.entry_name, ctx, min_repeat_ms=500)
# time_evaluator results are in seconds; * 1e6 converts to MICROseconds.
# (The original label said "ns", which did not match the 1e6 factor.)
print(
    "Execution time of this operator: %.3f us"
    % (np.median(evaluator(i0_tvm,o0_tvm).results) * 1000000)
)

# Dump the winning schedule in both forms: the equivalent python schedule
# API calls and the generated CUDA kernel source.
for header, mode in (
    ("Equivalent python schedule:", "schedule"),
    ("CUDA source code:", "cuda"),
):
    print(header)
    print(task.print_best(log_file, print_mode=mode))
