# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import os

import numpy as np
import tvm
from tvm import te, auto_scheduler, topi

# --- Workload configuration -------------------------------------------------
# dtype of the input tensor. The TVM dtype string and the numpy dtype
# must be kept in sync (options: float32, float16, int, bool).
data_type = "float32"
np_type = np.float32

# Input matrix is (n, m); k selects the axis to reduce over.
n, m, k = 768, 21128, 1
# n, m, k = 1024, 1024, 0

# Reduction operator: the TVM topi op and its numpy counterpart must match
# (pairs: sum/sum, max/amax, min/amin, all/all, any/any).
op_func = topi.sum
np_func = np.sum


@auto_scheduler.register_workload
def reduce_sum_ansor(N, M, K):
    """Ansor workload: reduce an (N, M) tensor along axis K.

    The reduction operator is taken from the module-level ``op_func``
    (e.g. ``topi.sum``) and the element dtype from ``data_type``.

    Parameters
    ----------
    N, M : int
        Dimensions of the input matrix.
    K : int
        Axis to reduce over (0 or 1).

    Returns
    -------
    list
        ``[A, B]`` — input placeholder and reduced output tensor, the
        I/O list format required by ``auto_scheduler.register_workload``.
    """
    # The original declared `global data_type`; `global` is only needed
    # for assignment — module-level names are readable without it.
    A = te.placeholder((N, M), name="A", dtype=data_type)
    B = op_func(A, axis=K, keepdims=False)
    return [A, B]

# Auto-tune the workload for an NVIDIA GPU.
target = tvm.target.Target("cuda")

task = auto_scheduler.SearchTask(
    func=reduce_sum_ansor, args=(n,m,k), target=target
)

# Inspect the computational graph
print("Computational DAG:")
print(task.compute_dag)

# Tuning log path encodes dtype and problem size, e.g. "float32_768_21128_1.json".
log_file = data_type + "_" + str(n) + "_" + str(m) + "_" + str(k) +  ".json"
# Local RPC measurement context; min_repeat_ms keeps each on-device
# measurement long enough for stable GPU timings.
measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
tune_option = auto_scheduler.TuningOptions(
    num_measure_trials=1000,  # 1000 trials for best performance; lower (e.g. 10) for a quick smoke run
    runner=measure_ctx.runner,
    measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    verbose=2,
)

# Run auto-tuning (search)
task.tune(tune_option)
# Apply the best schedule
sch, args = task.apply_best(log_file)
task.print_best(log_file)
# Kill the measurement process
del measure_ctx

print("Lowered TIR:")
print(tvm.lower(sch, args, simple_mode=True))

# Compile the tuned schedule into a CUDA kernel.
func = tvm.build(sch, args, target)

# Reference input/output computed with numpy.
A_np = np.random.uniform(size=(n, m)).astype(np_type)
B_np = np_func(A_np, axis=k)

ctx = tvm.gpu()
A_tvm = tvm.nd.array(A_np, ctx=ctx)
B_tvm = tvm.nd.empty(B_np.shape, ctx=ctx, dtype=data_type)

# Run once and check the kernel against the numpy reference
# (the original computed B_np but never compared against it).
func(A_tvm, B_tvm)
np.testing.assert_allclose(B_tvm.asnumpy(), B_np, rtol=1e-3)

# Evaluate execution time. time_evaluator reports seconds, so the 1e6
# factor converts to microseconds — the original label said "ns", which
# mislabeled the unit by three orders of magnitude.
evaluator = func.time_evaluator(func.entry_name, ctx, min_repeat_ms=500)
print(
    "Execution time of this operator: %.3f us"
    % (np.median(evaluator(A_tvm, B_tvm).results) * 1000000)
)

print("Equivalent python schedule:")
print(task.print_best(log_file, print_mode="schedule"))

print("CUDA source code:")
print(task.print_best(log_file, print_mode="cuda"))