import numpy as np
import akg.topi
import akg.tvm
from akg.utils import format_transform as ft_util
from akg.utils import validation_check as vc_util
from akg.utils import kernel_exec as utils
from akg.utils.result_analysis import gpu_profiling
from akg.utils.format_transform import to_tvm_nd_array

@vc_util.check_input_type(akg.tvm.tensor.Tensor, (list, tuple, int, type(None)), (bool, type(None)))
def reduce_sum(inputs, axis=None, keepdims=False):
    """Sum tensor elements over the given axes.

    Args:
        inputs (tvm.tensor.Tensor): Input tensor to reduce.
        axis (Union[list, tuple, int, None]): Axes to reduce over; an empty
            list/tuple or None means reduce over all axes.
        keepdims (bool): When True, every reduced axis is kept with length 1,
            so the result has the same rank as the input; otherwise the
            reduced axes are removed.

    Returns:
        tvm.tensor.Tensor: Reduction result with the same dtype as the input.
    """
    axis = ft_util.refine_reduce_axis(inputs, axis)
    vc_util.check_shape(inputs.shape)

    # float16 inputs are promoted to float32 for the accumulation and the
    # result cast back afterwards (presumably to limit rounding error in the
    # half-precision sum — behavior inherited from the cast/sum/cast chain).
    orig_dtype = inputs.dtype
    promote = orig_dtype == 'float16'
    reduce_src = akg.topi.cast(inputs, 'float32') if promote else inputs

    summed = akg.topi.sum(reduce_src, axis=axis, keepdims=keepdims)
    return akg.topi.cast(summed, 'float16') if promote else summed

@akg.schedule(akg.topi.cuda.reduce_opt.schedule_reduce)
def reduce_sum_manual(data, axis, keepdims):
    """Reduce-sum variant bound to the hand-written CUDA reduce schedule."""
    result = reduce_sum(data, axis, keepdims)
    return result

def reduce_sum_auto(data, axis, keepdims):
    """Reduce-sum variant left to the automatic (polyhedral) scheduler."""
    result = reduce_sum(data, axis, keepdims)
    return result


def gen_data(in_shape, in_dtype, axis, keepdims):
    """Generate random input data, a zeroed output buffer, and the NumPy reference.

    Args:
        in_shape (tuple): Shape of the generated input array.
        in_dtype (str): One of "float16", "float32", "float64", "int".
        axis (Union[tuple, list, int, None]): Axes forwarded to np.sum.
        keepdims (bool): Forwarded to np.sum.

    Returns:
        tuple: (data, output, expect) where `data` is the random input,
        `output` is a zero-filled buffer shaped like the reference, and
        `expect` is np.sum(data, axis, keepdims).
    """
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; np.int_ is the
    # platform default integer, which is what astype(np.int) used to produce.
    support_list = {"float16": np.float16, "float32": np.float32,
                    "float64": np.float64, "int": np.int_}
    data = np.random.random(in_shape).astype(support_list[in_dtype])
    expect = np.sum(data, axis=axis, keepdims=keepdims)
    # A full reduction without keepdims yields a 0-d array; lift it to shape
    # (1,) so the output buffer below has at least one element.
    if axis is None and not keepdims:
        expect = np.broadcast_to(expect, (1,))
    output = np.full(expect.shape, 0.0, in_dtype)
    return data, output, expect

def test_ms_reduce_sum(in_shape, in_dtype, axis=None, keepdims=False, poly_sch=False, attrs=None):
    """Build the reduce_sum kernel, run it, verify against NumPy, and profile.

    Args:
        in_shape (tuple): Input tensor shape.
        in_dtype (str): Input dtype name.
        axis (Union[tuple, list, int, None]): Reduction axes.
        keepdims (bool): Whether reduced axes are kept with length 1.
        poly_sch (bool): True builds the auto-scheduled kernel; False the
            manually scheduled one.
        attrs (dict): Build attributes, forwarded only in the poly_sch path.

    Raises:
        AssertionError: if the kernel output does not match the reference.
    """
    if poly_sch:
        mod = utils.op_build_test(reduce_sum_auto, (in_shape, ), (in_dtype, ),
                                  kernel_name="reduce_sum_auto",
                                  op_attrs=[axis, keepdims], attrs=attrs)
    else:
        mod = utils.op_build_test(reduce_sum_manual, (in_shape, ), (in_dtype, ),
                                  kernel_name="reduce_sum_manual",
                                  op_attrs=[axis, keepdims])

    data, output, expect = gen_data(in_shape, in_dtype, axis, keepdims)
    actual = utils.mod_launch(mod, (data, output), expect=expect)
    passed = np.allclose(actual, expect, rtol=5e-03, atol=1.e-8)
    print(mod.imported_modules[0].get_source())
    print("Test {}".format("Pass" if passed else "Fail"))
    if not passed:
        raise AssertionError("Test fail")
    # Re-run on device-resident arrays to time the kernel.
    data, expect = to_tvm_nd_array([data, expect])
    gpu_profiling(mod, data, expect, 400)


if __name__ == "__main__":
    use_poly = True  # use Panamera (polyhedral auto-schedule)
    shape = (1024, 1024)
    dtype = "float16"  # "float16", "float32", "float64", "int"
    reduce_axis = (1,)
    # Whether to use an atomic add is decided automatically during analysis.
    # Pass attrs={"target": "cuda", "enable_atomic_add": True} to force the
    # atomic path explicitly.
    test_ms_reduce_sum(shape, dtype, axis=reduce_axis, keepdims=True,
                       poly_sch=use_poly, attrs={"target":"cuda"})
