# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
# executor_function_ascend_grouped_matmul_add.py
import torch
from atk.configs.dataset_config import InputDataset
from atk.configs.results_config import TaskResult
from atk.tasks.api_execute import register
from atk.tasks.api_execute.base_api import BaseApi
from atk.tasks.api_execute.aclnn_base_api import AclnnBaseApi
from atk.tasks.dataset.base_dataset import OpsDataset


@register("ascend_function_grouped_bias_add_grad_v2")
class MethodGroupedBiasAddGradV2Api(BaseApi):
    """CPU golden implementation of grouped_bias_add_grad_v2.

    Computes the bias gradient by summing ``gradY`` rows per group, where the
    grouping is described by ``groupIdxOptional`` interpreted according to
    ``groupIdxType`` (0: cumulative end offsets, 1: per-group row counts).
    """

    def __init__(self, task_result: TaskResult):
        super(MethodGroupedBiasAddGradV2Api, self).__init__(task_result)

    def __call__(self, input_data: InputDataset, with_output: bool = False):
        """Return the grouped bias gradient for the inputs in ``input_data.kwargs``.

        Raises:
            ValueError: if ``groupIdxType`` is neither 0 nor 1 (2D path only).
        """
        grad_y = input_data.kwargs["gradY"]
        group_idx = input_data.kwargs["groupIdxOptional"]
        group_idx_type = input_data.kwargs["groupIdxType"]

        # Accumulate in float32 for accuracy, then cast back to the input dtype.
        grad_y_dtype = grad_y.dtype
        if grad_y_dtype != torch.float32:
            grad_y = grad_y.to(torch.float32)

        if len(grad_y.shape) == 3:
            # Already grouped along dim 0: reduce the row dimension per group.
            return torch.sum(grad_y, 1).to(grad_y_dtype)

        if group_idx_type == 0:
            # group_idx holds cumulative end offsets along dim 0.
            # Collect per-group sums and concatenate once (avoids the
            # quadratic cat-in-a-loop pattern).
            partial_sums = []
            start = 0
            for end in group_idx:
                partial_sums.append(torch.sum(grad_y[start:end, :], 0, keepdim=True))
                start = end
            grad_bias = torch.cat(partial_sums, 0)
        elif group_idx_type == 1:
            # group_idx holds per-group row counts.
            chunks = torch.split(grad_y, group_idx.tolist(), 0)
            grad_bias = torch.cat(
                [torch.sum(chunk, 0, keepdim=True) for chunk in chunks], 0)
        else:
            # Previously this fell through to `[].to(...)` and raised a
            # confusing AttributeError; fail explicitly instead.
            raise ValueError(f"unsupported groupIdxType: {group_idx_type}")
        return grad_bias.to(grad_y_dtype)

    def init_by_input_data(self, input_data: InputDataset, with_output: bool = False):
        """Rewrite ``groupIdxOptional`` in-place with synthesized group indices.

        For 3D ``gradY`` (already grouped) nothing is changed. Otherwise a
        valid index tensor is generated according to ``groupIdxType`` and
        placed on NPU for the ``pyaclnn`` device, on CPU otherwise.
        """
        grad_y = input_data.kwargs["gradY"]
        group_idx = input_data.kwargs["groupIdxOptional"]
        group_idx_type = input_data.kwargs["groupIdxType"]
        if len(grad_y.shape) == 3:
            return  # already grouped; keep the provided indices untouched

        if group_idx_type == 0:
            # Cumulative end offsets: evenly spaced up to grad_y.shape[0].
            idx = torch.linspace(
                grad_y.shape[0] // group_idx.shape[0],
                grad_y.shape[0],
                steps=int(group_idx.size(0)),
            ).round().to(group_idx.dtype)
        else:
            # Per-group counts: split the rows as evenly as possible.
            count = (1 + grad_y.shape[0]) // 2
            base, remainder = divmod(grad_y.shape[0], count)
            idx = torch.Tensor(
                [base + 1] * remainder + [base] * (count - remainder)
            ).to(group_idx.dtype)

        # NPU placement only for the pyaclnn execution path.
        if self.device == 'pyaclnn':
            input_data.kwargs['groupIdxOptional'] = idx.npu()
        else:
            input_data.kwargs['groupIdxOptional'] = idx.cpu()

@register("aclnn_grouped_bias_add_grad_v2")
class AclnnFunctionApi(AclnnBaseApi):
    """Aclnn execution wrapper for grouped_bias_add_grad_v2."""

    def __call__(self):
        super().__call__()

    def init_by_input_data(self, input_data):
        import ctypes
        args, outputs = super().init_by_input_data(input_data)
        # When gradY is 3D (pre-grouped), the optional group-index argument
        # must be passed to the aclnn kernel as a null pointer.
        if len(args[0].shape) == 3:
            args[1] = ctypes.c_void_p(0)
        return args, outputs

    def after_call(self, output_packages):
        return super().after_call(output_packages)
