# Copyright (c) Huawei Technologies Co., Ltd. 2023. All rights reserved.
import torch
import numpy as np
from atk.configs.dataset_config import InputDataset
from atk.configs.results_config import TaskResult
from atk.tasks.api_execute import register
from atk.tasks.api_execute.base_api import BaseApi
from atk.tasks.dataset.base_dataset import OpsDataset


@register("ascend_function_expand_into_jagged_permute")
class FunctionExpandIntoJaggedPermuteApi(BaseApi):
    """CPU reference implementation of ``expand_into_jagged_permute``.

    Generates randomized-but-valid ``output_offsets``/``permute`` inputs and
    computes the element-level index map that reorders jagged
    (variable-length) segments according to a segment-level permutation.
    """

    def __init__(self, task_result: TaskResult):
        super().__init__(task_result)

    def init_by_input_data(self, input_data: InputDataset):
        """Regenerate ``output_offsets`` and ``permute`` in ``input_data.kwargs``.

        - ``output_offsets`` becomes a strictly increasing int32 tensor with
          the same length as the original, starting at 0 and ending at
          ``output_size``.
        - ``permute`` becomes a random int32 permutation of ``[0, k-1)``.
        The device of the original ``output_offsets`` tensor is preserved.

        Raises:
            ValueError: if ``k < 2`` or ``output_size < k - 1`` (too few
                interior points to build a strictly increasing tensor).
        """
        OpsDataset.seed_everything()

        def generate_strictly_increasing_tensor_torch(n, k):
            # Build a strictly increasing int32 tensor of length k whose
            # first element is 0 and last element is n. Requires k >= 2 and
            # n >= k - 1 so that k - 2 distinct interior points exist in
            # [1, n - 1].
            if k < 2 or n < k - 1:
                raise ValueError("无效的n或k")
            # Random permutation of 1..n-1; keep the first k-2 values and
            # sort them to obtain strictly increasing interior points.
            random_points = torch.randperm(n - 1, dtype=torch.int32)[:k - 2] + 1
            random_points, _ = torch.sort(random_points)
            # Prepend 0 and append n as the fixed endpoints.
            return torch.cat([torch.tensor([0], dtype=torch.int32),
                              random_points,
                              torch.tensor([n], dtype=torch.int32)])

        n = input_data.kwargs['output_size']
        k = input_data.kwargs['output_offsets'].shape[0]
        device = input_data.kwargs['output_offsets'].device
        input_data.kwargs["output_offsets"] = (
            generate_strictly_increasing_tensor_torch(n, k)
            .to(dtype=torch.int32)
            .to(device=device)
        )
        input_data.kwargs['permute'] = torch.randperm(k - 1).to(torch.int32).to(device)

    def expand_into_jagged_permute(self, permute, input_offsets, output_offsets, output_size):
        """Expand a segment-level permutation into an element-level index map.

        Output segment ``i`` (the slice ``output_offsets[i]:output_offsets[i+1]``)
        is filled with consecutive input element indices starting at
        ``input_offsets[permute[i]]``. By contract the caller guarantees that
        each output segment length equals the length of the corresponding
        permuted input segment (assumed, not re-validated here).

        Args:
            permute: 1-D integer tensor, permutation of segment indices.
            input_offsets: 1-D integer tensor of input segment offsets.
            output_offsets: 1-D integer tensor of output segment offsets
                (``len(permute) + 1`` entries, last one == ``output_size``).
            output_size: total number of output elements.

        Returns:
            1-D tensor of length ``output_size`` with ``permute``'s dtype.
        """
        output = torch.empty(output_size, dtype=permute.dtype, device=permute.device)
        # Length of every output segment.
        seg_len = output_offsets[1:] - output_offsets[:-1]
        # Start index of each permuted input segment.
        seg_start = input_offsets[permute]
        seg_end = seg_start + seg_len
        for i in range(permute.shape[0]):
            output[output_offsets[i]:output_offsets[i + 1]] = torch.arange(
                seg_start[i], seg_end[i], dtype=permute.dtype,
                device=output_offsets.device)
        return output

    def __call__(self, input_data: InputDataset, with_output: bool = False):
        """Run the CPU reference computation and return its result tensor."""
        kwargs = input_data.kwargs
        return self.expand_into_jagged_permute(
            kwargs["permute"],
            kwargs["input_offsets"],
            kwargs["output_offsets"],
            kwargs["output_size"],
        )