#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import itertools
import random
import sysconfig

import pytest
import torch
import torch_npu
import fbgemm_gpu
import numpy as np

# Target NPU device for all on-device test runs; CPU runs serve as the golden.
DEVICE = "npu:0"
# Load the custom fbgemm NPU operator library installed into site-packages.
torch.ops.load_library(f"{sysconfig.get_path('purelib')}/libfbgemm_npu_api.so")

# Dtype axes for parametrization: permute index / lengths / values / weights.
# weights=None exercises the optional-weights path.
PTYPE = [np.int32]
LTYPE = [np.int64, np.int32]
VTYPE = [np.int64, np.int32, np.float32]
WTYPE = [None, np.float32]
TYPE_LIST = list(itertools.product(PTYPE, LTYPE, VTYPE, WTYPE))

# lengths shape is [1 ~ (2T - 1), B]
# EXTRA_T is used to test the case where permute and lengths have different
# first dimensions: lengths has shape [T + extra_T, B].
T = np.random.randint(2, 500, 5)
EXTRA_T = [1, 0, -1]
B = [128, 1024, 2048, 20480]
SHAPE_LIST = list(itertools.product(T, EXTRA_T, B))


def get_result(tensors: dict, device: str = 'cpu', is_mxrec: bool = False, d2: bool = False):
    """Run a permute-sparse-data op on the given inputs and return CPU outputs.

    Args:
        tensors: keyword arguments for the op; numpy arrays are converted to
            torch tensors, every other value is passed through unchanged.
        device: target device string; a device starting with 'npu' moves all
            tensor inputs there before the call.
        is_mxrec: dispatch to the ``torch.ops.mxrec`` namespace instead of
            ``torch.ops.fbgemm``.
        d2: call the ``permute_2D_sparse_data`` variant instead of
            ``permute_sparse_data``.

    Returns:
        List of op outputs with tensors moved back to CPU; non-tensor outputs
        are returned as-is.
    """
    converted = {}
    for key, val in tensors.items():
        converted[key] = torch.from_numpy(val) if isinstance(val, np.ndarray) else val

    if device and device.startswith('npu'):
        torch.npu.set_device(device)
        converted = {
            key: (val.to(device) if isinstance(val, torch.Tensor) else val)
            for key, val in converted.items()
        }

    # Resolve the op lazily from the chosen namespace, mirroring the original
    # branch structure as a two-step lookup.
    namespace = torch.ops.mxrec if is_mxrec else torch.ops.fbgemm
    op = namespace.permute_2D_sparse_data if d2 else namespace.permute_sparse_data
    outputs = op(**converted)

    return [out.cpu() if isinstance(out, torch.Tensor) else out for out in outputs]


@pytest.mark.parametrize("types", TYPE_LIST)
@pytest.mark.parametrize("shapes", SHAPE_LIST)
@pytest.mark.parametrize("enable_permuted_sum", [True, False])
@pytest.mark.parametrize("is_mxrec", [True, False])
@pytest.mark.parametrize("d2", [True, False])
def test_permute2d_sparse_data(types, shapes, enable_permuted_sum, is_mxrec, d2):
    """
    Compare the NPU permute(_2D)_sparse_data result against the CPU golden.

    Params:
        permute: (T) dtype=ptype
        lengths: (T + T', B) dtype=ltype
                 L = lengths.sum()
        values: (L) dtype=vtype
        weights: (L) dtype=fp32, or None when wtype is None
    """
    ptype, ltype, vtype, wtype = types
    t, extra_t, b = shapes
    # Scale extra_t so the lengths first dimension covers 1 .. 2T-1 around T.
    extra_t = random.randint(1, t - 1) * extra_t

    # Fix: honor the parametrized ptype instead of hard-coding np.int32
    # (previously ptype was unpacked but never used, making the PTYPE
    # parametrization axis dead).
    permute = np.random.choice(t + extra_t, t).astype(dtype=ptype)
    lengths = np.random.randint(1, 10, size=(t + extra_t, b), dtype=ltype)
    total_length = int(lengths.sum())
    values = np.arange(0, total_length, dtype=vtype)
    weights = np.arange(0, total_length, dtype=wtype) if wtype else None
    # Optionally pre-compute the permuted output length to exercise the
    # fast path where the op does not have to derive it itself.
    permuted_lengths_sum = lengths[permute].sum() if enable_permuted_sum else None
    params = {
        'permute': permute,
        'lengths': lengths,
        'values': values,
        'weights': weights,
        'permuted_lengths_sum': permuted_lengths_sum
    }

    golden = get_result(params, d2=d2)
    result = get_result(params, DEVICE, is_mxrec, d2=d2)

    for gt, pred in zip(golden, result):
        # Output types (Tensor vs None) must match exactly.
        assert type(gt) is type(pred)
        if isinstance(gt, torch.Tensor) and isinstance(pred, torch.Tensor):
            assert torch.allclose(gt, pred, atol=1e-5)


@pytest.mark.parametrize("types", TYPE_LIST)
def test_small_permuted_dim_large_values_length(types):
    """
    Test the case of a small permute dimension with a large values length.
    """
    ptype, ltype, vtype, wtype = types
    t = 32
    b = 128

    # Fix: honor the parametrized ptype instead of hard-coding np.int32
    # (previously ptype was unpacked but never used).
    permute = np.random.choice(t, t).astype(dtype=ptype)
    # Large per-row lengths drive a long values/weights vector.
    lengths = np.random.randint(10000, 30000, size=(t, b), dtype=ltype)
    total_length = int(lengths.sum())
    values = np.arange(0, total_length, dtype=vtype)
    weights = np.arange(0, total_length, dtype=wtype) if wtype else None
    permuted_lengths_sum = lengths[permute].sum()
    params = {
        'permute': permute,
        'lengths': lengths,
        'values': values,
        'weights': weights,
        'permuted_lengths_sum': permuted_lengths_sum
    }

    golden = get_result(params)
    result = get_result(params, DEVICE)

    for gt, pred in zip(golden, result):
        # Output types (Tensor vs None) must match exactly.
        assert type(gt) is type(pred)
        if isinstance(gt, torch.Tensor) and isinstance(pred, torch.Tensor):
            assert torch.allclose(gt, pred, atol=1e-5)


@pytest.mark.parametrize("types", TYPE_LIST)
def test_large_permuted_dim_small_values_length(types):
    """
    Test the case of a large permute dimension with a small values length.
    """
    ptype, ltype, vtype, wtype = types
    t = 872
    b = 32

    # Fix: honor the parametrized ptype instead of hard-coding np.int32
    # (previously ptype was unpacked but never used).
    permute = np.random.choice(t, t).astype(dtype=ptype)
    lengths = np.random.randint(10, 800, size=(t, b), dtype=ltype)
    total_length = int(lengths.sum())
    values = np.arange(0, total_length, dtype=vtype)
    weights = np.arange(0, total_length, dtype=wtype) if wtype else None
    permuted_lengths_sum = lengths[permute].sum()
    params = {
        'permute': permute,
        'lengths': lengths,
        'values': values,
        'weights': weights,
        'permuted_lengths_sum': permuted_lengths_sum
    }

    golden = get_result(params)
    result = get_result(params, DEVICE)

    for gt, pred in zip(golden, result):
        # Output types (Tensor vs None) must match exactly.
        assert type(gt) is type(pred)
        if isinstance(gt, torch.Tensor) and isinstance(pred, torch.Tensor):
            assert torch.allclose(gt, pred, atol=1e-5)
