# Copyright 2025 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""mindformers mcore linear wrapper cell."""
from typing import Optional

from mindspore import mint
from mindspore.nn import Cell
from mindformers.modules.layers import Linear
from mindspore_gs.ptq.ptq.hal import ParallelType, QuantWithSmooth, DynamicQuantCell


# class McoreLinearInferCell(Cell):
#     """DeployLinearCell"""

#     def __init__(self, linear: Linear, parallel_type: ParallelType):
#         super().__init__()
#         self._layer = linear
#         self.parallel_type = parallel_type

#         self.has_act_quant = False
#         self.quant_op: Optional[QuantWithSmooth] = None
#         self.has_act_dynamic_quant = False
#         self.dyn_quant_op: Optional[DynamicQuantCell] = None

#     def _set_act_quant(self, quant_op: QuantWithSmooth):
#         self.has_act_quant = True
#         self.quant_op = quant_op

#     def _set_act_dynamic_quant(self, quant_op: DynamicQuantCell):
#         self.has_act_dynamic_quant = True
#         self.dyn_quant_op = quant_op

#     @property
#     def layer(self):
#         """layer"""
#         return self._layer

#     #QKV fc1
#     def col_linear_forward(self, input_, weight=None):
#         """
#         Forward of ColumnParallelLinear.
#         Performs a linear transformation considering various parallel modes and data type conversions.
#         """
#         if weight is None:
#             if self._layer.weight is None:
#                 raise RuntimeError(
#                     "For ColumnParallelLinear, weight was not supplied to construct(), "
#                     "and `skip_weight_param_allocation` is True."
#                     )
#             weight = self._layer.weight
#         else:
#             # Check the weight passed in is the correct shape.
#             experted_shape = (self._layer.output_size_per_partition, self._layer.input_size)
#             if weight.shape != experted_shape:
#                 raise RuntimeError(
#                     f"supplied weight's shape is {tuple(weight.shape)}, "
#                     f"not {experted_shape} as expected."
#                 )

#         origin_dtype = input_.dtype
#         output_shape = input_.shape[:-1] + (self._layer.output_size_per_partition,)

#         input_ = mint.reshape(input_, (-1, self._layer.input_size))
#         input_ = self._layer.cast(input_, self._layer.compute_dtype)

#         if self.has_act_quant:
#             input_ = self.quant_op(input_)
#         if self.has_act_dynamic_quant:
#             input_, x_scale = self.dyn_quant_op(input_)
#             x_scale = mint.reshape(x_scale, (-1,))

#         if self.has_act_dynamic_quant:
#             output_parallel = self._layer.matmul(input_, weight, None, x_scale)
#         else:
#             output_parallel = self._layer.matmul(input_, weight)

#         if self._layer.has_bias and not self._layer.skip_bias_add:
#             bias = self._layer.cast(self._layer.bias, self._layer.compute_dtype)
#             output_parallel = mint.add(output_parallel, bias)

#         output_parallel = mint.reshape(output_parallel, output_shape)
#         output_parallel = self._layer.cast(output_parallel, origin_dtype)

#         if self._layer.gather_output:
#             output = self._layer.gather_from_mp_region(output_parallel)
#         else:
#             output = output_parallel
#         return output

#     def row_linear_forward(self, input_):
#         """
#         Forward of RowParallelLinear.
#         Performs a linear transformation considering various parallel modes and data type conversions.
#         """
#         #并行需要通信
#         if self._layer.input_is_parallel:
#             input_parallel = input_
#         else:
#             input_parallel = self._layer.scatter_to_mp_region(input_)

#         origin_dtype = input_parallel.dtype
#         input_parallel = self._layer.cast(input_parallel, self._layer.compute_dtype)

#         if self.has_act_quant:
#             #quant_op就是算子 换成我的算子 
#             #input_parallel = self.quant_op(input_parallel)
#             #input_parallel_2 = self.quant_op(input_parallel)
#             input_parallel = self.quant_op(input_parallel)
#         if self.has_act_dynamic_quant:
#             input_parallel, x_scale = self.dyn_quant_op(input_parallel)
#             x_scale = mint.reshape(x_scale, (-1,))

#         output_shape = input_parallel.shape[:-1] + (self._layer.output_size,)
#         input_parallel = mint.reshape(input_parallel, (-1, self._layer.input_size_per_partition))

#         if self.has_act_dynamic_quant:
#             output_parallel = self._layer.matmul(input_parallel, self._layer.weight, None, x_scale)
#         else:
#             output_parallel = self._layer.matmul(input_parallel, self._layer.weight)
#         output = self._layer.reduce_from_mp_region(output_parallel)

#         if self._layer.has_bias and not self._layer.skip_bias_add:
#             bias = self._layer.cast(self._layer.bias, self._layer.compute_dtype)
#             output = mint.add(output, bias)

#         output = mint.reshape(output, output_shape)
#         output = self._layer.cast(output, origin_dtype)
#         return output

#     def construct(self, x):
#         """linear deploy construct"""
#         if self.parallel_type == ParallelType.NO_PARALLEL:
#             raise RuntimeError(f"Normal Linear is not supplied by mcore")
#         if self.parallel_type == ParallelType.COL_PARALLEL:
#             x = self.col_linear_forward(x)
#         if self.parallel_type == ParallelType.ROW_PARALLEL:
#             x = self.row_linear_forward(x)
#         return x

#     def sharded_state_dict(self, **kwargs):
#         """provide the sharded state dict based on the config"""
#         state_dict = {}
#         if self.parallel_type == ParallelType.NO_PARALLEL:
#             return {}
#         tensor_parallel_num = self.layer.tensor_parallel_group_size

#         if self.parallel_type == ParallelType.COL_PARALLEL:
#             w_shard = (tensor_parallel_num, 1) if self.layer.transpose_b else (1, tensor_parallel_num)
#             if not self.layer.skip_weight_param_allocation:
#                 state_dict[self.layer.weight.name] = {'shape': self.layer.weight.shape,
#                                                       'shard': w_shard}
#             if self.layer.bias:
#                 state_dict[self.layer.bias.name] = {'shape': self.layer.bias.shape,
#                                                     'shard': (tensor_parallel_num,)}
#         elif self.parallel_type == ParallelType.ROW_PARALLEL:
#             w_shard = (1, tensor_parallel_num) if self.layer.transpose_b else (tensor_parallel_num, 1)
#             if self.layer.is_expert and self.layer.expert_num > 1:
#                 w_shard = (1, 1, tensor_parallel_num) if self.layer.transpose_b \
#                     else (1, tensor_parallel_num, 1)
#             if self.layer.bias:
#                 state_dict[self.layer.bias.name] = {'shape': self.layer.bias.shape,
#                                                     'shard': (1,)}
#             state_dict[self.layer.weight.name] = {'shape': self.layer.weight.shape,
#                                                   'shard': w_shard}
#         else:
#             return {}
#         if self.quant_op:
#             state_dict.update(self.quant_op.param_shard_state(tensor_parallel_num, **kwargs))
#         if hasattr(self.layer.matmul, "param_shard_state"):
#             state_dict.update(self.layer.matmul.param_shard_state(tensor_parallel_num, self.parallel_type))
#         return state_dict

class McoreLinearInferCell(Cell):
    """Inference wrapper around an mcore tensor-parallel ``Linear`` layer.

    Routes the forward pass through a two-stage factorized projection
    (``layer.matmul1`` then ``layer.matmul2``, i.e. ``x @ W1 @ W2``) while
    preserving the original tensor-parallel communication pattern
    (gather / scatter / all-reduce) of the wrapped layer.

    Args:
        linear (Linear): the mcore parallel linear layer being wrapped.
        parallel_type (ParallelType): whether the layer is column- or
            row-parallel; drives which forward path ``construct`` takes.
    """

    def __init__(self, linear: Linear, parallel_type: ParallelType):
        super().__init__()
        self._layer = linear
        self.parallel_type = parallel_type

        # Static (smooth-quant style) activation quantization hook.
        self.has_act_quant = False
        self.quant_op: Optional[QuantWithSmooth] = None
        # Dynamic (per-token) activation quantization hook.
        self.has_act_dynamic_quant = False
        self.dyn_quant_op: Optional[DynamicQuantCell] = None

        # Placeholders for the factorized x @ W1 @ W2 path.
        # NOTE(review): these are never read inside this class — the forward
        # paths use self._layer.matmul1 / self._layer.matmul2 instead.
        # Presumably they are populated and consumed externally; confirm
        # before removing.
        self.matmul1 = None
        self.matmul2 = None
        self.q_weight1 = None
        self.q_weight2 = None
        self.w1_scale = None
        self.w1_zp = None
        self.w2_scale = None
        self.w2_zp = None

    @property
    def layer(self):
        """The wrapped parallel Linear layer."""
        return self._layer

    def col_linear_forward(self, input_, weight=None):
        """Forward of ColumnParallelLinear via the factorized matmul pair.

        Args:
            input_: activation tensor; last dim must equal the layer's
                ``input_size``.
            weight: accepted for interface compatibility with the original
                forward but unused — the (quantized) weights live inside
                ``layer.matmul1`` / ``layer.matmul2``.

        Returns:
            Output tensor with the caller's leading dims and a last dim of
            ``output_size_per_partition`` (or the full output size after
            gather when ``layer.gather_output`` is set), cast back to the
            input dtype.
        """
        origin_dtype = input_.dtype
        # Final shape keeps leading dims; last dim is the partition output.
        output_shape = input_.shape[:-1] + (self._layer.output_size_per_partition,)

        # Flatten to [tokens, in] and move to the layer's compute dtype.
        input_ = mint.reshape(input_, (-1, self._layer.input_size))
        input_ = self._layer.cast(input_, self._layer.compute_dtype)

        # Factorized projection: x @ W1 -> [tokens, r], then (.) @ W2.
        # NOTE(review): activation quantization and bias-add are
        # intentionally not applied on this path in the current state —
        # confirm against the calibration flow before re-enabling either.
        x_mid = self._layer.matmul1(input_)
        output_parallel = self._layer.matmul2(x_mid)

        output_parallel = mint.reshape(output_parallel, output_shape)
        output_parallel = self._layer.cast(output_parallel, origin_dtype)

        # Column-parallel optionally gathers the sharded outputs.
        if self._layer.gather_output:
            output = self._layer.gather_from_mp_region(output_parallel)
        else:
            output = output_parallel
        return output

    def row_linear_forward(self, input_):
        """Forward of RowParallelLinear via the factorized matmul pair.

        Args:
            input_: activation tensor; if not already parallel it is
                scattered across the tensor-parallel group.

        Returns:
            Output tensor reduced across the mp group, reshaped to the
            caller's leading dims with last dim ``output_size`` and cast
            back to the input dtype.
        """
        # Scatter the input across the tensor-parallel group if needed.
        if self._layer.input_is_parallel:
            input_parallel = input_
        else:
            input_parallel = self._layer.scatter_to_mp_region(input_)

        origin_dtype = input_parallel.dtype
        input_parallel = self._layer.cast(input_parallel, self._layer.compute_dtype)

        # Activation quantization: static smooth-quant and/or per-token.
        if self.has_act_quant:
            input_parallel = self.quant_op(input_parallel)
        if self.has_act_dynamic_quant:
            input_parallel, x_scale = self.dyn_quant_op(input_parallel)
            # NOTE(review): x_scale is flattened here but never passed to
            # matmul1/matmul2 below, so the dynamic scale is currently
            # dropped on this path — confirm before relying on dynamic
            # quantization for row-parallel layers.
            x_scale = mint.reshape(x_scale, (-1,))

        # Flatten to [tokens, in_per_partition].
        output_shape = input_parallel.shape[:-1] + (self._layer.output_size,)
        input_parallel = mint.reshape(input_parallel, (-1, self._layer.input_size_per_partition))

        # Factorized projection: x_shard @ W1 -> [tokens, r], then (.) @ W2.
        mid = self._layer.matmul1(input_parallel)
        output_parallel = self._layer.matmul2(mid)

        # Row-parallel requires an all-reduce over the mp group.
        output = self._layer.reduce_from_mp_region(output_parallel)

        # NOTE(review): bias-add is intentionally skipped on this path in
        # the current state — confirm before re-enabling.
        output = mint.reshape(output, output_shape)
        output = self._layer.cast(output, origin_dtype)
        return output

    def construct(self, x):
        """Dispatch to the column- or row-parallel forward.

        Raises:
            RuntimeError: if the layer is not parallel — plain (non-mcore)
                linears are not supported by this wrapper.
        """
        if self.parallel_type == ParallelType.NO_PARALLEL:
            raise RuntimeError("Normal Linear is not supplied by mcore")
        if self.parallel_type == ParallelType.COL_PARALLEL:
            x = self.col_linear_forward(x)
        if self.parallel_type == ParallelType.ROW_PARALLEL:
            x = self.row_linear_forward(x)
        return x

    def sharded_state_dict(self, **kwargs):
        """Provide the sharded state dict based on the parallel config.

        Returns a mapping from parameter name to ``{'shape', 'shard'}``
        describing how each parameter is split across the tensor-parallel
        group; empty for non-parallel layers.
        """
        state_dict = {}
        if self.parallel_type == ParallelType.NO_PARALLEL:
            return {}
        tensor_parallel_num = self.layer.tensor_parallel_group_size

        if self.parallel_type == ParallelType.COL_PARALLEL:
            # Column-parallel splits the output dim; which axis that is
            # depends on whether the weight is stored transposed.
            w_shard = (tensor_parallel_num, 1) if self.layer.transpose_b else (1, tensor_parallel_num)
            if not self.layer.skip_weight_param_allocation:
                state_dict[self.layer.weight.name] = {'shape': self.layer.weight.shape,
                                                      'shard': w_shard}
            if self.layer.bias:
                state_dict[self.layer.bias.name] = {'shape': self.layer.bias.shape,
                                                    'shard': (tensor_parallel_num,)}
        elif self.parallel_type == ParallelType.ROW_PARALLEL:
            # Row-parallel splits the input dim; expert (MoE) weights carry
            # a leading expert axis that stays unsharded.
            w_shard = (1, tensor_parallel_num) if self.layer.transpose_b else (tensor_parallel_num, 1)
            if self.layer.is_expert and self.layer.expert_num > 1:
                w_shard = (1, 1, tensor_parallel_num) if self.layer.transpose_b \
                    else (1, tensor_parallel_num, 1)
            if self.layer.bias:
                # Row-parallel bias is replicated (added after the reduce).
                state_dict[self.layer.bias.name] = {'shape': self.layer.bias.shape,
                                                    'shard': (1,)}
            state_dict[self.layer.weight.name] = {'shape': self.layer.weight.shape,
                                                  'shard': w_shard}
        else:
            return {}
        if self.quant_op:
            state_dict.update(self.quant_op.param_shard_state(tensor_parallel_num, **kwargs))
        # NOTE(review): this inspects layer.matmul, while the forward paths
        # use layer.matmul1/matmul2 — the hasattr guard makes a missing
        # attribute harmless, but confirm whether the factorized matmuls
        # need their own shard state here.
        if hasattr(self.layer.matmul, "param_shard_state"):
            state_dict.update(self.layer.matmul.param_shard_state(tensor_parallel_num, self.parallel_type))
        return state_dict