# Copyright (c) 1984 ; 1989-1990; 2000-2015 Free Software Foundation; Inc.
# Copyright (c) 1998-2018 Third party code and tools
# Copyright (c) 1999 University of North Carolina at Chapel Hill.
# Copyright (c) 1991 ; 1999 Free Software Foundation; Inc.
# Copyright (c) 1997-2018 Sam Lantinga <slouken@libsdl.org>
# Copyright (c) 2020-present Daniel Trugman
# Copyright (c) 2020-2022 Codeplay Software Limited
# Copyright (c) 2020-2023 Arm Ltd. and affiliates
# Copyright (c) 2020 ; Advanced Micro Devices; Inc. (Xiaoyan Zhou)
# Copyright (c) 2020 ; NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2021-2024 ; Advanced Micro Devices; Inc. All rights reserved.
# Copyright (c) 2021-2022 ; Advanced Micro Devices; Inc. (Jianfeng Yan)
# Copyright (c) 2021 Alanna Tempest
# Copyright (c) 2021 ; 2023-2024 Arm Limited and or its affiliates
# Copyright (c) 2022-2023 IBM Corporation
# Copyright (c) 2022 Víctor Fisac
# Copyright (c) 2022 Cruise LLC.
# Copyright (c) 2022 libcpr and many other contributors
# Copyright (c) 2022 ; NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2022 ; the respective contributors; as shown by the AUTHORS file.
# Copyright (c) 2022 - ; Advanced Micro Devices; Inc. (Anthony Chang; Chunyu Lai; Illia Silin; Adam Osewski; Poyen Chen; Jehandad Khan)
# Copyright (c) 2023 KNS Group LLC (YADRO)
# Copyright (c) 2024 Tri Dao.
# Copyright (c) 2008 Google Inc. All rights reserved.
# Copyright (c) 2008 ; Google Inc.
# Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
# Copyright (c) 2009 Dave Gamble
# Copyright (c) 2000-2009 Kitware; Inc.; Insight Software Consortium
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert; Samy Bengio; Johnny Mariethoz)
# Copyright (c) 2001-2002 Enthought; Inc. 2003-2019; SciPy Developers.
# Copyright (c) 2001 ; Dr Martin Porter;
# Copyright (c) 2002-2015 Nikolaus Gebhardt
# Copyright (c) 2002 by Lucent Technologies.
# Copyright (c) 2002 ; NVIDIA Corporation.
# Copyright (c) 2002 ; Richard Boulton.
# Copyright (c) 2003 SRA; Inc.
# Copyright (c) 2003 SKC; Inc.
# Copyright (c) 2005 - 2017 G-Truc Creation
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert; Leon Bottou; Iain Melvin; Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2007-2010 Baptiste Lepilleur
# Copyright (c) 2007 Free Software Foundation; Inc. <http://fsf.org/>
# Copyright (c) 2007 Free Software Foundation; Inc. <https://fsf.org/>
# Copyright (c) 2018-2024 ; Advanced Micro Devices; Inc. All rights reserved.
# Copyright (c) 2018-2025 Microsoft Corporation; Daan Leijen
# Copyright (c) 2018-2025 ; Advanced Micro Devices; Inc. All rights reserved.
# Copyright (c) 2018 YANDEX LLC
# Copyright (c) 2018 Intel Corporation.
# Copyright (c) 2018 Marat Dukhan
# Copyright (c) 2018 ; Steven Moshier
# Copyright (c) 2018 - ; Advanced Micro Devices; Inc. (Chao Liu; Jing Zhang)
# Copyright (c) 2019-2020 Kakao Brain
# Copyright (c) 2019-2020 ; Advanced Micro Devices; Inc. (Tejash Shah)
# Copyright (c) 2019-2021 ; Advanced Micro Devices; Inc. (Hanwen Chang)
# Copyright (c) 2019-2023 FUJITSU LIMITED
# Copyright (c) 2019 Google LLC
# Copyright (c) 2019 - ; Advanced Micro Devices; Inc. (Letao Qin; Qianfeng Zhang; Liang Huang; Shaojie Wang)
# Copyright (c) 2010-2014 Rich Geldreich and Tenacious Software LLC
# Copyright (c) 2010-2012 Marat Dukhan
# Copyright (c) 2010 Jonathan Hartley <tartley@tartley.com>
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2012-2016 ; Yann Collet
# Copyright (c) 2012-2017 Georgia Institute of Technology
# Copyright (c) 2012 - present ; Victor Zverovich and fmt contributors
# Copyright (c) 2012 - present ; Victor Zverovich
# Copyright (c) 2013-2014 RAD Game Tools and Valve Software
# Copyright (c) 2013-2016 by Duktape authors (see AUTHORS.rst)
# Copyright (c) 2013-2017 by Duktape authors (see AUTHORS.rst)
# Copyright (c) 2013-2017 ; Georgia Institute of Technology
# Copyright (c) 2013 ; 2014; 2015; the respective contributors
# Copyright (c) 2014-2017 Georgia Institute of Technology
# Copyright (c) 2014 Crytek
# Copyright (c) 2014 Google Inc.
# Copyright (c) 2014 joseph werle <joseph.werle@gmail.com>
# Copyright (c) 2014 ; The Regents of the University of California (Regents)
# Copyright (c) 2014 ; the respective contributors
# Copyright (c) 2014 - Facebook; Inc (Soumith Chintala)
# Copyright (c) 2015-present libuv project contributors.
# Copyright (c) 2015-2018 Baldur Karlsson
# Copyright (c) 2015-2016 LunarG; Inc.
# Copyright (c) 2015-2016 The Khronos Group Inc.
# Copyright (c) 2015-2016 Valve Corporation
# Copyright (c) 2015-2017 Georgia Institute of Technology
# Copyright (c) 2015-2017 ; Georgia Institute of Technology
# Copyright (c) 2015-2020 ; NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2015 Yangqing Jia
# Copyright (c) 2015 Google Inc.
# Copyright (c) 2015 Microsoft Corporation. All rights reserved.
# Copyright (c) 2015 Martin Moene
# Copyright (c) 2015 ; 2016 the respective contributors
# Copyright (c) 2016-present ; Facebook Inc. All rights reserved.
# Copyright (c) 2016-2017 Marat Dukhan
# Copyright (c) 2016-2021 Jupp Mueller
# Copyright (c) 2016-2021 Viktor Kirilov
# Copyright (c) 2016-2023 Intel Corporation
# Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>; All rights reserved.
# Copyright (c) 2016 Facebook Inc.
# Copyright (c) 2016 - Facebook; Inc (Adam Paszke)
# Copyright (c) 2017-present ; Facebook; Inc. All rights reserved.
# Copyright (c) 2017-2018 Facebook Inc.
# Copyright (c) 2017-2025 Advanced Micro Devices; Inc. All rights reserved.
# Copyright (c) 2017-2021 Huu Nguyen
# Copyright (c) 2017-2022 Gregor Jasny
# Copyright (c) 2017 The Khronos Group Inc.;
# Copyright (c) 2017 The Native Object Protocols Authors
# Copyright (c) 2017 Facebook Inc.
# Copyright (c) 2017 Georgia Institute of Technology
# Copyright (c) 2017 yhirose
# Copyright (c) 2017 ; Facebook Inc.
# Copyright (c) 2017 - 2024 NVIDIA CORPORATION AFFILIATES. All rights reserved.
# Copyright (c) 2017 - 2025 NVIDIA CORPORATION AFFILIATES. All rights reserved.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# 
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# This is a patch file for TorchTitan to train the DeepseekV3 model on NPUs, 
# primarily adding compatibility enhancements, affinity performance optimizations, 
# and other related modifications.
# Part of source code is licensed under the MIT license.

import os
import time
import subprocess
from typing import Callable, cast, NamedTuple, Optional, Union
import contextlib
import torchtitan
import torch
from torchtitan.tools.logging import logger
from torch.distributed.distributed_c10d import ReduceOp
import torch.distributed as dist
import torch_npu
from torchtitan.config import Profiling as ProfilingConfig
from torchtitan.tools import profiling
from torchtitan.distributed.expert_parallel import ExpertParallel
from torch.distributed.tensor import DTensor
from torchtitan.models.moe.utils import _permute, _unpermute
from torch.distributed._functional_collectives import (
    all_to_all_single,
    all_to_all_single_autograd,
)
from torchtitan.models.qwen3.infra import parallelize
import torch.nn as nn
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.tensor import Replicate, Shard
from torch.distributed.tensor.parallel import (
    ColwiseParallel,
    parallelize_module,
    PrepareModuleInput,
    RowwiseParallel,
    SequenceParallel,
)
from torchtitan.models.moe import MoEArgs
from torchtitan.config import JobConfig
from torchtitan.models.deepseek_v3.model.args import DeepSeekV3ModelArgs
from torchtitan.models.deepseek_v3 import deepseekv3_args
from torchtitan.models.moe.moe import TokenReorderer

##=====================patch for torchtitan==========================


## obtain NPU peak_flops
def get_peak_flops(device_name: str) -> float:
    """Return the theoretical peak FLOPS for the given accelerator.

    NPU-patched replacement for torchtitan's helper: in addition to the
    upstream GPU lookups it recognizes Ascend 910B variants.  When ``lspci``
    is available, any of its lines mentioning NVIDIA H100 take precedence
    over the caller-supplied ``device_name``.

    Args:
        device_name: Accelerator name string used for substring matching.

    Returns:
        Peak FLOPS as a float; falls back to the A100 number (312e12) for
        unknown devices.  (Fix: the original annotation said ``int``
        although every branch returns a float literal such as ``312e12``.)
    """
    logger.info('get_peak_flops is patched')
    try:
        # Run the lspci command and capture the output
        result = subprocess.run(["lspci"], stdout=subprocess.PIPE, text=True)
        # Keep only lines containing both "NVIDIA" and "H100"; if any are
        # found, they override the caller-supplied device name.
        filtered_lines = [
            line
            for line in result.stdout.splitlines()
            if "NVIDIA" in line and "H100" in line
        ]
        device_name = " ".join(filtered_lines) or device_name
    except FileNotFoundError as e:
        logger.warning(f"Error running lspci: {e}, fallback to use device_name")
    if "A100" in device_name:
        return 312e12
    elif "H100" in device_name:
        # NOTE: Specifications are one-half lower without sparsity.
        if "NVL" in device_name:
            return 835e12
        elif "PCIe" in device_name:
            return 756e12
        else:  # for H100 SXM and other variants
            return 989e12
    elif "H200" in device_name:
        return 989e12
    elif "MI300X" in device_name or "MI325X" in device_name:
        return 1300e12
    elif "MI250X" in device_name:
        return 191.5e12
    # Peak FLOPS of Ascend NPUs.
    # NOTE(review): the trailing commented values look like datasheet peaks
    # while the returned values are lower — confirm which is intended.
    elif "Ascend910B1" in device_name:
        return 373.88e12  # 400e12
    elif "Ascend910B2" in device_name:
        return 353.8944e12  # 376e12
    elif "Ascend910B3" in device_name:
        return 294.912e12  # 313e12
    elif "Ascend910B4" in device_name:
        return 245.76e12  # 280e12
    else:  # for other GPU types, assume A100
        logger.warning(f"Peak flops undefined for: {device_name}, fallback to A100")
        return 312e12


## profiling for npu
@contextlib.contextmanager
def maybe_enable_profiling(
    profiling_config: "ProfilingConfig",
    *,
    global_step: int = 0,
    base_folder: str = "",
    leaf_folder: str = "",
):
    """Context manager yielding an active torch_npu profiler, or ``None``.

    When ``profiling_config.enable_profiling`` is False this simply yields
    ``None``.  Otherwise a torch_npu profiler is configured whose traces are
    written under ``base_folder/<save_traces_folder>`` by torch_npu's
    tensorboard trace handler.

    Args:
        profiling_config: Profiling section of the job config (annotation is
            a forward-reference string so it is not evaluated at def time).
        global_step: Training step used to seed the profiler's step counter.
        base_folder: Root folder for trace output.
        leaf_folder: Unused by this NPU patch; kept only for interface
            compatibility with the original torchtitan helper.

    Fixes vs. the previous version:
    - removed the dead ``trace_handler`` closure and the ``rank`` lookup it
      used (output goes through ``tensorboard_trace_handler`` instead);
    - removed the dead ``torch_profiler = contextlib.nullcontext()``
      assignment in the disabled branch;
    - the accelerator activity is only appended when one was detected, so
      ``activities`` can no longer contain ``None``.
    """
    if not profiling_config.enable_profiling:
        # Profiling disabled: nothing to set up or tear down.
        yield None
        return

    trace_dir = os.path.join(base_folder, profiling_config.save_traces_folder)
    profile_freq, warmup, active = (
        profiling_config.profile_freq,
        profiling_config.profiler_warmup,
        profiling_config.profiler_active,
    )

    logger.info(f"Profiling active. Traces will be saved at {trace_dir}")
    os.makedirs(trace_dir, exist_ok=True)

    # The schedule repeats every profile_freq steps: wait, warm up, record.
    wait = profile_freq - (active + warmup)
    assert (
        wait >= 0
    ), "profile_freq must be greater than or equal to warmup + active"

    experimental_config = torch_npu.profiler._ExperimentalConfig(
        export_type=[
            torch_npu.profiler.ExportType.Text,
            torch_npu.profiler.ExportType.Db,
        ],
        profiler_level=torch_npu.profiler.ProfilerLevel.Level1,
        msprof_tx=False,
        mstx_domain_include=[],
        mstx_domain_exclude=[],
        aic_metrics=torch_npu.profiler.AiCMetrics.AiCoreNone,
        l2_cache=False,
        op_attr=False,
        data_simplification=False,
        record_op_args=False,
        gc_detect_threshold=None,
        host_sys=[
            torch_npu.profiler.HostSystem.CPU,
            torch_npu.profiler.HostSystem.MEM,
        ],
        sys_io=False,
        sys_interconnection=False,
    )

    # Pick the accelerator activity to record alongside CPU; skip it when no
    # accelerator backend reports availability.
    device_activity = None
    if torch.cuda.is_available():
        device_activity = torch.profiler.ProfilerActivity.CUDA
    elif torch.xpu.is_available():
        device_activity = torch.profiler.ProfilerActivity.XPU
    elif torch.npu.is_available():
        device_activity = torch_npu.profiler.ProfilerActivity.NPU
    activities = [torch_npu.profiler.ProfilerActivity.CPU]
    if device_activity is not None:
        activities.append(device_activity)

    with torch_npu.profiler.profile(
        activities=activities,
        schedule=torch_npu.profiler.schedule(wait=wait, warmup=warmup, active=active),
        on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(trace_dir),
        record_shapes=True,
        profile_memory=True,
        with_stack=False,
        experimental_config=experimental_config,
    ) as torch_profiler:
        # Align the profiler's internal step counter with the training step.
        torch_profiler.step_num = global_step
        yield torch_profiler


## patch to disable flex_attn
## torch_npu does not yet support the flex_attention operator,
## so this parameter must be set to False.
# Force-disable FlexAttention on every registered DeepseekV3 configuration.
for model_config in deepseekv3_args.values():
    model_config.use_flex_attn = False


## patch for enable torch._grouped_mm
def update_from_config(self, job_config: JobConfig, **kwargs) -> None:
    """Adopt the configured sequence length and MoE debug flag.

    Patched so that a seq_len larger than the original ``max_seq_len`` is
    accepted (with a warning) rather than rejected.
    """
    requested = job_config.training.seq_len
    if requested > self.max_seq_len:
        logger.warning(
            f"Sequence length {requested} exceeds original maximum {self.max_seq_len}."
        )
    # Always adopt the configured length, whether it grows or shrinks it.
    self.max_seq_len = requested

    if self.use_flex_attn and job_config.parallelism.context_parallel_degree > 1:
        raise NotImplementedError(
            "CP support for FlexAttention is still in progress."
        )

    self.moe_args._debug_force_load_balance = (
        job_config.training.debug_moe_force_load_balance
    )


## patch for float type used in argsort
def forward(
    self,
    top_scores: torch.Tensor,
    selected_experts_indices: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Reorder token indices so tokens are grouped by their assigned expert.

    Args:
        top_scores (torch.Tensor): Routing scores for selected experts,
            shape (batch_size * seq_len, top_k)
        selected_experts_indices (torch.Tensor): Expert indices selected for
            each token, shape (batch_size * seq_len, top_k)

    Returns:
        tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
            - top_scores_experts_sorted: scores reordered to expert order
            - token_indices_experts_sorted: token indices in expert order
            - num_tokens_per_expert: tokens assigned to each expert
    """
    flat_expert_ids = selected_experts_indices.view(-1)

    # Histogram over expert ids [0, num_experts) gives per-expert counts,
    # which the experts' forward uses to split the grouped tokens.
    num_tokens_per_expert = torch.histc(
        flat_expert_ids,
        bins=self.num_experts,
        min=0,
        max=self.num_experts,
    )

    # A stable argsort over expert ids groups tokens by expert while keeping
    # the original token order within each expert.
    # Ascend NPUs offload integer-typed computations to the AI CPU, so the
    # expert ids are cast to float32 first to keep argsort fast on device.
    token_indices_experts_sorted = torch.argsort(
        selected_experts_indices.to(torch.float32).view(-1), stable=True
    )

    # Gather the scores into expert order, then map the flattened
    # (token, k) positions back to token indices.
    top_scores_experts_sorted = top_scores.view(-1)[token_indices_experts_sorted]
    token_indices_experts_sorted = token_indices_experts_sorted // self.top_k

    return (
        top_scores_experts_sorted,
        token_indices_experts_sorted,
        num_tokens_per_expert,
    )

##=====================patch for torch==========================


## EP patch
def _get_gradient_divide_factors(
    reduce_scatter_group: dist.ProcessGroup,
    all_reduce_group: Optional[dist.ProcessGroup],
    reduce_dtype: torch.dtype,
    device_type: str = "",
    factor: Optional[float] = None,
    force_sum_reduction_for_comms: bool = False,
) -> tuple[
    Optional[float],
    Optional[float],
    Union[dist.ReduceOp, dist.ReduceOp.RedOpType],
    Union[dist.ReduceOp, dist.ReduceOp.RedOpType],
]:
    # MTIA appears to only support SUM reduction, hence we force it implicitly
    if device_type == "mtia":
        force_sum_reduction_for_comms = True

    # For fp32/bf16, we do not need to worry about overflow/underflow, so we
    # use NCCL's built-in division to avoid separate div kernels
    overflow_risk = reduce_dtype not in (torch.float32, torch.bfloat16)

    data_parallel_size = reduce_scatter_group.size()
    if all_reduce_group is not None:
        data_parallel_size *= all_reduce_group.size()

    if factor is None:
        factor = float(data_parallel_size)
        
    if not overflow_risk and not force_sum_reduction_for_comms:
        if factor == data_parallel_size:
            # Warning: NCCL ReduceOp.AVG may produce incorrect results with
            # world size 1.
            if data_parallel_size == 1:
                return None, None, ReduceOp.SUM, ReduceOp.SUM
            return None, None, ReduceOp.AVG, ReduceOp.AVG
        else:
            return factor, None, ReduceOp.SUM, ReduceOp.SUM

    pre_factor: Optional[float]
    if overflow_risk:
        # Since fp16 has smaller dynamic range than fp32/bf16, we want to avoid
        # overflow/underflow. For N data parallel workers, each worker computes
        # g_i, and they collectively reduce (g_1 + ... + g_N) / N. To avoid
        # overflow/underflow, we divide by ~sqrt(N) before/after the reduction.
        pre_factor = 1
        while factor % pre_factor == 0 and factor / pre_factor > pre_factor:
            pre_factor *= 2
        post_factor = factor / pre_factor
    else:
        # Prefer post-multiplying as it operates on less data and is thus faster
        pre_factor, post_factor = None, factor

    return pre_factor, post_factor, ReduceOp.SUM, ReduceOp.SUM


##TP patch
aten = torch.ops.aten
if aten.matmul.default in DTensor._op_dispatcher.sharding_propagator.op_strategy_funcs:
    DTensor._op_dispatcher.sharding_propagator.op_strategy_funcs.pop(aten.matmul.default)  # TP=2

##=====================environment variable configuration=======================


## Set environment variables to improve performance on NPUs
def set_environ_variable():
    """Set NPU-related environment variables and log the resulting values."""
    logger.warning('set_environ_variable is called')

    npu_env_settings = {
        'PYTORCH_NPU_ALLOC_CONF': 'expandable_segments:True',
        'TASK_QUEUE_ENABLE': '2',
        'MULTI_STREAM_MEMORY_REUSE': '2',
        'CPU_AFFINITY_CONF': '2',
    }
    for name, value in npu_env_settings.items():
        os.environ[name] = value

    # Echo each variable back so the effective configuration is visible in logs.
    for name in npu_env_settings:
        logger.warning(f"{name} = {os.environ.get(name)}")

#======================================overlap============================================

# Apply the patches at import time.
# NOTE(review): set_environ_variable() runs first — presumably so the NPU
# environment variables are in place before later torch/torch_npu
# initialization reads them; confirm ordering requirements.
set_environ_variable()
# NPU-aware peak-FLOPS lookup (see get_peak_flops above).
torchtitan.tools.utils.get_peak_flops = get_peak_flops
# Route profiling through the torch_npu profiler context manager.
profiling.maybe_enable_profiling = maybe_enable_profiling
# Override FSDP's private gradient divide-factor helper (EP patch above).
torch.distributed.fsdp._fully_shard._fsdp_collectives._get_gradient_divide_factors = _get_gradient_divide_factors  # EP=8
# Allow configured seq_len to exceed the model's original max_seq_len.
DeepSeekV3ModelArgs.update_from_config = update_from_config
# Float-based argsort in MoE token reordering (NPU performance patch above).
TokenReorderer.forward = forward