
#ifndef _G_KERNEL_SEC2_NVOC_H_
#define _G_KERNEL_SEC2_NVOC_H_

// Version of generated metadata structures
#ifdef NVOC_METADATA_VERSION
#undef NVOC_METADATA_VERSION
#endif
#define NVOC_METADATA_VERSION 2

#include "nvoc/runtime.h"
#include "nvoc/rtti.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#pragma once
#include "g_kernel_sec2_nvoc.h"

#ifndef KERNEL_SEC2_H
#define KERNEL_SEC2_H

#include "core/bin_data.h"
#include "core/core.h"
#include "gpu/eng_state.h"
#include "gpu/falcon/kernel_falcon.h"
#include "gpu/gpu.h"
#include "gpu/fsp/kern_fsp_cot_payload.h"
#include "gpu/fsp/kern_fsp_mctp_pkt_state.h"
#include "gpu/timer/objtmr.h"
#include "kernel/gpu/mem_mgr/mem_desc.h"
#include "kernel/gpu/gpu_halspec.h"
#include "nvoc/utility.h"

// forward declaration of RM_FLCN_BL_DESC from rmflcnbl.h
struct _def_rm_flcn_bl_desc;
typedef struct _def_rm_flcn_bl_desc RM_FLCN_BL_DESC;


// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
// source file references the field.
#ifdef NVOC_KERNEL_SEC2_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif


// Metadata with per-class RTTI and vtable with ancestor(s)
struct NVOC_METADATA__KernelSec2;
struct NVOC_METADATA__OBJENGSTATE;
struct NVOC_METADATA__IntrService;
struct NVOC_METADATA__KernelFalcon;
struct NVOC_VTABLE__KernelSec2;


// KernelSec2 — kernel-side object for the SEC2 engine.
// NVOC-generated class layout: RTTI/metadata pointer, embedded base-class
// instances (OBJENGSTATE, IntrService, KernelFalcon), ancestor pointers for
// staticCast, per-object halified function pointers, PDB properties, and
// plain data members.
// NOTE(review): this layout is produced by the NVOC generator — do not
// reorder or resize fields by hand; the companion generated .c depends on it.
struct KernelSec2 {

    // Metadata starts with RTTI structure.
    union {
         const struct NVOC_METADATA__KernelSec2 *__nvoc_metadata_ptr;
         const struct NVOC_RTTI *__nvoc_rtti;
    };

    // Parent (i.e. superclass or base class) objects
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    struct IntrService __nvoc_base_IntrService;
    struct KernelFalcon __nvoc_base_KernelFalcon;

    // Ancestor object pointers for `staticCast` feature
    struct Object *__nvoc_pbase_Object;    // obj super^2
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;    // engstate super
    struct IntrService *__nvoc_pbase_IntrService;    // intrserv super
    struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;    // crashcatEngine super^3
    struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;    // kcrashcatEngine super^2
    struct KernelFalcon *__nvoc_pbase_KernelFalcon;    // kflcn super
    struct KernelSec2 *__nvoc_pbase_KernelSec2;    // ksec2

    // Vtable with 37 per-object function pointers
    NV_STATUS (*__ksec2ConfigureFalcon__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (4 hals) body
    NV_STATUS (*__ksec2ResetHw__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // virtual halified (3 hals) override (kflcn) base (kflcn) body
    NV_STATUS (*__ksec2StateLoad__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32);  // virtual halified (3 hals) override (engstate) base (engstate) body
    void (*__ksec2StateDestroy__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // virtual halified (2 hals) override (engstate) base (engstate) body
    NvU32 (*__ksec2ReadUcodeFuseVersion__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32);  // halified (4 hals) body
    const BINDATA_ARCHIVE * (*__ksec2GetBinArchiveBlUcode__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (3 hals) body
    NV_STATUS (*__ksec2GetGenericBlUcode__)(struct OBJGPU *, struct KernelSec2 * /*this*/, const RM_FLCN_BL_DESC **, const NvU8 **);  // halified (3 hals) body
    const BINDATA_ARCHIVE * (*__ksec2GetBinArchiveSecurescrubUcode__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (3 hals) body
    NV_STATUS (*__ksec2SetupGspImages__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NVDM_PAYLOAD_COT *);  // halified (3 hals) body
    NV_STATUS (*__ksec2PrepareBootCommands__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (3 hals) body
    NV_STATUS (*__ksec2SafeToSendBootCommands__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (3 hals) body
    NV_STATUS (*__ksec2SendBootCommands__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (3 hals) body
    NV_STATUS (*__ksec2PrepareAndSendBootCommands__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (3 hals) body
    NvBool (*__ksec2CanSendPacket__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (2 hals) body
    NvU32 (*__ksec2GetMaxSendPacketSize__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (2 hals) body
    NvU32 (*__ksec2CreateNvdmHeader__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32);  // halified (2 hals) body
    NvU32 (*__ksec2CreateMctpHeader__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU8, NvU8, NvU8, NvU8);  // halified (3 hals) body
    NV_STATUS (*__ksec2SendPacket__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU8 *, NvU32);  // halified (2 hals) body
    NV_STATUS (*__ksec2WaitForGspTargetMaskReleased__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (2 hals) body
    NV_STATUS (*__ksec2ReadPacket__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU8 *, NvU32, NvU32 *);  // halified (2 hals) body
    NvBool (*__ksec2IsResponseAvailable__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (2 hals) body
    NvBool (*__ksec2GspFmcIsEnforced__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (2 hals) body
    NV_STATUS (*__ksec2WaitForSecureBoot__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (2 hals) body
    NvU32 (*__ksec2GetMaxRecvPacketSize__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (2 hals) body
    NvU8 (*__ksec2NvdmToSeid__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU8);  // halified (2 hals) body
    NV_STATUS (*__ksec2GetPacketInfo__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU8 *, NvU32, MCTP_PACKET_STATE *, NvU8 *);  // halified (2 hals) body
    NV_STATUS (*__ksec2ValidateMctpPayloadHeader__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU8 *, NvU32);  // halified (2 hals) body
    NV_STATUS (*__ksec2ProcessNvdmMessage__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU8 *, NvU32);  // halified (2 hals) body
    NV_STATUS (*__ksec2ProcessCommandResponse__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU8 *, NvU32);  // halified (2 hals) body
    void (*__ksec2DumpDebugState__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // halified (2 hals) body
    NV_STATUS (*__ksec2ErrorCode2NvStatusMap__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32);  // halified (2 hals) body
    NvU32 (*__ksec2RegRead__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32);  // virtual halified (3 hals) inherited (kflcn) base (kflcn) body
    void (*__ksec2RegWrite__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32, NvU32);  // virtual halified (3 hals) inherited (kflcn) base (kflcn) body
    NvU32 (*__ksec2MaskDmemAddr__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32);  // virtual halified (4 hals) inherited (kflcn) base (kflcn) body
    void (*__ksec2ReadDmem__)(struct KernelSec2 * /*this*/, NvU32, NvU32, void *);  // virtual halified (2 hals) inherited (kcrashcatEngine) base (kflcn)
    const NvU32 * (*__ksec2GetScratchOffsets__)(struct KernelSec2 * /*this*/, NV_CRASHCAT_SCRATCH_GROUP_ID);  // virtual halified (2 hals) inherited (kcrashcatEngine) base (kflcn)
    NvU32 (*__ksec2GetWFL0Offset__)(struct KernelSec2 * /*this*/);  // virtual halified (2 hals) inherited (kcrashcatEngine) base (kflcn)

    // 6 PDB properties
    NvBool PDB_PROP_KSEC2_GSP_MODE_GSPRM;
    NvBool PDB_PROP_KSEC2_BOOT_GSPFMC;
//  NvBool PDB_PROP_KSEC2_IS_MISSING inherited from OBJENGSTATE
    NvBool PDB_PROP_KSEC2_BOOT_COMMAND_OK;
    NvBool PDB_PROP_KSEC2_DISABLE_GSPFMC;
    NvBool PDB_PROP_KSEC2_RM_BOOT_GSP;

    // Data members
    MEMORY_DESCRIPTOR *pGspFmcMemdesc;          // NOTE(review): presumably backs the GSP FMC image — confirm against the .c
    MEMORY_DESCRIPTOR *pGspBootArgsMemdesc;     // NOTE(review): presumably backs GSP boot arguments — confirm against the .c
    NvBool bGspDebugBufferInitialized;
    RMTIMEOUT rpcTimeout;
    const RM_FLCN_BL_DESC *pGenericBlUcodeDesc;
    const NvU8 *pGenericBlUcodeImg;
    NVDM_PAYLOAD_COT *pCotPayload;
    NvU32 cotPayloadSignatureSize;
    NvU32 cotPayloadPublicKeySize;
    NvU32 cotPayloadVersion;
};


// Vtable with 25 per-class function pointers
// Shared by all KernelSec2 instances (reached via __nvoc_metadata_ptr->vtable),
// unlike the per-object halified pointers stored inside struct KernelSec2.
struct NVOC_VTABLE__KernelSec2 {
    NV_STATUS (*__ksec2ConstructEngine__)(struct OBJGPU *, struct KernelSec2 * /*this*/, ENGDESCRIPTOR);  // virtual halified (singleton optimized) override (engstate) base (engstate) body
    void (*__ksec2RegisterIntrService__)(struct OBJGPU *, struct KernelSec2 * /*this*/, IntrServiceRecord *);  // virtual halified (singleton optimized) override (intrserv) base (intrserv) body
    NV_STATUS (*__ksec2ServiceNotificationInterrupt__)(struct OBJGPU *, struct KernelSec2 * /*this*/, IntrServiceServiceNotificationInterruptArguments *);  // virtual halified (singleton optimized) override (intrserv) base (intrserv)
    NV_STATUS (*__ksec2StateUnload__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32);  // virtual override (engstate) base (engstate)
    void (*__ksec2InitMissing__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__ksec2StatePreInitLocked__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__ksec2StatePreInitUnlocked__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__ksec2StateInitLocked__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__ksec2StateInitUnlocked__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__ksec2StatePreLoad__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__ksec2StatePostLoad__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__ksec2StatePreUnload__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
    NV_STATUS (*__ksec2StatePostUnload__)(struct OBJGPU *, struct KernelSec2 * /*this*/, NvU32);  // virtual inherited (engstate) base (engstate)
    NvBool (*__ksec2IsPresent__)(struct OBJGPU *, struct KernelSec2 * /*this*/);  // virtual inherited (engstate) base (engstate)
    NvBool (*__ksec2ClearInterrupt__)(struct OBJGPU *, struct KernelSec2 * /*this*/, IntrServiceClearInterruptArguments *);  // virtual inherited (intrserv) base (intrserv)
    NvU32 (*__ksec2ServiceInterrupt__)(struct OBJGPU *, struct KernelSec2 * /*this*/, IntrServiceServiceInterruptArguments *);  // virtual inherited (intrserv) base (intrserv)
    NvBool (*__ksec2Configured__)(struct KernelSec2 * /*this*/);  // virtual inherited (kcrashcatEngine) base (kflcn)
    void (*__ksec2Unload__)(struct KernelSec2 * /*this*/);  // virtual inherited (kcrashcatEngine) base (kflcn)
    void (*__ksec2Vprintf__)(struct KernelSec2 * /*this*/, NvBool, const char *, va_list);  // virtual inherited (kcrashcatEngine) base (kflcn)
    NvU32 (*__ksec2PriRead__)(struct KernelSec2 * /*this*/, NvU32);  // virtual inherited (kcrashcatEngine) base (kflcn)
    void (*__ksec2PriWrite__)(struct KernelSec2 * /*this*/, NvU32, NvU32);  // virtual inherited (kcrashcatEngine) base (kflcn)
    void * (*__ksec2MapBufferDescriptor__)(struct KernelSec2 * /*this*/, CrashCatBufferDescriptor *);  // virtual inherited (kcrashcatEngine) base (kflcn)
    void (*__ksec2UnmapBufferDescriptor__)(struct KernelSec2 * /*this*/, CrashCatBufferDescriptor *);  // virtual inherited (kcrashcatEngine) base (kflcn)
    void (*__ksec2SyncBufferDescriptor__)(struct KernelSec2 * /*this*/, CrashCatBufferDescriptor *, NvU32, NvU32);  // virtual inherited (kcrashcatEngine) base (kflcn)
    void (*__ksec2ReadEmem__)(struct KernelSec2 * /*this*/, NvU64, NvU64, void *);  // virtual halified (singleton) inherited (kcrashcatEngine) base (kflcn)
};

// Metadata with per-class RTTI and vtable with ancestor(s)
// One static instance of this struct exists per class; each object's
// __nvoc_metadata_ptr points at it. Ancestor metadata is embedded by value.
struct NVOC_METADATA__KernelSec2 {
    const struct NVOC_RTTI rtti;
    const struct NVOC_METADATA__OBJENGSTATE metadata__OBJENGSTATE;
    const struct NVOC_METADATA__IntrService metadata__IntrService;
    const struct NVOC_METADATA__KernelFalcon metadata__KernelFalcon;
    const struct NVOC_VTABLE__KernelSec2 vtable;
};

#ifndef __NVOC_CLASS_KernelSec2_TYPEDEF__
#define __NVOC_CLASS_KernelSec2_TYPEDEF__
typedef struct KernelSec2 KernelSec2;
#endif /* __NVOC_CLASS_KernelSec2_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelSec2
#define __nvoc_class_id_KernelSec2 0x2f36c9
#endif /* __nvoc_class_id_KernelSec2 */

// Casting support
// staticCast follows the precomputed ancestor pointer; dynamicCast consults
// the NVOC RTTI at runtime (and degenerates to NULL when the class is
// compiled out via __nvoc_kernel_sec2_h_disabled).
extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelSec2;

#define __staticCast_KernelSec2(pThis) \
    ((pThis)->__nvoc_pbase_KernelSec2)

#ifdef __nvoc_kernel_sec2_h_disabled
#define __dynamicCast_KernelSec2(pThis) ((KernelSec2*) NULL)
#else //__nvoc_kernel_sec2_h_disabled
#define __dynamicCast_KernelSec2(pThis) \
    ((KernelSec2*) __nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelSec2)))
#endif //__nvoc_kernel_sec2_h_disabled

// Property macros
// BASE_CAST resolves the sub-object holding the property (empty when it lives
// directly on KernelSec2); BASE_NAME resolves the property's field name there.
// PDB_PROP_KSEC2_IS_MISSING maps onto the inherited OBJENGSTATE property.
#define PDB_PROP_KSEC2_GSP_MODE_GSPRM_BASE_CAST
#define PDB_PROP_KSEC2_GSP_MODE_GSPRM_BASE_NAME PDB_PROP_KSEC2_GSP_MODE_GSPRM
#define PDB_PROP_KSEC2_BOOT_GSPFMC_BASE_CAST
#define PDB_PROP_KSEC2_BOOT_GSPFMC_BASE_NAME PDB_PROP_KSEC2_BOOT_GSPFMC
#define PDB_PROP_KSEC2_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_KSEC2_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
#define PDB_PROP_KSEC2_BOOT_COMMAND_OK_BASE_CAST
#define PDB_PROP_KSEC2_BOOT_COMMAND_OK_BASE_NAME PDB_PROP_KSEC2_BOOT_COMMAND_OK
#define PDB_PROP_KSEC2_DISABLE_GSPFMC_BASE_CAST
#define PDB_PROP_KSEC2_DISABLE_GSPFMC_BASE_NAME PDB_PROP_KSEC2_DISABLE_GSPFMC
#define PDB_PROP_KSEC2_RM_BOOT_GSP_BASE_CAST
#define PDB_PROP_KSEC2_RM_BOOT_GSP_BASE_NAME PDB_PROP_KSEC2_RM_BOOT_GSP


// NVOC object construction entry points (implemented in the generated .c).
NV_STATUS __nvoc_objCreateDynamic_KernelSec2(KernelSec2**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelSec2(KernelSec2**, Dynamic*, NvU32);
#define __objCreate_KernelSec2(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelSec2((ppNewObj), staticCast((pParent), Dynamic), (createFlags))


// Wrapper macros for implementation functions
// Destructor body; reached through the __nvoc_ksec2Destruct alias.
void ksec2Destruct_IMPL(struct KernelSec2 *pKernelSec2);
#define __nvoc_ksec2Destruct(pKernelSec2) ksec2Destruct_IMPL(pKernelSec2)

// When the class is compiled out (__nvoc_kernel_sec2_h_disabled) the call
// becomes an asserting stub; otherwise it forwards to the _IMPL function.
void ksec2CleanupBootState_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);
#ifdef __nvoc_kernel_sec2_h_disabled
static inline void ksec2CleanupBootState(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    NV_ASSERT_FAILED_PRECOMP("KernelSec2 was disabled!");
}
#else // __nvoc_kernel_sec2_h_disabled
#define ksec2CleanupBootState(pGpu, pKernelSec2) ksec2CleanupBootState_IMPL(pGpu, pKernelSec2)
#endif // __nvoc_kernel_sec2_h_disabled

// Disabled-class stub asserts; enabled build forwards to the _IMPL function.
void ksec2ReleaseProxyImage_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);
#ifdef __nvoc_kernel_sec2_h_disabled
static inline void ksec2ReleaseProxyImage(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    NV_ASSERT_FAILED_PRECOMP("KernelSec2 was disabled!");
}
#else // __nvoc_kernel_sec2_h_disabled
#define ksec2ReleaseProxyImage(pGpu, pKernelSec2) ksec2ReleaseProxyImage_IMPL(pGpu, pKernelSec2)
#endif // __nvoc_kernel_sec2_h_disabled

// Disabled-class stub returns NV_ERR_NOT_SUPPORTED after asserting;
// enabled build forwards to the _IMPL function.
NV_STATUS ksec2SendAndReadMessage_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pPayload, NvU32 size, NvU32 nvdmType, NvU8 *pResponsePayload, NvU32 responseBufferSize);
#ifdef __nvoc_kernel_sec2_h_disabled
static inline NV_STATUS ksec2SendAndReadMessage(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pPayload, NvU32 size, NvU32 nvdmType, NvU8 *pResponsePayload, NvU32 responseBufferSize) {
    NV_ASSERT_FAILED_PRECOMP("KernelSec2 was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else // __nvoc_kernel_sec2_h_disabled
#define ksec2SendAndReadMessage(pGpu, pKernelSec2, pPayload, size, nvdmType, pResponsePayload, responseBufferSize) ksec2SendAndReadMessage_IMPL(pGpu, pKernelSec2, pPayload, size, nvdmType, pResponsePayload, responseBufferSize)
#endif // __nvoc_kernel_sec2_h_disabled

// Disabled-class stub returns NV_ERR_NOT_SUPPORTED after asserting;
// enabled build forwards to the _IMPL function.
NV_STATUS ksec2PollForCanSend_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);
#ifdef __nvoc_kernel_sec2_h_disabled
static inline NV_STATUS ksec2PollForCanSend(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    NV_ASSERT_FAILED_PRECOMP("KernelSec2 was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else // __nvoc_kernel_sec2_h_disabled
#define ksec2PollForCanSend(pGpu, pKernelSec2) ksec2PollForCanSend_IMPL(pGpu, pKernelSec2)
#endif // __nvoc_kernel_sec2_h_disabled

// Disabled-class stub returns NV_ERR_NOT_SUPPORTED after asserting;
// enabled build forwards to the _IMPL function.
NV_STATUS ksec2PollForResponse_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);
#ifdef __nvoc_kernel_sec2_h_disabled
static inline NV_STATUS ksec2PollForResponse(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    NV_ASSERT_FAILED_PRECOMP("KernelSec2 was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else // __nvoc_kernel_sec2_h_disabled
#define ksec2PollForResponse(pGpu, pKernelSec2) ksec2PollForResponse_IMPL(pGpu, pKernelSec2)
#endif // __nvoc_kernel_sec2_h_disabled

// Disabled-class stub returns NV_ERR_NOT_SUPPORTED after asserting;
// enabled build forwards to the _IMPL function.
NV_STATUS ksec2SendMessage_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pPayload, NvU32 size, NvU32 nvdmType);
#ifdef __nvoc_kernel_sec2_h_disabled
static inline NV_STATUS ksec2SendMessage(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pPayload, NvU32 size, NvU32 nvdmType) {
    NV_ASSERT_FAILED_PRECOMP("KernelSec2 was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else // __nvoc_kernel_sec2_h_disabled
#define ksec2SendMessage(pGpu, pKernelSec2, pPayload, size, nvdmType) ksec2SendMessage_IMPL(pGpu, pKernelSec2, pPayload, size, nvdmType)
#endif // __nvoc_kernel_sec2_h_disabled


// Wrapper macros for halified functions
// NVOC-generated convention for each wrapped function:
//   *_FNPTR(obj) - expands to the function pointer the dispatcher consults
//                  (per-class vtable entry or per-object halified pointer)
//   *(...)       - the public call; expands to the *_DISPATCH helper
//   *_HAL(...)   - same dispatch, provided for HAL-style call sites
#define ksec2ConstructEngine_FNPTR(pKernelSec2) pKernelSec2->__nvoc_metadata_ptr->vtable.__ksec2ConstructEngine__
#define ksec2ConstructEngine(pGpu, pKernelSec2, arg3) ksec2ConstructEngine_DISPATCH(pGpu, pKernelSec2, arg3)
#define ksec2ConstructEngine_HAL(pGpu, pKernelSec2, arg3) ksec2ConstructEngine_DISPATCH(pGpu, pKernelSec2, arg3)
#define ksec2RegisterIntrService_FNPTR(pKernelSec2) pKernelSec2->__nvoc_metadata_ptr->vtable.__ksec2RegisterIntrService__
#define ksec2RegisterIntrService(pGpu, pKernelSec2, pRecords) ksec2RegisterIntrService_DISPATCH(pGpu, pKernelSec2, pRecords)
#define ksec2RegisterIntrService_HAL(pGpu, pKernelSec2, pRecords) ksec2RegisterIntrService_DISPATCH(pGpu, pKernelSec2, pRecords)
#define ksec2ServiceNotificationInterrupt_FNPTR(arg_this) arg_this->__nvoc_metadata_ptr->vtable.__ksec2ServiceNotificationInterrupt__
#define ksec2ServiceNotificationInterrupt(arg1, arg_this, arg3) ksec2ServiceNotificationInterrupt_DISPATCH(arg1, arg_this, arg3)
#define ksec2ServiceNotificationInterrupt_HAL(arg1, arg_this, arg3) ksec2ServiceNotificationInterrupt_DISPATCH(arg1, arg_this, arg3)
#define ksec2StateUnload_FNPTR(pKernelSec2) pKernelSec2->__nvoc_metadata_ptr->vtable.__ksec2StateUnload__
#define ksec2StateUnload(pGpu, pKernelSec2, flags) ksec2StateUnload_DISPATCH(pGpu, pKernelSec2, flags)
#define ksec2ConfigureFalcon_FNPTR(pKernelSec2) pKernelSec2->__ksec2ConfigureFalcon__
#define ksec2ConfigureFalcon(pGpu, pKernelSec2) ksec2ConfigureFalcon_DISPATCH(pGpu, pKernelSec2)
#define ksec2ConfigureFalcon_HAL(pGpu, pKernelSec2) ksec2ConfigureFalcon_DISPATCH(pGpu, pKernelSec2)
#define ksec2ResetHw_FNPTR(pKernelSec2) pKernelSec2->__ksec2ResetHw__
#define ksec2ResetHw(pGpu, pKernelSec2) ksec2ResetHw_DISPATCH(pGpu, pKernelSec2)
#define ksec2ResetHw_HAL(pGpu, pKernelSec2) ksec2ResetHw_DISPATCH(pGpu, pKernelSec2)
#define ksec2StateLoad_FNPTR(pKernelSec2) pKernelSec2->__ksec2StateLoad__
#define ksec2StateLoad(pGpu, pKernelSec2, arg3) ksec2StateLoad_DISPATCH(pGpu, pKernelSec2, arg3)
#define ksec2StateLoad_HAL(pGpu, pKernelSec2, arg3) ksec2StateLoad_DISPATCH(pGpu, pKernelSec2, arg3)
#define ksec2StateDestroy_FNPTR(pKernelSec2) pKernelSec2->__ksec2StateDestroy__
#define ksec2StateDestroy(pGpu, pKernelSec2) ksec2StateDestroy_DISPATCH(pGpu, pKernelSec2)
#define ksec2StateDestroy_HAL(pGpu, pKernelSec2) ksec2StateDestroy_DISPATCH(pGpu, pKernelSec2)
#define ksec2ReadUcodeFuseVersion_FNPTR(pKernelSec2) pKernelSec2->__ksec2ReadUcodeFuseVersion__
#define ksec2ReadUcodeFuseVersion(pGpu, pKernelSec2, ucodeId) ksec2ReadUcodeFuseVersion_DISPATCH(pGpu, pKernelSec2, ucodeId)
#define ksec2ReadUcodeFuseVersion_HAL(pGpu, pKernelSec2, ucodeId) ksec2ReadUcodeFuseVersion_DISPATCH(pGpu, pKernelSec2, ucodeId)
#define ksec2GetBinArchiveBlUcode_FNPTR(pKernelSec2) pKernelSec2->__ksec2GetBinArchiveBlUcode__
#define ksec2GetBinArchiveBlUcode(pGpu, pKernelSec2) ksec2GetBinArchiveBlUcode_DISPATCH(pGpu, pKernelSec2)
#define ksec2GetBinArchiveBlUcode_HAL(pGpu, pKernelSec2) ksec2GetBinArchiveBlUcode_DISPATCH(pGpu, pKernelSec2)
#define ksec2GetGenericBlUcode_FNPTR(pKernelSec2) pKernelSec2->__ksec2GetGenericBlUcode__
#define ksec2GetGenericBlUcode(pGpu, pKernelSec2, ppDesc, ppImg) ksec2GetGenericBlUcode_DISPATCH(pGpu, pKernelSec2, ppDesc, ppImg)
#define ksec2GetGenericBlUcode_HAL(pGpu, pKernelSec2, ppDesc, ppImg) ksec2GetGenericBlUcode_DISPATCH(pGpu, pKernelSec2, ppDesc, ppImg)
#define ksec2GetBinArchiveSecurescrubUcode_FNPTR(pKernelSec2) pKernelSec2->__ksec2GetBinArchiveSecurescrubUcode__
#define ksec2GetBinArchiveSecurescrubUcode(pGpu, pKernelSec2) ksec2GetBinArchiveSecurescrubUcode_DISPATCH(pGpu, pKernelSec2)
#define ksec2GetBinArchiveSecurescrubUcode_HAL(pGpu, pKernelSec2) ksec2GetBinArchiveSecurescrubUcode_DISPATCH(pGpu, pKernelSec2)
#define ksec2SetupGspImages_FNPTR(pKernelSec2) pKernelSec2->__ksec2SetupGspImages__
#define ksec2SetupGspImages(pGpu, pKernelSec2, pCotPayload) ksec2SetupGspImages_DISPATCH(pGpu, pKernelSec2, pCotPayload)
#define ksec2SetupGspImages_HAL(pGpu, pKernelSec2, pCotPayload) ksec2SetupGspImages_DISPATCH(pGpu, pKernelSec2, pCotPayload)
#define ksec2PrepareBootCommands_FNPTR(pKernelSec2) pKernelSec2->__ksec2PrepareBootCommands__
#define ksec2PrepareBootCommands(pGpu, pKernelSec2) ksec2PrepareBootCommands_DISPATCH(pGpu, pKernelSec2)
#define ksec2PrepareBootCommands_HAL(pGpu, pKernelSec2) ksec2PrepareBootCommands_DISPATCH(pGpu, pKernelSec2)
#define ksec2SafeToSendBootCommands_FNPTR(pKernelSec2) pKernelSec2->__ksec2SafeToSendBootCommands__
#define ksec2SafeToSendBootCommands(pGpu, pKernelSec2) ksec2SafeToSendBootCommands_DISPATCH(pGpu, pKernelSec2)
#define ksec2SafeToSendBootCommands_HAL(pGpu, pKernelSec2) ksec2SafeToSendBootCommands_DISPATCH(pGpu, pKernelSec2)
#define ksec2SendBootCommands_FNPTR(pKernelSec2) pKernelSec2->__ksec2SendBootCommands__
#define ksec2SendBootCommands(pGpu, pKernelSec2) ksec2SendBootCommands_DISPATCH(pGpu, pKernelSec2)
#define ksec2SendBootCommands_HAL(pGpu, pKernelSec2) ksec2SendBootCommands_DISPATCH(pGpu, pKernelSec2)
#define ksec2PrepareAndSendBootCommands_FNPTR(pKernelSec2) pKernelSec2->__ksec2PrepareAndSendBootCommands__
#define ksec2PrepareAndSendBootCommands(pGpu, pKernelSec2) ksec2PrepareAndSendBootCommands_DISPATCH(pGpu, pKernelSec2)
#define ksec2PrepareAndSendBootCommands_HAL(pGpu, pKernelSec2) ksec2PrepareAndSendBootCommands_DISPATCH(pGpu, pKernelSec2)
#define ksec2CanSendPacket_FNPTR(pKernelSec2) pKernelSec2->__ksec2CanSendPacket__
#define ksec2CanSendPacket(pGpu, pKernelSec2) ksec2CanSendPacket_DISPATCH(pGpu, pKernelSec2)
#define ksec2CanSendPacket_HAL(pGpu, pKernelSec2) ksec2CanSendPacket_DISPATCH(pGpu, pKernelSec2)
#define ksec2GetMaxSendPacketSize_FNPTR(pKernelSec2) pKernelSec2->__ksec2GetMaxSendPacketSize__
#define ksec2GetMaxSendPacketSize(pGpu, pKernelSec2) ksec2GetMaxSendPacketSize_DISPATCH(pGpu, pKernelSec2)
#define ksec2GetMaxSendPacketSize_HAL(pGpu, pKernelSec2) ksec2GetMaxSendPacketSize_DISPATCH(pGpu, pKernelSec2)
#define ksec2CreateNvdmHeader_FNPTR(pKernelSec2) pKernelSec2->__ksec2CreateNvdmHeader__
#define ksec2CreateNvdmHeader(pGpu, pKernelSec2, nvdmType) ksec2CreateNvdmHeader_DISPATCH(pGpu, pKernelSec2, nvdmType)
#define ksec2CreateNvdmHeader_HAL(pGpu, pKernelSec2, nvdmType) ksec2CreateNvdmHeader_DISPATCH(pGpu, pKernelSec2, nvdmType)
#define ksec2CreateMctpHeader_FNPTR(pKernelSec2) pKernelSec2->__ksec2CreateMctpHeader__
#define ksec2CreateMctpHeader(pGpu, pKernelSec2, som, eom, seid, seq) ksec2CreateMctpHeader_DISPATCH(pGpu, pKernelSec2, som, eom, seid, seq)
#define ksec2CreateMctpHeader_HAL(pGpu, pKernelSec2, som, eom, seid, seq) ksec2CreateMctpHeader_DISPATCH(pGpu, pKernelSec2, som, eom, seid, seq)
#define ksec2SendPacket_FNPTR(pKernelSec2) pKernelSec2->__ksec2SendPacket__
#define ksec2SendPacket(pGpu, pKernelSec2, pPacket, packetSize) ksec2SendPacket_DISPATCH(pGpu, pKernelSec2, pPacket, packetSize)
#define ksec2SendPacket_HAL(pGpu, pKernelSec2, pPacket, packetSize) ksec2SendPacket_DISPATCH(pGpu, pKernelSec2, pPacket, packetSize)
#define ksec2WaitForGspTargetMaskReleased_FNPTR(pKernelSec2) pKernelSec2->__ksec2WaitForGspTargetMaskReleased__
#define ksec2WaitForGspTargetMaskReleased(pGpu, pKernelSec2) ksec2WaitForGspTargetMaskReleased_DISPATCH(pGpu, pKernelSec2)
#define ksec2WaitForGspTargetMaskReleased_HAL(pGpu, pKernelSec2) ksec2WaitForGspTargetMaskReleased_DISPATCH(pGpu, pKernelSec2)
#define ksec2ReadPacket_FNPTR(pKernelSec2) pKernelSec2->__ksec2ReadPacket__
#define ksec2ReadPacket(pGpu, pKernelSec2, pPacket, maxPacketSize, bytesRead) ksec2ReadPacket_DISPATCH(pGpu, pKernelSec2, pPacket, maxPacketSize, bytesRead)
#define ksec2ReadPacket_HAL(pGpu, pKernelSec2, pPacket, maxPacketSize, bytesRead) ksec2ReadPacket_DISPATCH(pGpu, pKernelSec2, pPacket, maxPacketSize, bytesRead)
#define ksec2IsResponseAvailable_FNPTR(pKernelSec2) pKernelSec2->__ksec2IsResponseAvailable__
#define ksec2IsResponseAvailable(pGpu, pKernelSec2) ksec2IsResponseAvailable_DISPATCH(pGpu, pKernelSec2)
#define ksec2IsResponseAvailable_HAL(pGpu, pKernelSec2) ksec2IsResponseAvailable_DISPATCH(pGpu, pKernelSec2)
#define ksec2GspFmcIsEnforced_FNPTR(pKernelSec2) pKernelSec2->__ksec2GspFmcIsEnforced__
#define ksec2GspFmcIsEnforced(pGpu, pKernelSec2) ksec2GspFmcIsEnforced_DISPATCH(pGpu, pKernelSec2)
#define ksec2GspFmcIsEnforced_HAL(pGpu, pKernelSec2) ksec2GspFmcIsEnforced_DISPATCH(pGpu, pKernelSec2)
#define ksec2WaitForSecureBoot_FNPTR(pKernelSec2) pKernelSec2->__ksec2WaitForSecureBoot__
#define ksec2WaitForSecureBoot(pGpu, pKernelSec2) ksec2WaitForSecureBoot_DISPATCH(pGpu, pKernelSec2)
#define ksec2WaitForSecureBoot_HAL(pGpu, pKernelSec2) ksec2WaitForSecureBoot_DISPATCH(pGpu, pKernelSec2)
#define ksec2GetMaxRecvPacketSize_FNPTR(pKernelSec2) pKernelSec2->__ksec2GetMaxRecvPacketSize__
#define ksec2GetMaxRecvPacketSize(pGpu, pKernelSec2) ksec2GetMaxRecvPacketSize_DISPATCH(pGpu, pKernelSec2)
#define ksec2GetMaxRecvPacketSize_HAL(pGpu, pKernelSec2) ksec2GetMaxRecvPacketSize_DISPATCH(pGpu, pKernelSec2)
#define ksec2NvdmToSeid_FNPTR(pKernelSec2) pKernelSec2->__ksec2NvdmToSeid__
#define ksec2NvdmToSeid(pGpu, pKernelSec2, nvdmType) ksec2NvdmToSeid_DISPATCH(pGpu, pKernelSec2, nvdmType)
#define ksec2NvdmToSeid_HAL(pGpu, pKernelSec2, nvdmType) ksec2NvdmToSeid_DISPATCH(pGpu, pKernelSec2, nvdmType)
#define ksec2GetPacketInfo_FNPTR(pKernelSec2) pKernelSec2->__ksec2GetPacketInfo__
#define ksec2GetPacketInfo(pGpu, pKernelSec2, pBuffer, size, pPacketState, pTag) ksec2GetPacketInfo_DISPATCH(pGpu, pKernelSec2, pBuffer, size, pPacketState, pTag)
#define ksec2GetPacketInfo_HAL(pGpu, pKernelSec2, pBuffer, size, pPacketState, pTag) ksec2GetPacketInfo_DISPATCH(pGpu, pKernelSec2, pBuffer, size, pPacketState, pTag)
#define ksec2ValidateMctpPayloadHeader_FNPTR(pKernelSec2) pKernelSec2->__ksec2ValidateMctpPayloadHeader__
#define ksec2ValidateMctpPayloadHeader(pGpu, pKernelSec2, pBuffer, size) ksec2ValidateMctpPayloadHeader_DISPATCH(pGpu, pKernelSec2, pBuffer, size)
#define ksec2ValidateMctpPayloadHeader_HAL(pGpu, pKernelSec2, pBuffer, size) ksec2ValidateMctpPayloadHeader_DISPATCH(pGpu, pKernelSec2, pBuffer, size)
#define ksec2ProcessNvdmMessage_FNPTR(pKernelSec2) pKernelSec2->__ksec2ProcessNvdmMessage__
#define ksec2ProcessNvdmMessage(pGpu, pKernelSec2, pBuffer, size) ksec2ProcessNvdmMessage_DISPATCH(pGpu, pKernelSec2, pBuffer, size)
#define ksec2ProcessNvdmMessage_HAL(pGpu, pKernelSec2, pBuffer, size) ksec2ProcessNvdmMessage_DISPATCH(pGpu, pKernelSec2, pBuffer, size)
#define ksec2ProcessCommandResponse_FNPTR(pKernelSec2) pKernelSec2->__ksec2ProcessCommandResponse__
#define ksec2ProcessCommandResponse(pGpu, pKernelSec2, pBuffer, size) ksec2ProcessCommandResponse_DISPATCH(pGpu, pKernelSec2, pBuffer, size)
#define ksec2ProcessCommandResponse_HAL(pGpu, pKernelSec2, pBuffer, size) ksec2ProcessCommandResponse_DISPATCH(pGpu, pKernelSec2, pBuffer, size)
#define ksec2DumpDebugState_FNPTR(pKernelSec2) pKernelSec2->__ksec2DumpDebugState__
#define ksec2DumpDebugState(pGpu, pKernelSec2) ksec2DumpDebugState_DISPATCH(pGpu, pKernelSec2)
#define ksec2DumpDebugState_HAL(pGpu, pKernelSec2) ksec2DumpDebugState_DISPATCH(pGpu, pKernelSec2)
#define ksec2ErrorCode2NvStatusMap_FNPTR(pKernelSec2) pKernelSec2->__ksec2ErrorCode2NvStatusMap__
#define ksec2ErrorCode2NvStatusMap(pGpu, pKernelSec2, errorCode) ksec2ErrorCode2NvStatusMap_DISPATCH(pGpu, pKernelSec2, errorCode)
#define ksec2ErrorCode2NvStatusMap_HAL(pGpu, pKernelSec2, errorCode) ksec2ErrorCode2NvStatusMap_DISPATCH(pGpu, pKernelSec2, errorCode)
// Wrappers inherited from OBJENGSTATE / IntrService: these resolve through
// the embedded base object's per-class vtable rather than KernelSec2's own
// function pointers.
#define ksec2InitMissing_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateInitMissing__
#define ksec2InitMissing(pGpu, pEngstate) ksec2InitMissing_DISPATCH(pGpu, pEngstate)
#define ksec2StatePreInitLocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreInitLocked__
#define ksec2StatePreInitLocked(pGpu, pEngstate) ksec2StatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define ksec2StatePreInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreInitUnlocked__
#define ksec2StatePreInitUnlocked(pGpu, pEngstate) ksec2StatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define ksec2StateInitLocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateInitLocked__
#define ksec2StateInitLocked(pGpu, pEngstate) ksec2StateInitLocked_DISPATCH(pGpu, pEngstate)
#define ksec2StateInitUnlocked_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStateInitUnlocked__
#define ksec2StateInitUnlocked(pGpu, pEngstate) ksec2StateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define ksec2StatePreLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreLoad__
#define ksec2StatePreLoad(pGpu, pEngstate, arg3) ksec2StatePreLoad_DISPATCH(pGpu, pEngstate, arg3)
#define ksec2StatePostLoad_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostLoad__
#define ksec2StatePostLoad(pGpu, pEngstate, arg3) ksec2StatePostLoad_DISPATCH(pGpu, pEngstate, arg3)
#define ksec2StatePreUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePreUnload__
#define ksec2StatePreUnload(pGpu, pEngstate, arg3) ksec2StatePreUnload_DISPATCH(pGpu, pEngstate, arg3)
#define ksec2StatePostUnload_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateStatePostUnload__
#define ksec2StatePostUnload(pGpu, pEngstate, arg3) ksec2StatePostUnload_DISPATCH(pGpu, pEngstate, arg3)
#define ksec2IsPresent_FNPTR(pEngstate) pEngstate->__nvoc_base_OBJENGSTATE.__nvoc_metadata_ptr->vtable.__engstateIsPresent__
#define ksec2IsPresent(pGpu, pEngstate) ksec2IsPresent_DISPATCH(pGpu, pEngstate)
#define ksec2ClearInterrupt_FNPTR(pIntrService) pIntrService->__nvoc_base_IntrService.__nvoc_metadata_ptr->vtable.__intrservClearInterrupt__
#define ksec2ClearInterrupt(pGpu, pIntrService, pParams) ksec2ClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define ksec2ServiceInterrupt_FNPTR(pIntrService) pIntrService->__nvoc_base_IntrService.__nvoc_metadata_ptr->vtable.__intrservServiceInterrupt__
#define ksec2ServiceInterrupt(pGpu, pIntrService, pParams) ksec2ServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define ksec2RegRead_FNPTR(pKernelFlcn) pKernelFlcn->__nvoc_base_KernelFalcon.__kflcnRegRead__
#define ksec2RegRead(pGpu, pKernelFlcn, offset) ksec2RegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define ksec2RegRead_HAL(pGpu, pKernelFlcn, offset) ksec2RegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define ksec2RegWrite_FNPTR(pKernelFlcn) pKernelFlcn->__nvoc_base_KernelFalcon.__kflcnRegWrite__
#define ksec2RegWrite(pGpu, pKernelFlcn, offset, data) ksec2RegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define ksec2RegWrite_HAL(pGpu, pKernelFlcn, offset, data) ksec2RegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define ksec2MaskDmemAddr_FNPTR(pKernelFlcn) pKernelFlcn->__nvoc_base_KernelFalcon.__kflcnMaskDmemAddr__
#define ksec2MaskDmemAddr(pGpu, pKernelFlcn, addr) ksec2MaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define ksec2MaskDmemAddr_HAL(pGpu, pKernelFlcn, addr) ksec2MaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define ksec2Configured_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_metadata_ptr->vtable.__kcrashcatEngineConfigured__
#define ksec2Configured(arg_this) ksec2Configured_DISPATCH(arg_this)
#define ksec2Unload_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_metadata_ptr->vtable.__kcrashcatEngineUnload__
#define ksec2Unload(arg_this) ksec2Unload_DISPATCH(arg_this)
#define ksec2Vprintf_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_metadata_ptr->vtable.__kcrashcatEngineVprintf__
#define ksec2Vprintf(arg_this, bReportStart, fmt, args) ksec2Vprintf_DISPATCH(arg_this, bReportStart, fmt, args)
#define ksec2PriRead_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_metadata_ptr->vtable.__kcrashcatEnginePriRead__
#define ksec2PriRead(arg_this, offset) ksec2PriRead_DISPATCH(arg_this, offset)
#define ksec2PriWrite_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_metadata_ptr->vtable.__kcrashcatEnginePriWrite__
#define ksec2PriWrite(arg_this, offset, data) ksec2PriWrite_DISPATCH(arg_this, offset, data)
#define ksec2MapBufferDescriptor_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_metadata_ptr->vtable.__kcrashcatEngineMapBufferDescriptor__
#define ksec2MapBufferDescriptor(arg_this, pBufDesc) ksec2MapBufferDescriptor_DISPATCH(arg_this, pBufDesc)
#define ksec2UnmapBufferDescriptor_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_metadata_ptr->vtable.__kcrashcatEngineUnmapBufferDescriptor__
#define ksec2UnmapBufferDescriptor(arg_this, pBufDesc) ksec2UnmapBufferDescriptor_DISPATCH(arg_this, pBufDesc)
#define ksec2SyncBufferDescriptor_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_metadata_ptr->vtable.__kcrashcatEngineSyncBufferDescriptor__
#define ksec2SyncBufferDescriptor(arg_this, pBufDesc, offset, size) ksec2SyncBufferDescriptor_DISPATCH(arg_this, pBufDesc, offset, size)
#define ksec2ReadDmem_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__kcrashcatEngineReadDmem__
#define ksec2ReadDmem(arg_this, offset, size, pBuf) ksec2ReadDmem_DISPATCH(arg_this, offset, size, pBuf)
#define ksec2ReadDmem_HAL(arg_this, offset, size, pBuf) ksec2ReadDmem_DISPATCH(arg_this, offset, size, pBuf)
#define ksec2ReadEmem_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_metadata_ptr->vtable.__kcrashcatEngineReadEmem__
#define ksec2ReadEmem(arg_this, offset, size, pBuf) ksec2ReadEmem_DISPATCH(arg_this, offset, size, pBuf)
#define ksec2ReadEmem_HAL(arg_this, offset, size, pBuf) ksec2ReadEmem_DISPATCH(arg_this, offset, size, pBuf)
#define ksec2GetScratchOffsets_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__kcrashcatEngineGetScratchOffsets__
#define ksec2GetScratchOffsets(arg_this, scratchGroupId) ksec2GetScratchOffsets_DISPATCH(arg_this, scratchGroupId)
#define ksec2GetScratchOffsets_HAL(arg_this, scratchGroupId) ksec2GetScratchOffsets_DISPATCH(arg_this, scratchGroupId)
#define ksec2GetWFL0Offset_FNPTR(arg_this) arg_this->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__kcrashcatEngineGetWFL0Offset__
#define ksec2GetWFL0Offset(arg_this) ksec2GetWFL0Offset_DISPATCH(arg_this)
#define ksec2GetWFL0Offset_HAL(arg_this) ksec2GetWFL0Offset_DISPATCH(arg_this)

// Dispatch functions
//
// Generated inline dispatchers, one per virtual method.  Each forwards its
// arguments unchanged to the implementation, looked up either in the shared
// vtable reached through __nvoc_metadata_ptr, or (for HAL-varying methods)
// in a per-object function pointer stored directly on the instance.
static inline NV_STATUS ksec2ConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, ENGDESCRIPTOR arg3) {
    return pKernelSec2->__nvoc_metadata_ptr->vtable.__ksec2ConstructEngine__(pGpu, pKernelSec2, arg3);
}

static inline void ksec2RegisterIntrService_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, IntrServiceRecord pRecords[179]) {
    pKernelSec2->__nvoc_metadata_ptr->vtable.__ksec2RegisterIntrService__(pGpu, pKernelSec2, pRecords);
}

static inline NV_STATUS ksec2ServiceNotificationInterrupt_DISPATCH(struct OBJGPU *arg1, struct KernelSec2 *arg_this, IntrServiceServiceNotificationInterruptArguments *arg3) {
    return arg_this->__nvoc_metadata_ptr->vtable.__ksec2ServiceNotificationInterrupt__(arg1, arg_this, arg3);
}

static inline NV_STATUS ksec2StateUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 flags) {
    return pKernelSec2->__nvoc_metadata_ptr->vtable.__ksec2StateUnload__(pGpu, pKernelSec2, flags);
}

// The dispatchers below call through per-object pointers (no vtable hop):
// these methods are HAL-varying, so the pointer is set per instance.
static inline NV_STATUS ksec2ConfigureFalcon_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2ConfigureFalcon__(pGpu, pKernelSec2);
}

static inline NV_STATUS ksec2ResetHw_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2ResetHw__(pGpu, pKernelSec2);
}

static inline NV_STATUS ksec2StateLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 arg3) {
    return pKernelSec2->__ksec2StateLoad__(pGpu, pKernelSec2, arg3);
}

static inline void ksec2StateDestroy_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    pKernelSec2->__ksec2StateDestroy__(pGpu, pKernelSec2);
}

static inline NvU32 ksec2ReadUcodeFuseVersion_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 ucodeId) {
    return pKernelSec2->__ksec2ReadUcodeFuseVersion__(pGpu, pKernelSec2, ucodeId);
}

static inline const BINDATA_ARCHIVE * ksec2GetBinArchiveBlUcode_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2GetBinArchiveBlUcode__(pGpu, pKernelSec2);
}

static inline NV_STATUS ksec2GetGenericBlUcode_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, const RM_FLCN_BL_DESC **ppDesc, const NvU8 **ppImg) {
    return pKernelSec2->__ksec2GetGenericBlUcode__(pGpu, pKernelSec2, ppDesc, ppImg);
}

static inline const BINDATA_ARCHIVE * ksec2GetBinArchiveSecurescrubUcode_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2GetBinArchiveSecurescrubUcode__(pGpu, pKernelSec2);
}

static inline NV_STATUS ksec2SetupGspImages_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NVDM_PAYLOAD_COT *pCotPayload) {
    return pKernelSec2->__ksec2SetupGspImages__(pGpu, pKernelSec2, pCotPayload);
}

static inline NV_STATUS ksec2PrepareBootCommands_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2PrepareBootCommands__(pGpu, pKernelSec2);
}

static inline NV_STATUS ksec2SafeToSendBootCommands_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2SafeToSendBootCommands__(pGpu, pKernelSec2);
}

static inline NV_STATUS ksec2SendBootCommands_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2SendBootCommands__(pGpu, pKernelSec2);
}

static inline NV_STATUS ksec2PrepareAndSendBootCommands_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2PrepareAndSendBootCommands__(pGpu, pKernelSec2);
}

static inline NvBool ksec2CanSendPacket_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2CanSendPacket__(pGpu, pKernelSec2);
}

static inline NvU32 ksec2GetMaxSendPacketSize_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2GetMaxSendPacketSize__(pGpu, pKernelSec2);
}

static inline NvU32 ksec2CreateNvdmHeader_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 nvdmType) {
    return pKernelSec2->__ksec2CreateNvdmHeader__(pGpu, pKernelSec2, nvdmType);
}

static inline NvU32 ksec2CreateMctpHeader_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 som, NvU8 eom, NvU8 seid, NvU8 seq) {
    return pKernelSec2->__ksec2CreateMctpHeader__(pGpu, pKernelSec2, som, eom, seid, seq);
}

static inline NV_STATUS ksec2SendPacket_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pPacket, NvU32 packetSize) {
    return pKernelSec2->__ksec2SendPacket__(pGpu, pKernelSec2, pPacket, packetSize);
}

static inline NV_STATUS ksec2WaitForGspTargetMaskReleased_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2WaitForGspTargetMaskReleased__(pGpu, pKernelSec2);
}

static inline NV_STATUS ksec2ReadPacket_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pPacket, NvU32 maxPacketSize, NvU32 *bytesRead) {
    return pKernelSec2->__ksec2ReadPacket__(pGpu, pKernelSec2, pPacket, maxPacketSize, bytesRead);
}

static inline NvBool ksec2IsResponseAvailable_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2IsResponseAvailable__(pGpu, pKernelSec2);
}

static inline NvBool ksec2GspFmcIsEnforced_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2GspFmcIsEnforced__(pGpu, pKernelSec2);
}

static inline NV_STATUS ksec2WaitForSecureBoot_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2WaitForSecureBoot__(pGpu, pKernelSec2);
}

static inline NvU32 ksec2GetMaxRecvPacketSize_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return pKernelSec2->__ksec2GetMaxRecvPacketSize__(pGpu, pKernelSec2);
}

static inline NvU8 ksec2NvdmToSeid_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 nvdmType) {
    return pKernelSec2->__ksec2NvdmToSeid__(pGpu, pKernelSec2, nvdmType);
}

static inline NV_STATUS ksec2GetPacketInfo_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size, MCTP_PACKET_STATE *pPacketState, NvU8 *pTag) {
    return pKernelSec2->__ksec2GetPacketInfo__(pGpu, pKernelSec2, pBuffer, size, pPacketState, pTag);
}

static inline NV_STATUS ksec2ValidateMctpPayloadHeader_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size) {
    return pKernelSec2->__ksec2ValidateMctpPayloadHeader__(pGpu, pKernelSec2, pBuffer, size);
}

static inline NV_STATUS ksec2ProcessNvdmMessage_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size) {
    return pKernelSec2->__ksec2ProcessNvdmMessage__(pGpu, pKernelSec2, pBuffer, size);
}

static inline NV_STATUS ksec2ProcessCommandResponse_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size) {
    return pKernelSec2->__ksec2ProcessCommandResponse__(pGpu, pKernelSec2, pBuffer, size);
}

static inline void ksec2DumpDebugState_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    pKernelSec2->__ksec2DumpDebugState__(pGpu, pKernelSec2);
}

static inline NV_STATUS ksec2ErrorCode2NvStatusMap_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 errorCode) {
    return pKernelSec2->__ksec2ErrorCode2NvStatusMap__(pGpu, pKernelSec2, errorCode);
}

// Dispatchers for methods inherited from OBJENGSTATE; the pointers live in
// KernelSec2's own vtable (populated from the base class by the generator).
static inline void ksec2InitMissing_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pEngstate) {
    pEngstate->__nvoc_metadata_ptr->vtable.__ksec2InitMissing__(pGpu, pEngstate);
}

static inline NV_STATUS ksec2StatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pEngstate) {
    return pEngstate->__nvoc_metadata_ptr->vtable.__ksec2StatePreInitLocked__(pGpu, pEngstate);
}

static inline NV_STATUS ksec2StatePreInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pEngstate) {
    return pEngstate->__nvoc_metadata_ptr->vtable.__ksec2StatePreInitUnlocked__(pGpu, pEngstate);
}

static inline NV_STATUS ksec2StateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pEngstate) {
    return pEngstate->__nvoc_metadata_ptr->vtable.__ksec2StateInitLocked__(pGpu, pEngstate);
}

static inline NV_STATUS ksec2StateInitUnlocked_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pEngstate) {
    return pEngstate->__nvoc_metadata_ptr->vtable.__ksec2StateInitUnlocked__(pGpu, pEngstate);
}

static inline NV_STATUS ksec2StatePreLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pEngstate, NvU32 arg3) {
    return pEngstate->__nvoc_metadata_ptr->vtable.__ksec2StatePreLoad__(pGpu, pEngstate, arg3);
}

static inline NV_STATUS ksec2StatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pEngstate, NvU32 arg3) {
    return pEngstate->__nvoc_metadata_ptr->vtable.__ksec2StatePostLoad__(pGpu, pEngstate, arg3);
}

static inline NV_STATUS ksec2StatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pEngstate, NvU32 arg3) {
    return pEngstate->__nvoc_metadata_ptr->vtable.__ksec2StatePreUnload__(pGpu, pEngstate, arg3);
}

static inline NV_STATUS ksec2StatePostUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pEngstate, NvU32 arg3) {
    return pEngstate->__nvoc_metadata_ptr->vtable.__ksec2StatePostUnload__(pGpu, pEngstate, arg3);
}

static inline NvBool ksec2IsPresent_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pEngstate) {
    return pEngstate->__nvoc_metadata_ptr->vtable.__ksec2IsPresent__(pGpu, pEngstate);
}

// Dispatchers for methods inherited from IntrService.
static inline NvBool ksec2ClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return pIntrService->__nvoc_metadata_ptr->vtable.__ksec2ClearInterrupt__(pGpu, pIntrService, pParams);
}

static inline NvU32 ksec2ServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
    return pIntrService->__nvoc_metadata_ptr->vtable.__ksec2ServiceInterrupt__(pGpu, pIntrService, pParams);
}

// Dispatchers for methods inherited from KernelFalcon (HAL-varying,
// per-object function pointers).
static inline NvU32 ksec2RegRead_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 offset) {
    return pKernelFlcn->__ksec2RegRead__(pGpu, pKernelFlcn, offset);
}

static inline void ksec2RegWrite_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 offset, NvU32 data) {
    pKernelFlcn->__ksec2RegWrite__(pGpu, pKernelFlcn, offset, data);
}

static inline NvU32 ksec2MaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 addr) {
    return pKernelFlcn->__ksec2MaskDmemAddr__(pGpu, pKernelFlcn, addr);
}

// Dispatchers for methods inherited from KernelCrashCatEngine.
static inline NvBool ksec2Configured_DISPATCH(struct KernelSec2 *arg_this) {
    return arg_this->__nvoc_metadata_ptr->vtable.__ksec2Configured__(arg_this);
}

static inline void ksec2Unload_DISPATCH(struct KernelSec2 *arg_this) {
    arg_this->__nvoc_metadata_ptr->vtable.__ksec2Unload__(arg_this);
}

static inline void ksec2Vprintf_DISPATCH(struct KernelSec2 *arg_this, NvBool bReportStart, const char *fmt, va_list args) {
    arg_this->__nvoc_metadata_ptr->vtable.__ksec2Vprintf__(arg_this, bReportStart, fmt, args);
}

static inline NvU32 ksec2PriRead_DISPATCH(struct KernelSec2 *arg_this, NvU32 offset) {
    return arg_this->__nvoc_metadata_ptr->vtable.__ksec2PriRead__(arg_this, offset);
}

static inline void ksec2PriWrite_DISPATCH(struct KernelSec2 *arg_this, NvU32 offset, NvU32 data) {
    arg_this->__nvoc_metadata_ptr->vtable.__ksec2PriWrite__(arg_this, offset, data);
}

static inline void * ksec2MapBufferDescriptor_DISPATCH(struct KernelSec2 *arg_this, CrashCatBufferDescriptor *pBufDesc) {
    return arg_this->__nvoc_metadata_ptr->vtable.__ksec2MapBufferDescriptor__(arg_this, pBufDesc);
}

static inline void ksec2UnmapBufferDescriptor_DISPATCH(struct KernelSec2 *arg_this, CrashCatBufferDescriptor *pBufDesc) {
    arg_this->__nvoc_metadata_ptr->vtable.__ksec2UnmapBufferDescriptor__(arg_this, pBufDesc);
}

static inline void ksec2SyncBufferDescriptor_DISPATCH(struct KernelSec2 *arg_this, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
    arg_this->__nvoc_metadata_ptr->vtable.__ksec2SyncBufferDescriptor__(arg_this, pBufDesc, offset, size);
}

static inline void ksec2ReadDmem_DISPATCH(struct KernelSec2 *arg_this, NvU32 offset, NvU32 size, void *pBuf) {
    arg_this->__ksec2ReadDmem__(arg_this, offset, size, pBuf);
}

static inline void ksec2ReadEmem_DISPATCH(struct KernelSec2 *arg_this, NvU64 offset, NvU64 size, void *pBuf) {
    arg_this->__nvoc_metadata_ptr->vtable.__ksec2ReadEmem__(arg_this, offset, size, pBuf);
}

static inline const NvU32 * ksec2GetScratchOffsets_DISPATCH(struct KernelSec2 *arg_this, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
    return arg_this->__ksec2GetScratchOffsets__(arg_this, scratchGroupId);
}

static inline NvU32 ksec2GetWFL0Offset_DISPATCH(struct KernelSec2 *arg_this) {
    return arg_this->__ksec2GetWFL0Offset__(arg_this);
}

// Per-implementation declarations and generated default stubs.
//
// ..._IMPL and chip-suffixed variants (_TU102, _GA100, _GB20B, ...) are real
// implementations defined in separate translation units.  The static inline
// variants with hex suffixes are generated default bodies; the suffix
// presumably hashes the body text (NVOC convention) -- e.g. _395e98 returns
// NV_ERR_NOT_SUPPORTED, _56cd7a/_ac1694 return NV_OK, _b2b553 returns 0,
// _b3696a/_d44104 are empty.  The ..._PRECOMP assert stubs trap (and return
// the given value) when a method is dispatched on a chip that lacks it.
NV_STATUS ksec2ConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, ENGDESCRIPTOR arg3);

static inline NV_STATUS ksec2ConstructEngine_395e98(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, ENGDESCRIPTOR arg3) {
    return NV_ERR_NOT_SUPPORTED;
}

void ksec2RegisterIntrService_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, IntrServiceRecord pRecords[179]);

NV_STATUS ksec2ServiceNotificationInterrupt_IMPL(struct OBJGPU *arg1, struct KernelSec2 *arg2, IntrServiceServiceNotificationInterruptArguments *arg3);

NV_STATUS ksec2StateUnload_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 flags);

NV_STATUS ksec2ConfigureFalcon_TU102(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

NV_STATUS ksec2ConfigureFalcon_GA100(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

NV_STATUS ksec2ConfigureFalcon_GA102(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NV_STATUS ksec2ConfigureFalcon_92bfc3(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS ksec2ResetHw_TU102(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

// NV_ASSERT_OR_RETURN_PRECOMP supplies the function's return value here.
static inline NV_STATUS ksec2ResetHw_5baef9(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

NV_STATUS ksec2StateLoad_GH100(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 arg3);

static inline NV_STATUS ksec2StateLoad_56cd7a(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 arg3) {
    return NV_OK;
}

void ksec2StateDestroy_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline void ksec2StateDestroy_b3696a(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return;
}

static inline NvU32 ksec2ReadUcodeFuseVersion_b2b553(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 ucodeId) {
    return 0;
}

NvU32 ksec2ReadUcodeFuseVersion_GA100(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 ucodeId);

static inline NvU32 ksec2ReadUcodeFuseVersion_474d46(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 ucodeId) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, 0);
}

const BINDATA_ARCHIVE *ksec2GetBinArchiveBlUcode_TU102(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline const BINDATA_ARCHIVE *ksec2GetBinArchiveBlUcode_80f438(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0));
}

NV_STATUS ksec2GetGenericBlUcode_TU102(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, const RM_FLCN_BL_DESC **ppDesc, const NvU8 **ppImg);

static inline NV_STATUS ksec2GetGenericBlUcode_5baef9(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, const RM_FLCN_BL_DESC **ppDesc, const NvU8 **ppImg) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}

const BINDATA_ARCHIVE *ksec2GetBinArchiveSecurescrubUcode_AD10X(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline const BINDATA_ARCHIVE *ksec2GetBinArchiveSecurescrubUcode_80f438(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0));
}

// GSP boot / FSP-style MCTP-NVDM transport methods (GB10B/GB20B parts have
// real implementations; other chips get the generated defaults below).
NV_STATUS ksec2SetupGspImages_GB10B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NVDM_PAYLOAD_COT *pCotPayload);

NV_STATUS ksec2SetupGspImages_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NVDM_PAYLOAD_COT *pCotPayload);

static inline NV_STATUS ksec2SetupGspImages_ac1694(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NVDM_PAYLOAD_COT *pCotPayload) {
    return NV_OK;
}

NV_STATUS ksec2PrepareBootCommands_GB10B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

NV_STATUS ksec2PrepareBootCommands_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NV_STATUS ksec2PrepareBootCommands_395e98(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS ksec2SafeToSendBootCommands_GB10B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

NV_STATUS ksec2SafeToSendBootCommands_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NV_STATUS ksec2SafeToSendBootCommands_ac1694(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return NV_OK;
}

NV_STATUS ksec2SendBootCommands_GB10B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

NV_STATUS ksec2SendBootCommands_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NV_STATUS ksec2SendBootCommands_ac1694(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return NV_OK;
}

NV_STATUS ksec2PrepareAndSendBootCommands_GB10B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

NV_STATUS ksec2PrepareAndSendBootCommands_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NV_STATUS ksec2PrepareAndSendBootCommands_ac1694(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return NV_OK;
}

NvBool ksec2CanSendPacket_GB10B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NvBool ksec2CanSendPacket_d69453(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return NV_FALSE;
}

NvU32 ksec2GetMaxSendPacketSize_GB10B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NvU32 ksec2GetMaxSendPacketSize_b2b553(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return 0;
}

NvU32 ksec2CreateNvdmHeader_GB10B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 nvdmType);

static inline NvU32 ksec2CreateNvdmHeader_b2b553(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 nvdmType) {
    return 0;
}

static inline NvU32 ksec2CreateMctpHeader_539ab4(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 som, NvU8 eom, NvU8 seid, NvU8 seq) {
    return 1;
}

NvU32 ksec2CreateMctpHeader_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 som, NvU8 eom, NvU8 seid, NvU8 seq);

static inline NvU32 ksec2CreateMctpHeader_4a4dee(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 som, NvU8 eom, NvU8 seid, NvU8 seq) {
    return 0;
}

NV_STATUS ksec2SendPacket_GB10B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pPacket, NvU32 packetSize);

static inline NV_STATUS ksec2SendPacket_395e98(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pPacket, NvU32 packetSize) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS ksec2WaitForGspTargetMaskReleased_GB10B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NV_STATUS ksec2WaitForGspTargetMaskReleased_395e98(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS ksec2ReadPacket_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pPacket, NvU32 maxPacketSize, NvU32 *bytesRead);

static inline NV_STATUS ksec2ReadPacket_395e98(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pPacket, NvU32 maxPacketSize, NvU32 *bytesRead) {
    return NV_ERR_NOT_SUPPORTED;
}

NvBool ksec2IsResponseAvailable_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NvBool ksec2IsResponseAvailable_d69453(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return NV_FALSE;
}

NvBool ksec2GspFmcIsEnforced_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NvBool ksec2GspFmcIsEnforced_e661f0(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return NV_TRUE;
}

NV_STATUS ksec2WaitForSecureBoot_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NV_STATUS ksec2WaitForSecureBoot_395e98(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return NV_ERR_NOT_SUPPORTED;
}

NvU32 ksec2GetMaxRecvPacketSize_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline NvU32 ksec2GetMaxRecvPacketSize_ac1694(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    // NOTE(review): returns NV_OK as an NvU32 packet size; this reads as a
    // generated "return NV_OK" default reused for a size getter -- confirm
    // against the NVOC class descriptor that 0 is the intended default.
    return NV_OK;
}

NvU8 ksec2NvdmToSeid_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 nvdmType);

static inline NvU8 ksec2NvdmToSeid_b2b553(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 nvdmType) {
    return 0;
}

NV_STATUS ksec2GetPacketInfo_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size, MCTP_PACKET_STATE *pPacketState, NvU8 *pTag);

static inline NV_STATUS ksec2GetPacketInfo_395e98(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size, MCTP_PACKET_STATE *pPacketState, NvU8 *pTag) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS ksec2ValidateMctpPayloadHeader_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size);

static inline NV_STATUS ksec2ValidateMctpPayloadHeader_395e98(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS ksec2ProcessNvdmMessage_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size);

static inline NV_STATUS ksec2ProcessNvdmMessage_395e98(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size) {
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS ksec2ProcessCommandResponse_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size);

static inline NV_STATUS ksec2ProcessCommandResponse_ac1694(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU8 *pBuffer, NvU32 size) {
    return NV_OK;
}

void ksec2DumpDebugState_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2);

static inline void ksec2DumpDebugState_d44104(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) {
    return;
}

NV_STATUS ksec2ErrorCode2NvStatusMap_GB20B(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 errorCode);

static inline NV_STATUS ksec2ErrorCode2NvStatusMap_395e98(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 errorCode) {
    return NV_ERR_NOT_SUPPORTED;
}

#undef PRIVATE_FIELD


#endif  // KERNEL_SEC2_H

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_KERNEL_SEC2_NVOC_H_
