﻿# CMakeLists.txt: top-level CMake project file. Global configuration is done
# here and sub-projects are included below.
#
cmake_minimum_required (VERSION 3.17)


project ("dlm_infer")

# Build everything as C++17; fail configuration if the compiler cannot comply.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)


# CUDA toolkit version the sub-projects build against.
set(MY_CUDA_VERSION "11.1" CACHE STRING "10.2 or 11.1")
set_property(CACHE MY_CUDA_VERSION PROPERTY STRINGS 10.2 11.1)
mark_as_advanced(MY_CUDA_VERSION)

# TensorRT major version the sub-projects build against.
set(MY_TRT_VERSION "8" CACHE STRING "7 or 8")
set_property(CACHE MY_TRT_VERSION PROPERTY STRINGS 7 8)
# BUG FIX: this previously marked MY_CUDA_VERSION advanced a second time
# (copy-paste error), leaving MY_TRT_VERSION visible in the GUI.
mark_as_advanced(MY_TRT_VERSION)

# Target Windows 7 (0x0601) and newer Win32 APIs.
add_compile_definitions(_WIN32_WINNT=0x0601)

# Emit PDB debug info in Release builds. /OPT:REF and /OPT:ICF restore the
# dead-code stripping and COMDAT folding that /DEBUG would otherwise disable.
# These switches are MSVC-specific, so guard them — unguarded they break
# configuration with GCC/Clang toolchains.
if(MSVC)
    string(APPEND CMAKE_CXX_FLAGS_RELEASE " /Zi")
    string(APPEND CMAKE_EXE_LINKER_FLAGS_RELEASE " /DEBUG /OPT:REF /OPT:ICF")
    string(APPEND CMAKE_SHARED_LINKER_FLAGS_RELEASE " /DEBUG /OPT:REF /OPT:ICF")
endif()

# Default to a Release build when the user did not choose one. Only applies to
# single-config generators (Makefiles, Ninja); multi-config generators such as
# Visual Studio ignore CMAKE_BUILD_TYPE entirely, so we skip them.
get_property(_dlm_is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if(NOT _dlm_is_multi_config AND NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS
        Debug Release RelWithDebInfo MinSizeRel)
endif()
message(STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")

# Pre-processing helper (required).
# Paths are quoted so configuration survives a checkout path containing spaces.
include("${CMAKE_CURRENT_LIST_DIR}/preproc.cmake")


# Optional: build-output directory configuration.
if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/output.cmake")
    include("${CMAKE_CURRENT_LIST_DIR}/output.cmake")
endif()

# Optional: dependency-module configuration.
if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/modules.cmake")
    include("${CMAKE_CURRENT_LIST_DIR}/modules.cmake")
endif()


# Inference backend selection: CUDA (default) or OPENVINO.
set(INFERENCE_BACKEND "CUDA" CACHE STRING "Inference backend: CUDA or OPENVINO")
set_property(CACHE INFERENCE_BACKEND PROPERTY STRINGS CUDA OPENVINO)

# Comparison operands are quoted: an unquoted ${INFERENCE_BACKEND} would be
# re-dereferenced by if() when the value names another variable, and would
# produce a syntax error if the value were ever empty.
if(NOT ("${INFERENCE_BACKEND}" STREQUAL "CUDA" OR "${INFERENCE_BACKEND}" STREQUAL "OPENVINO"))
    message(FATAL_ERROR "Invalid inference backend specified: ${INFERENCE_BACKEND}")
endif()

message(STATUS "Building with ${INFERENCE_BACKEND} backend")




# Feature toggles controlling which DLM sub-project families are built.
# Declared as option() (same defaults as before) so they can be overridden
# from the command line, e.g. -DDLM_STD=ON; the previous plain set() silently
# ignored any user-supplied value.
option(DLM_STD      "Build the DLM_STD sub-projects"      OFF)
option(DLM_CLS      "Build the DLM_CLS sub-projects"      ON)
option(DLM_KEYPOINT "Build the DLM_KEYPOINT sub-projects" OFF)
option(DLM_SEGMENT  "Build the DLM_SEGMENT sub-projects"  ON)
option(DLM_REC      "Build the DLM_REC sub-projects"      OFF)

# Include sub-projects.
# Standard DLM family. Backend-specific encode/algo sub-projects are added,
# then add_dependencies() enforces build ordering between them.
# NOTE(review): add_dependencies only orders builds — it does not link; the
# actual linkage is presumably set up inside the sub-projects. Verify.
if(DLM_STD)
    if("${INFERENCE_BACKEND}" STREQUAL "OPENVINO")
        add_subdirectory("encode_dlm_v5_o")
        add_subdirectory("txr_algo_dlm_v5_o")
        add_subdirectory("txr_algo_dlm")
        add_subdirectory("txr_algo_dlm_demo")
        add_dependencies(txr_algo_dlm txr_algo_dlm_v5_o)
        add_dependencies(txr_algo_dlm_demo txr_algo_dlm)
    elseif("${INFERENCE_BACKEND}" STREQUAL "CUDA")
        add_subdirectory("encode_dlm_v1")
        add_subdirectory("encode_dlm_v3")
        add_subdirectory("encode_dlm_v5")
        add_subdirectory("txr_algo_dlm_v1")
        add_subdirectory("txr_algo_dlm_v3")
        add_subdirectory("txr_algo_dlm_v5")
        add_subdirectory("txr_algo_dlm")
        add_subdirectory("txr_algo_dlm_demo")
        add_dependencies(txr_algo_dlm txr_algo_dlm_v5)
        add_dependencies(txr_algo_dlm_demo txr_algo_dlm)
    endif()
endif()

# Classification DLM family; structure mirrors the DLM_STD section.
if(DLM_CLS)
    if("${INFERENCE_BACKEND}" STREQUAL "OPENVINO")
        add_subdirectory("encode_dlm_cls_v2_o")
        add_subdirectory("txr_algo_dlm_cls_v2_o")
        add_subdirectory("txr_algo_dlm_cls")
        add_subdirectory("txr_algo_dlm_cls_demo")
        add_dependencies(txr_algo_dlm_cls txr_algo_dlm_cls_v2_o)
        add_dependencies(txr_algo_dlm_cls_demo txr_algo_dlm_cls)
    elseif("${INFERENCE_BACKEND}" STREQUAL "CUDA")
        add_subdirectory("encode_dlm_cls_v1")
        add_subdirectory("encode_dlm_cls_v2")
        add_subdirectory("txr_algo_dlm_cls_v1")
        add_subdirectory("txr_algo_dlm_cls_v2")
        add_subdirectory("txr_algo_dlm_cls")
        add_subdirectory("txr_algo_dlm_cls_demo")
        add_dependencies(txr_algo_dlm_cls txr_algo_dlm_cls_v2 txr_algo_dlm_cls_v1)
        add_dependencies(txr_algo_dlm_cls_demo txr_algo_dlm_cls)
    endif()
endif()

# Keypoint DLM family. Unlike the other families there is no backend branch
# here — only one variant is listed, so it is added regardless of
# INFERENCE_BACKEND. NOTE(review): presumably CUDA-only; confirm this is
# intentional when INFERENCE_BACKEND=OPENVINO.
if(DLM_KEYPOINT)
    add_subdirectory("encode_dlm_keypoint_v1")
    add_subdirectory("txr_algo_dlm_keypoint")
    add_subdirectory("txr_algo_dlm_keypoint_v1")
    add_subdirectory("txr_algo_dlm_keypoint_demo")
    # Build-order only (no linkage): wrapper after backend, demo after wrapper.
    add_dependencies(txr_algo_dlm_keypoint txr_algo_dlm_keypoint_v1)
    add_dependencies(txr_algo_dlm_keypoint_demo txr_algo_dlm_keypoint)
endif()

# Segmentation DLM family; structure mirrors the DLM_STD section.
if(DLM_SEGMENT)
    if("${INFERENCE_BACKEND}" STREQUAL "OPENVINO")
        add_subdirectory("encode_dlm_seg_v2_o")
        add_subdirectory("txr_algo_dlm_seg_v2_o")
        add_subdirectory("txr_algo_dlm_seg")
        add_subdirectory("txr_algo_dlm_seg_demo")
        add_dependencies(txr_algo_dlm_seg txr_algo_dlm_seg_v2_o)
        add_dependencies(txr_algo_dlm_seg_demo txr_algo_dlm_seg)
    elseif("${INFERENCE_BACKEND}" STREQUAL "CUDA")
        add_subdirectory("encode_dlm_seg_v1")
        add_subdirectory("encode_dlm_seg_v2")
        add_subdirectory("txr_algo_dlm_seg")
        add_subdirectory("txr_algo_dlm_seg_v1")
        add_subdirectory("txr_algo_dlm_seg_v2")
        add_subdirectory("txr_algo_dlm_seg_demo")
        add_dependencies(txr_algo_dlm_seg txr_algo_dlm_seg_v1 txr_algo_dlm_seg_v2)
        add_dependencies(txr_algo_dlm_seg_demo txr_algo_dlm_seg)
    endif()
endif()

# Recognition DLM family; structure mirrors the DLM_STD section.
if(DLM_REC)
    if("${INFERENCE_BACKEND}" STREQUAL "OPENVINO")
        add_subdirectory("encode_dlm_rec_v1_o")
        add_subdirectory("txr_algo_dlm_rec_v1_o")
        add_subdirectory("txr_algo_dlm_rec")
        add_subdirectory("txr_algo_dlm_rec_demo")
        add_dependencies(txr_algo_dlm_rec txr_algo_dlm_rec_v1_o)
        add_dependencies(txr_algo_dlm_rec_demo txr_algo_dlm_rec)
    elseif("${INFERENCE_BACKEND}" STREQUAL "CUDA")
        add_subdirectory("encode_dlm_rec_v1")
        add_subdirectory("txr_algo_dlm_rec_v1")
        add_subdirectory("txr_algo_dlm_rec")
        add_subdirectory("txr_algo_dlm_rec_demo")
        add_dependencies(txr_algo_dlm_rec txr_algo_dlm_rec_v1)
        add_dependencies(txr_algo_dlm_rec_demo txr_algo_dlm_rec)
    endif()
endif()

# Optional: post-build command configuration.
# Path quoted so a checkout path with spaces does not break the EXISTS check.
if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/postcmd.cmake")
    include("${CMAKE_CURRENT_LIST_DIR}/postcmd.cmake")
endif()
