#Windows treats a symbolic link as a regular file, which is different from Unix.
#We create a hidden file and compile it instead of the original source file.
# windows_symbolic(<target> SRCS <file>... PATH <relative-dir>)
#
# Windows treats a symbolic link as a regular file, so a <src>.cu that is a
# symlink to <src>.cc cannot be compiled directly. For each source, generate
# a hidden copy (.<src>.cu) next to it and let the caller compile that copy.
#   TARGET - name of the custom target that owns the generated copies
#   SRCS   - source files (only the base name, without extension, is used)
#   PATH   - directory relative to the current source dir holding the sources
function(windows_symbolic TARGET)
  set(options "")
  set(oneValueArgs "")
  set(multiValueArgs SRCS PATH)
  cmake_parse_arguments(windows_symbolic "${options}" "${oneValueArgs}"
                        "${multiValueArgs}" ${ARGN})
  set(final_path ${CMAKE_CURRENT_SOURCE_DIR}/${windows_symbolic_PATH})
  set(hidden_files "")
  foreach(src ${windows_symbolic_SRCS})
    get_filename_component(src ${src} NAME_WE)
    if(NOT EXISTS ${final_path}/${src}.cc OR NOT EXISTS ${final_path}/${src}.cu)
      # FATAL_ERROR (not the bare word "FATAL") is required to actually abort
      # configuration; the previous form only printed the word as text.
      message(
        FATAL_ERROR
          " ${src}.cc and ${src}.cu must exists, and ${src}.cu must be symbolic file."
      )
    endif()

    file(
      GENERATE
      OUTPUT ${final_path}/.${src}.cu
      INPUT ${final_path}/${src}.cc)

    add_custom_command(
      OUTPUT ${final_path}/.${src}.cu
      COMMAND ${CMAKE_COMMAND} -E copy_if_different "${final_path}/${src}.cc"
              "${final_path}/.${src}.cu"
      COMMENT "create hidden file of ${src}.cu")
    list(APPEND hidden_files ${final_path}/.${src}.cu)
  endforeach()
  # Create the target once, after the loop; creating it per-source would try
  # to redefine the same target when several SRCS are given.
  add_custom_target(${TARGET} ALL DEPENDS ${hidden_files})
endfunction()

# Usage: pass_library(target inference) appends USE_PASS(target) to paddle_inference_pass.h
# Header that accumulates USE_PASS(...) declarations registered through
# pass_library(). Content is written to a .tmp file during configuration and
# only copied over the real header when it actually changed, to avoid
# needless rebuilds of everything that includes it.
set(pass_file
    ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/paddle_inference_pass.h.tmp)
set(pass_file_final
    ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/paddle_inference_pass.h)
string(
  CONCAT
    pass_file_header
    "// Generated by the paddle/fluid/framework/ir/CMakeLists.txt.  DO NOT EDIT!\n\n"
    "#pragma once\n"
    "#include \"paddle/fluid/framework/ir/pass.h\"\n")
file(WRITE ${pass_file} "${pass_file_header}")
copy_if_different(${pass_file} ${pass_file_final})

# pass_library(<target> <dest> [DEPS ...] [DIR <dir>])
#
# Builds an IR pass as a cc_library and, when <dest> is "base" or
# "inference", registers it: appends USE_PASS(<target>) to ${pass_file} and
# adds the target to the INFER_IR_PASSES internal cache list.
#   TARGET - pass name; <TARGET>.cc is the assumed source file
#   DEST   - destination group; only "base"/"inference" are collected today
#   DIR    - optional subdirectory containing <TARGET>.cc
function(pass_library TARGET DEST)
  set(options "")
  set(oneValueArgs "")
  set(multiValueArgs SRCS DEPS DIR)

  cmake_parse_arguments(pass_library "${options}" "${oneValueArgs}"
                        "${multiValueArgs}" ${ARGN})
  if(pass_library_DIR)
    cc_library(
      ${TARGET}
      SRCS ${pass_library_DIR}/${TARGET}.cc
      DEPS graph_pattern_detector pass fuse_pass_base op_version_registry
           quantize_helper ${pass_library_DEPS})
  else()
    cc_library(
      ${TARGET}
      SRCS ${TARGET}.cc
      DEPS graph_pattern_detector pass fuse_pass_base op_version_registry
           quantize_helper ${pass_library_DEPS})
  endif()

  # add more DEST here, such as train, dist and collect USE_PASS into a file automatically.
  # Quote the expansions: unquoted, if(${DEST} STREQUAL ...) would dereference
  # again should ${DEST} happen to name another variable.
  if("${DEST}" STREQUAL "base" OR "${DEST}" STREQUAL "inference")
    if(NOT CMAKE_BUILD_TYPE STREQUAL "Release")
      message(STATUS "add pass ${TARGET} ${DEST}")
    endif()
    file(APPEND ${pass_file} "USE_PASS(${TARGET});\n")
    set(INFER_IR_PASSES
        ${INFER_IR_PASSES} ${TARGET}
        CACHE INTERNAL "")
  endif()
endfunction()

# Sub-modules of the framework; processed first so their targets are
# available as dependencies for the libraries declared below.
add_subdirectory(ir)
add_subdirectory(details)
add_subdirectory(fleet)
add_subdirectory(io)
add_subdirectory(new_executor)

proto_library(op_def_proto SRCS op_def.proto DEPS framework_proto)
cc_library(
  op_def_api
  SRCS op_def_api.cc
  DEPS op_def_proto)

# Embed every operator compatibility spec (*.pbtxt) into one generated C++
# snippet: an anonymous-namespace map from op name to the spec's raw text.
# Each spec is emitted as a raw string literal R"(...)" so no escaping of the
# file contents is needed.
file(GLOB OP_DEF_FILES
     ${PADDLE_SOURCE_DIR}/paddle/fluid/operators/compat/*.pbtxt)
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/op_def.pbtxt
     "namespace { \n"
     "const std::unordered_map<std::string, std::string> op_def_map =  { \n")
foreach(OP_DEF_FILE ${OP_DEF_FILES})
  file(READ ${OP_DEF_FILE} OP_DEF_CONTENT)
  get_filename_component(OP_NAME ${OP_DEF_FILE} NAME_WE)
  file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/op_def.pbtxt
       "{\"${OP_NAME}\",R\"(${OP_DEF_CONTENT})\"},\n")
endforeach()
# Sentinel entry terminates the initializer list and closes the namespace.
file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/op_def.pbtxt "{\"\",\"\"}};\n}")

# Core data-type and tensor building blocks.
cc_library(
  data_type
  SRCS data_type.cc
  DEPS phi)

cc_library(
  tensor
  SRCS tensor_util.cc
  DEPS data_type dlpack_tensor device_context phi common)

cc_library(
  lod_tensor
  SRCS lod_tensor.cc
  DEPS phi common tensor version)

cc_library(
  garbage_collector
  SRCS garbage_collector.cc
  DEPS device_context phi common glog)

cc_library(
  var_type_traits
  SRCS var_type_traits.cc
  DEPS scope phi common)

# every source file that includes "dnnl.h" must depend on onednn
# or, the first one should depend on onednn
if(WITH_ONEDNN)
  # Ensure the onednn external project is built before compiling sources
  # that include "dnnl.h" (see the note above).
  add_dependencies(var_type_traits onednn)
endif()

# Scope (variable container) and the per-device worker abstraction.
cc_library(
  scope
  SRCS scope.cc
  DEPS glog phi common xxhash var_type_traits)
cc_library(
  device_worker
  SRCS device_worker.cc
  DEPS lod_tensor scope)
cc_library(
  scope_pool
  SRCS scope_pool.cc
  DEPS scope)

cc_library(
  data_device_transform
  SRCS data_device_transform.cc
  DEPS tensor)

# data_type_transform: one implementation per backend (CUDA, ROCm, XPU, CPU).
if(WITH_GPU)
  if(WIN32)
    # Windows treats a symbolic link as a regular file, unlike Unix, so
    # compile a generated hidden copy of the .cu file instead of the symlink.
    windows_symbolic(hidden_file SRCS data_type_transform.cu)
    nv_library(
      data_type_transform
      SRCS .data_type_transform.cu
      DEPS tensor)
    add_dependencies(data_type_transform hidden_file)
  else()
    nv_library(
      data_type_transform
      SRCS data_type_transform.cu
      DEPS tensor)
  endif()
elseif(WITH_ROCM)
  hip_library(
    data_type_transform
    SRCS data_type_transform.cu
    DEPS tensor)
elseif(WITH_XPU)
  cc_library(
    data_type_transform
    SRCS data_type_transform.cc
    DEPS tensor xpulib)
else()
  cc_library(
    data_type_transform
    SRCS data_type_transform.cc
    DEPS tensor)
endif()

cc_library(
  data_layout_transform
  SRCS data_layout_transform.cc
  DEPS tensor phi common)

# data_transform aggregates the device/type/layout transforms above.
cc_library(
  data_transform
  SRCS data_transform.cc
  DEPS tensor
       selected_rows_utils
       data_device_transform
       data_type_transform
       data_layout_transform
       phi
       common)

cc_library(
  attribute
  SRCS attribute.cc
  DEPS phi common)
cc_library(
  op_version_proto
  SRCS op_version_proto.cc
  DEPS phi)

cc_library(
  op_version_registry
  SRCS op_version_registry.cc
  DEPS op_version_proto phi)

# Operator metadata: proto maker, registered op info, and shape inference.
cc_library(
  op_proto_maker
  SRCS op_proto_maker.cc
  DEPS phi attribute ops_extra_info glog auto_parallel_proto)
cc_library(
  no_need_buffer_vars_inference
  SRCS no_need_buffer_vars_inference.cc
  DEPS attribute device_context)
cc_library(
  op_info
  SRCS op_info.cc
  DEPS attribute phi no_need_buffer_vars_inference)
cc_library(
  shape_inference
  SRCS shape_inference.cc
  DEPS phi common attribute selected_rows_utils)

# every source file that includes "dnnl.h" must depend on onednn
# or, the first one should depend on onednn
if(WITH_ONEDNN)
  # onednn must be built before any source including "dnnl.h" is compiled.
  add_dependencies(shape_inference onednn)
endif()

cc_library(
  op_kernel_type
  SRCS op_kernel_type.cc
  DEPS device_context phi common)

# phi_utils is built identically for every backend: the former WITH_XPU
# branch and its else() branch contained byte-for-byte identical cc_library
# calls, so the dead conditional is collapsed into a single declaration.
cc_library(
  phi_utils
  SRCS phi_utils.cc
  DEPS lod_tensor selected_rows_utils phi common var_type_traits op_info)

# Dependency list for the core `operator` library. (The "OPERETER" spelling
# is a long-standing name kept as-is in case it is referenced elsewhere.)
set(OPERETER_DEPS
    op_info
    proto_desc
    tensor
    scope
    glog
    shape_inference
    data_transform
    lod_tensor
    op_kernel_type
    op_call_stack
    detail_op_handle
    phi_utils
    phi
    common
    op_compat_infos
    type_info
    supplement_tracing)

# Collective-communication process groups are linked in only when available.
if(WITH_NCCL OR WITH_RCCL)
  list(APPEND OPERETER_DEPS process_group_nccl)
elseif(WITH_CUSTOM_DEVICE)
  list(APPEND OPERETER_DEPS process_group_custom)
endif()

cc_library(
  operator
  SRCS operator.cc transfer_scope_cache.cc unused_var_check.cc
       infershape_utils.cc
  DEPS ${OPERETER_DEPS})

cc_library(version SRCS version.cc)

# proto_desc is split in two: the descriptor classes are compiled once as an
# object library, then linked together with program_converter.cc below.
add_library(proto_desc_base OBJECT var_desc.cc op_desc.cc block_desc.cc
                                   program_desc.cc)
# NOTE(review): ordering-only dependencies (add_dependencies does not link);
# presumably needed so generated headers exist before compilation — confirm.
add_dependencies(
  proto_desc_base
  attribute
  ops_extra_info
  shape_inference
  op_info
  glog
  version
  xxhash
  phi
  common)

cc_library(
  proto_desc
  SRCS $<TARGET_OBJECTS:proto_desc_base> program_converter.cc
  DEPS attribute
       ops_extra_info
       shape_inference
       op_info
       operator
       glog
       version
       xxhash
       op_dist_attr
       phi
       common
       op_version_proto
       op_version_registry)

cc_library(
  op_registry
  SRCS op_registry.cc
  DEPS op_proto_maker op_info operator ops_extra_info glog proto_desc)

cc_library(
  op_call_stack
  SRCS op_call_stack.cc
  DEPS op_proto_maker phi common)

cc_library(
  program_utils
  SRCS program_utils.cc
  DEPS proto_desc)

if(WITH_PYTHON)
  # Compile the .proto definitions into Python modules.
  py_proto_compile(framework_py_proto SRCS framework.proto data_feed.proto)
  py_proto_compile(trainer_py_proto SRCS trainer_desc.proto data_feed.proto)
  py_proto_compile(distributed_strategy_py_proto SRCS
                   distributed_strategy.proto)
  py_proto_compile(pass_desc_py_proto SRCS pass_desc.proto)
  # Generate an empty __init__.py to make framework_py_proto a valid Python
  # module.
  add_custom_target(fleet_proto_init)
  add_custom_command(
    TARGET fleet_proto_init
    POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E make_directory
            ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto
    COMMAND
      ${CMAKE_COMMAND} -E touch
      ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/__init__.py)
  file(TOUCH ${CMAKE_CURRENT_BINARY_DIR}/__init__.py)
  add_dependencies(
    framework_py_proto
    trainer_py_proto
    distributed_strategy_py_proto
    fleet_proto_init
    pass_desc_py_proto
    ps_py_proto
    pslib_py_proto
    ps_py_proto_init)
  # Copy generated *.py into the python package tree. Wildcard copies are
  # not supported by `cmake -E copy`, hence the platform-specific cp/copy.
  if(NOT WIN32)
    add_custom_command(
      TARGET framework_py_proto
      POST_BUILD
      COMMAND ${CMAKE_COMMAND} -E make_directory
              ${PADDLE_BINARY_DIR}/python/paddle/base/proto
      COMMAND cp *.py ${PADDLE_BINARY_DIR}/python/paddle/base/proto/
      COMMAND cp distributed_strategy_*.py
              ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto
      COMMENT "Copy generated python proto into directory paddle/fluid/proto."
      WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
  else()
    # Windows `copy` needs backslash-separated destination paths.
    string(REPLACE "/" "\\" proto_dstpath
                   "${PADDLE_BINARY_DIR}/python/paddle/base/proto/")
    string(
      REPLACE "/" "\\" fleet_proto_dstpath
              "${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/")
    add_custom_command(
      TARGET framework_py_proto
      POST_BUILD
      COMMAND ${CMAKE_COMMAND} -E make_directory
              ${PADDLE_BINARY_DIR}/python/paddle/base/proto
      COMMAND copy /Y *.py ${proto_dstpath}
      COMMAND copy /Y distributed_strategy_*.py ${fleet_proto_dstpath}
      COMMENT
        "Copy generated python proto into paddle/fluid/proto and paddle/distributed/fleet/proto directories."
      WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
  endif()
endif()

# Feed/fetch plumbing used by the executors.
cc_library(
  feed_fetch_method
  SRCS feed_fetch_method.cc
  DEPS lod_tensor scope glog)

cc_library(
  feed_hook
  SRCS feed_hook.cc
  DEPS lod_tensor scope glog pir)

cc_library(
  variable_helper
  SRCS variable_helper.cc
  DEPS lod_tensor)

# Dependencies of naive_executor; optional engines are appended below.
set(NAIVE_EXECUTOR_DEPS
    op_registry
    scope
    phi
    glog
    feed_fetch_method
    feed_hook
    graph_to_program_pass
    standalone_executor
    variable_helper)

if(TENSORRT_FOUND)
  set(NAIVE_EXECUTOR_DEPS ${NAIVE_EXECUTOR_DEPS} tensorrt_engine_op)
endif()

if(WITH_OPENVINO)
  set(NAIVE_EXECUTOR_DEPS ${NAIVE_EXECUTOR_DEPS} openvino_engine_op)
endif()

cc_library(
  naive_executor
  SRCS naive_executor.cc
  DEPS ${NAIVE_EXECUTOR_DEPS})

# Garbage-collection helper used by the executor at runtime.
cc_library(
  executor_gc_helper
  SRCS executor_gc_helper.cc
  DEPS while_op_helper
       conditional_block_op_helper
       pylayer_op_helper
       scope
       proto_desc
       operator
       garbage_collector
       op_registry)
# The executor library bundles the trainer/worker/data-feed implementations.
# The source list is identical with and without WITH_DISTRIBUTE, and the
# dependency lists differed only by the extra `metrics` target, so the two
# previously duplicated cc_library calls are merged here. The DEPS order of
# the original calls is preserved (metrics sits between box_wrapper and
# densetensor_printer).
set(EXECUTOR_SRCS
    executor.cc
    multi_trainer.cc
    pipeline_trainer.cc
    dataset_factory.cc
    dist_multi_trainer.cc
    trainer_factory.cc
    trainer.cc
    data_feed_factory.cc
    heterxpu_trainer.cc
    data_feed.cc
    device_worker.cc
    hogwild_worker.cc
    hetercpu_worker.cc
    ps_gpu_worker.cc
    ps_gpu_trainer.cc
    downpour_worker.cc
    downpour_worker_opt.cc
    data_feed.cu
    pull_dense_worker.cc
    section_worker.cc
    device_worker_factory.cc
    data_set.cc)
set(EXECUTOR_DEPS
    op_registry
    scope
    glog
    framework_io
    fleet_wrapper
    heter_wrapper
    ps_gpu_wrapper
    box_wrapper)
if(WITH_DISTRIBUTE)
  # metrics is only linked in distributed builds.
  list(APPEND EXECUTOR_DEPS metrics)
endif()
list(
  APPEND
  EXECUTOR_DEPS
  densetensor_printer
  feed_fetch_method
  feed_hook
  graph_to_program_pass
  variable_helper)

cc_library(
  executor
  SRCS ${EXECUTOR_SRCS}
  DEPS ${EXECUTOR_DEPS})

# Additional libraries executor needs at link time (plain signature matches
# the rest of this file; do not mix in PRIVATE/PUBLIC keywords).
target_link_libraries(
  executor
  while_op_helper
  executor_gc_helper
  static_prim_api
  static_utils
  get_expected_kernel_func
  conditional_block_op_helper
  pylayer_op_helper)

cc_library(
  compiled_program
  SRCS compiled_program.cc
  DEPS graph build_strategy eager_deletion_pass)

cc_library(
  executor_cache
  SRCS executor_cache.cc
  DEPS pir_transforms pir)
cc_library(
  prune
  SRCS prune.cc
  DEPS phi auto_parallel_proto proto_desc)
cc_library(
  selected_rows_utils
  SRCS selected_rows_utils.cc
  DEPS phi)

cc_library(
  dlpack_tensor
  SRCS dlpack_tensor.cc
  DEPS dlpack phi)

cc_library(
  op_compatible_info
  SRCS op_compatible_info.cc
  DEPS string_helper proto_desc)

# Capture git metadata for commit.h. ERROR_QUIET keeps builds from source
# tarballs (no .git directory) from spewing git errors to stderr; in that
# case the variables are simply left empty, matching the previous behavior.

# Get the current working branch
execute_process(
  COMMAND git rev-parse --abbrev-ref HEAD
  WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
  OUTPUT_VARIABLE PADDLE_BRANCH
  OUTPUT_STRIP_TRAILING_WHITESPACE
  ERROR_QUIET)

# Get the latest abbreviated commit hash of the working branch
execute_process(
  COMMAND git log -1 --format=%h
  WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
  OUTPUT_VARIABLE PADDLE_COMMIT
  OUTPUT_STRIP_TRAILING_WHITESPACE
  ERROR_QUIET)

message(STATUS "commit: ${PADDLE_COMMIT}")
message(STATUS "branch: ${PADDLE_BRANCH}")

# Substitute the captured values into commit.h (configure-time snapshot).
configure_file(commit.h.in commit.h)

cc_library(
  custom_operator
  SRCS custom_operator.cc
  DEPS tensor
       attribute
       op_registry
       operator
       string_helper
       phi
       common
       imperative_flag
       layer
       op_dialect_vjp)

cc_library(
  type_info
  SRCS type_info.cc type_defs.cc
  DEPS common)
target_link_libraries(type_info pir op_dialect)
# Build-order-only dependencies (generated protobuf headers); onednn is
# added below for the same reason when enabled.
add_dependencies(type_info framework_proto auto_parallel_proto xxhash)
if(WITH_ONEDNN)
  add_dependencies(type_info onednn)
endif()

# Umbrella module list re-exported through the paddle_framework target.
set(FLUID_FRAMEWORK_MODULES
    proto_desc
    lod_tensor
    executor
    layer
    phi
    common
    custom_operator)

cc_library(paddle_framework DEPS ${FLUID_FRAMEWORK_MODULES})
