// Copyright (c) 2024 IndustrialTBB Framework
// Licensed under MIT License

#ifndef ITBB_CORE_TBB_FLOW_PIPELINE_H_
#define ITBB_CORE_TBB_FLOW_PIPELINE_H_

#include <oneapi/tbb.h>
#include <oneapi/tbb/flow_graph.h>

#include <atomic>
#include <chrono>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

namespace itbb {
// Node roles available in a TBB flow-graph pipeline.
enum class PipelineNodeType {
  kSource,     // Data source (produces items)
  kFunction,   // Transformation / processing step
  kMulticast,  // Broadcasts one input to multiple successors
  kJoin,       // Merges multiple inputs into one
  kSplit,      // Splits one input into multiple outputs
  kBuffer,     // Buffers items between stages
  kSink        // Terminal consumer of items
};

// Per-node pipeline statistics.
//
// All counters are atomics so concurrent node bodies can record without
// external locking.  Because std::atomic is neither copyable nor movable,
// explicit copy operations are provided that take a field-wise snapshot —
// without them this struct cannot be returned by value (as
// TbbAdvancedPipeline::GetNodeStats does).
struct PipelineNodeStats {
  std::atomic<uint64_t> items_processed{0};
  std::atomic<uint64_t> items_dropped{0};
  std::atomic<uint64_t> total_processing_time_ns{0};
  std::atomic<uint64_t> max_processing_time_ns{0};
  // Starts at UINT64_MAX so the first recorded duration becomes the minimum.
  std::atomic<uint64_t> min_processing_time_ns{UINT64_MAX};
  std::atomic<bool> is_active{false};

  PipelineNodeStats() = default;

  // Field-wise constructor used to materialize snapshots in one expression.
  PipelineNodeStats(uint64_t processed, uint64_t dropped, uint64_t total_ns,
                    uint64_t max_ns, uint64_t min_ns, bool active)
      : items_processed{processed},
        items_dropped{dropped},
        total_processing_time_ns{total_ns},
        max_processing_time_ns{max_ns},
        min_processing_time_ns{min_ns},
        is_active{active} {}

  // Copy = snapshot of the source's atomics (not an atomic operation as a
  // whole; individual fields may be from slightly different instants).
  PipelineNodeStats(const PipelineNodeStats& other) { *this = other; }

  PipelineNodeStats& operator=(const PipelineNodeStats& other) {
    if (this != &other) {
      items_processed.store(
          other.items_processed.load(std::memory_order_acquire),
          std::memory_order_relaxed);
      items_dropped.store(other.items_dropped.load(std::memory_order_acquire),
                          std::memory_order_relaxed);
      total_processing_time_ns.store(
          other.total_processing_time_ns.load(std::memory_order_acquire),
          std::memory_order_relaxed);
      max_processing_time_ns.store(
          other.max_processing_time_ns.load(std::memory_order_acquire),
          std::memory_order_relaxed);
      min_processing_time_ns.store(
          other.min_processing_time_ns.load(std::memory_order_acquire),
          std::memory_order_relaxed);
      is_active.store(other.is_active.load(std::memory_order_acquire),
                      std::memory_order_relaxed);
    }
    return *this;
  }

  // Records one processed item and folds `duration` into the running
  // total/max/min counters.  Safe to call from multiple threads.
  void RecordProcessing(std::chrono::nanoseconds duration) {
    items_processed.fetch_add(1, std::memory_order_relaxed);

    const uint64_t duration_ns = static_cast<uint64_t>(duration.count());
    total_processing_time_ns.fetch_add(duration_ns, std::memory_order_relaxed);

    // Lock-free max update.  compare_exchange_weak already reloads the
    // current value into `current_max` on failure, so the explicit re-load
    // the original performed each iteration was redundant.
    uint64_t current_max =
        max_processing_time_ns.load(std::memory_order_relaxed);
    while (duration_ns > current_max &&
           !max_processing_time_ns.compare_exchange_weak(
               current_max, duration_ns, std::memory_order_release,
               std::memory_order_relaxed)) {
    }

    // Lock-free min update, same pattern.
    uint64_t current_min =
        min_processing_time_ns.load(std::memory_order_relaxed);
    while (duration_ns < current_min &&
           !min_processing_time_ns.compare_exchange_weak(
               current_min, duration_ns, std::memory_order_release,
               std::memory_order_relaxed)) {
    }
  }

  // Average per-item processing time in nanoseconds; 0.0 when nothing has
  // been recorded yet.
  double GetAverageProcessingTime() const {
    const uint64_t total_items =
        items_processed.load(std::memory_order_acquire);
    if (total_items == 0) return 0.0;

    const uint64_t total_time =
        total_processing_time_ns.load(std::memory_order_acquire);
    return static_cast<double>(total_time) / total_items;
  }
};

// TBB原生的高级流水线管理器
class TbbAdvancedPipeline {
 public:
  TbbAdvancedPipeline() : flow_graph_(), node_stats_() {}

  ~TbbAdvancedPipeline() { Stop(); }

  // 创建数据源节点
  template <typename DataType>
  std::string CreateSourceNode(const std::string& name,
                               std::function<bool(DataType&)> source_function,
                               bool is_active = true) {
    auto source_node = std::make_unique<tbb::flow::source_node<DataType>>(
        flow_graph_,
        [this, name, source_function](DataType& data) -> bool {
          auto start_time = std::chrono::high_resolution_clock::now();

          bool result = source_function(data);

          auto end_time = std::chrono::high_resolution_clock::now();
          auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(
              end_time - start_time);

          node_stats_[name].RecordProcessing(duration);
          return result;
        },
        /*is_active=*/is_active);

    source_nodes_[name] = std::move(source_node);
    node_stats_[name].is_active.store(is_active, std::memory_order_release);

    return name;
  }

  // 创建函数处理节点
  template <typename InputType, typename OutputType>
  std::string CreateFunctionNode(
      const std::string& name,
      std::function<OutputType(const InputType&)> function,
      size_t concurrency = tbb::flow::unlimited) {
    auto function_node =
        std::make_unique<tbb::flow::function_node<InputType, OutputType>>(
            flow_graph_, concurrency,
            [this, name, function](const InputType& input) -> OutputType {
              auto start_time = std::chrono::high_resolution_clock::now();

              OutputType result = function(input);

              auto end_time = std::chrono::high_resolution_clock::now();
              auto duration =
                  std::chrono::duration_cast<std::chrono::nanoseconds>(
                      end_time - start_time);

              node_stats_[name].RecordProcessing(duration);
              return result;
            });

    function_nodes_[name] = std::move(function_node);
    node_stats_[name].is_active.store(true, std::memory_order_release);

    return name;
  }

  // 创建广播节点
  template <typename DataType>
  std::string CreateBroadcastNode(const std::string& name) {
    auto broadcast_node =
        std::make_unique<tbb::flow::broadcast_node<DataType>>(flow_graph_);

    broadcast_nodes_[name] = std::move(broadcast_node);
    node_stats_[name].is_active.store(true, std::memory_order_release);

    return name;
  }

  // 创建缓冲节点
  template <typename DataType>
  std::string CreateBufferNode(const std::string& name, size_t capacity = 0) {
    std::unique_ptr<tbb::flow::graph_node> buffer_node;

    if (capacity > 0) {
      // 有限容量缓冲
      buffer_node = std::make_unique<tbb::flow::limiter_node<DataType>>(
          flow_graph_, capacity);
    } else {
      // 无限容量缓冲
      buffer_node =
          std::make_unique<tbb::flow::buffer_node<DataType>>(flow_graph_);
    }

    buffer_nodes_[name] = std::move(buffer_node);
    node_stats_[name].is_active.store(true, std::memory_order_release);

    return name;
  }

  // 创建数据汇聚节点
  template <typename DataType>
  std::string CreateSinkNode(
      const std::string& name,
      std::function<void(const DataType&)> sink_function) {
    auto sink_node = std::make_unique<tbb::flow::function_node<DataType, void>>(
        flow_graph_, tbb::flow::unlimited,
        [this, name, sink_function](const DataType& data) {
          auto start_time = std::chrono::high_resolution_clock::now();

          sink_function(data);

          auto end_time = std::chrono::high_resolution_clock::now();
          auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(
              end_time - start_time);

          node_stats_[name].RecordProcessing(duration);
        });

    sink_nodes_[name] = std::move(sink_node);
    node_stats_[name].is_active.store(true, std::memory_order_release);

    return name;
  }

  // 连接节点
  template <typename DataType>
  bool ConnectNodes(const std::string& from_node, const std::string& to_node) {
    // 这里需要根据节点类型进行适当的连接
    // 简化实现，实际需要更复杂的类型检查和连接逻辑

    // 记录连接关系
    connections_[from_node].push_back(to_node);
    return true;
  }

  // 启动流水线
  void Start() {
    is_running_.store(true, std::memory_order_release);

    // 激活所有源节点
    for (auto& [name, source_node] : source_nodes_) {
      // 激活源节点的逻辑
      node_stats_[name].is_active.store(true, std::memory_order_release);
    }
  }

  // 停止流水线
  void Stop() {
    is_running_.store(false, std::memory_order_release);

    // 等待所有任务完成
    flow_graph_.wait_for_all();

    // 停用所有节点
    for (auto& [name, stats] : node_stats_) {
      stats.is_active.store(false, std::memory_order_release);
    }
  }

  // 等待流水线完成
  void WaitForCompletion() { flow_graph_.wait_for_all(); }

  // 获取节点统计信息
  PipelineNodeStats GetNodeStats(const std::string& node_name) const {
    auto it = node_stats_.find(node_name);
    if (it != node_stats_.end()) {
      return it->second;
    }
    return PipelineNodeStats{};
  }

  // 获取所有节点统计信息
  std::unordered_map<std::string, PipelineNodeStats> GetAllNodeStats() const {
    return node_stats_;
  }

  // 获取流水线整体统计
  struct PipelineStats {
    size_t total_nodes;
    size_t active_nodes;
    uint64_t total_items_processed;
    double average_throughput;  // items per second
    std::chrono::nanoseconds total_processing_time;
  };

  PipelineStats GetPipelineStats() const {
    PipelineStats stats{};
    stats.total_nodes = node_stats_.size();

    for (const auto& [name, node_stats] : node_stats_) {
      if (node_stats.is_active.load(std::memory_order_acquire)) {
        stats.active_nodes++;
      }

      stats.total_items_processed +=
          node_stats.items_processed.load(std::memory_order_acquire);
      stats.total_processing_time += std::chrono::nanoseconds(
          node_stats.total_processing_time_ns.load(std::memory_order_acquire));
    }

    // 计算吞吐量
    if (stats.total_processing_time.count() > 0) {
      double seconds =
          static_cast<double>(stats.total_processing_time.count()) / 1e9;
      stats.average_throughput =
          static_cast<double>(stats.total_items_processed) / seconds;
    }

    return stats;
  }

  // 动态调整节点并发度
  bool SetNodeConcurrency(const std::string& node_name, size_t concurrency) {
    // TBB flow_graph不支持动态调整并发度
    // 这里可以记录配置，在重建节点时使用
    node_concurrency_[node_name] = concurrency;
    return true;
  }

  // 动态启用/禁用节点
  bool SetNodeActive(const std::string& node_name, bool active) {
    auto it = node_stats_.find(node_name);
    if (it != node_stats_.end()) {
      it->second.is_active.store(active, std::memory_order_release);
      return true;
    }
    return false;
  }

  // 获取流水线拓扑信息
  std::unordered_map<std::string, std::vector<std::string>> GetTopology()
      const {
    return connections_;
  }

 private:
  tbb::flow::graph flow_graph_;
  std::atomic<bool> is_running_{false};

  // 节点存储 - 使用类型擦除存储不同类型的节点
  std::unordered_map<std::string, std::unique_ptr<tbb::flow::graph_node>>
      source_nodes_;
  std::unordered_map<std::string, std::unique_ptr<tbb::flow::graph_node>>
      function_nodes_;
  std::unordered_map<std::string, std::unique_ptr<tbb::flow::graph_node>>
      broadcast_nodes_;
  std::unordered_map<std::string, std::unique_ptr<tbb::flow::graph_node>>
      buffer_nodes_;
  std::unordered_map<std::string, std::unique_ptr<tbb::flow::graph_node>>
      sink_nodes_;

  // 统计信息
  mutable std::unordered_map<std::string, PipelineNodeStats> node_stats_;

  // 连接关系
  std::unordered_map<std::string, std::vector<std::string>> connections_;

  // 节点配置
  std::unordered_map<std::string, size_t> node_concurrency_;
};

// Prebuilt industrial data-pipeline templates.
//
// Demonstrates how a TbbAdvancedPipeline is assembled for concrete device
// types.  The nested data structs and static helpers at the bottom are
// placeholder stand-ins so the templates are self-contained; real
// implementations would talk to hardware and storage.
class IndustrialDataPipelineTemplates {
 public:
  // ADwin data-processing pipeline:
  //   source -> validator -> processor -> broadcast -> storage
  // with a side branch: validator -> alarm_checker -> alarm_handler.
  static void SetupADwinPipeline(TbbAdvancedPipeline& pipeline) {
    // Source: reads raw frames from the ADwin device.
    pipeline.CreateSourceNode<ADwinRawData>(
        "adwin_source",
        [](ADwinRawData& data) -> bool { return ReadADwinData(data); });

    // Validation stage.
    pipeline.CreateFunctionNode<ADwinRawData, ValidatedData>(
        "data_validator", [](const ADwinRawData& raw) -> ValidatedData {
          return ValidateADwinData(raw);
        });

    // Processing stage - runs with elevated concurrency.
    pipeline.CreateFunctionNode<ValidatedData, ProcessedData>(
        "data_processor",
        [](const ValidatedData& validated) -> ProcessedData {
          return ProcessDataWithTBB(validated);
        },
        4);  // up to 4 concurrent processor invocations

    // Broadcast: fans processed data out to multiple consumers.
    pipeline.CreateBroadcastNode<ProcessedData>("data_broadcast");

    // Storage sink.
    pipeline.CreateSinkNode<ProcessedData>(
        "data_storage",
        [](const ProcessedData& data) { StoreProcessedData(data); });

    // Alarm-condition check stage.
    pipeline.CreateFunctionNode<ValidatedData, AlarmData>(
        "alarm_checker", [](const ValidatedData& data) -> AlarmData {
          return CheckAlarmConditions(data);
        });

    // Alarm-handling sink; only triggered alarms are acted upon.
    pipeline.CreateSinkNode<AlarmData>("alarm_handler",
                                       [](const AlarmData& alarm) {
                                         if (alarm.is_triggered) {
                                           HandleAlarm(alarm);
                                         }
                                       });

    // Main chain.
    pipeline.ConnectNodes<ADwinRawData>("adwin_source", "data_validator");
    pipeline.ConnectNodes<ValidatedData>("data_validator", "data_processor");
    pipeline.ConnectNodes<ProcessedData>("data_processor", "data_broadcast");
    pipeline.ConnectNodes<ProcessedData>("data_broadcast", "data_storage");

    // Side branch: alarm checking.
    pipeline.ConnectNodes<ValidatedData>("data_validator", "alarm_checker");
    pipeline.ConnectNodes<AlarmData>("alarm_checker", "alarm_handler");
  }

  // Generic four-stage device pipeline:
  //   source -> validator -> processor -> storage
  // with node names derived from `device_type`.
  template <typename DeviceDataType>
  static void SetupGenericDevicePipeline(TbbAdvancedPipeline& pipeline,
                                         const std::string& device_type) {
    std::string source_name = device_type + "_source";
    std::string validator_name = device_type + "_validator";
    std::string processor_name = device_type + "_processor";
    std::string storage_name = device_type + "_storage";

    // Create the generic device-data stages.
    pipeline.CreateSourceNode<DeviceDataType>(
        source_name, [](DeviceDataType& data) -> bool {
          return ReadGenericDeviceData(data);
        });

    pipeline.CreateFunctionNode<DeviceDataType, DeviceDataType>(
        validator_name, [](const DeviceDataType& data) -> DeviceDataType {
          return ValidateGenericData(data);
        });

    pipeline.CreateFunctionNode<DeviceDataType, DeviceDataType>(
        processor_name, [](const DeviceDataType& data) -> DeviceDataType {
          return ProcessGenericData(data);
        });

    pipeline.CreateSinkNode<DeviceDataType>(
        storage_name,
        [](const DeviceDataType& data) { StoreGenericData(data); });

    // Wire the stages in order.
    pipeline.ConnectNodes<DeviceDataType>(source_name, validator_name);
    pipeline.ConnectNodes<DeviceDataType>(validator_name, processor_name);
    pipeline.ConnectNodes<DeviceDataType>(processor_name, storage_name);
  }

 private:
  // Placeholder data types used by the ADwin template.
  struct ADwinRawData {};
  struct ValidatedData {};
  struct ProcessedData {};
  struct AlarmData {
    bool is_triggered = false;  // set when an alarm condition fired
  };

  // Placeholder helpers; all are no-op stubs in this header.
  static bool ReadADwinData(ADwinRawData& data) { return true; }
  static ValidatedData ValidateADwinData(const ADwinRawData& raw) {
    return ValidatedData{};
  }
  static ProcessedData ProcessDataWithTBB(const ValidatedData& validated) {
    return ProcessedData{};
  }
  static void StoreProcessedData(const ProcessedData& data) {}
  static AlarmData CheckAlarmConditions(const ValidatedData& data) {
    return AlarmData{};
  }
  static void HandleAlarm(const AlarmData& alarm) {}

  template <typename T>
  static bool ReadGenericDeviceData(T& data) {
    return true;
  }
  template <typename T>
  static T ValidateGenericData(const T& data) {
    return data;
  }
  template <typename T>
  static T ProcessGenericData(const T& data) {
    return data;
  }
  template <typename T>
  static void StoreGenericData(const T& data) {}
};
}  // namespace itbb

#endif  // ITBB_CORE_TBB_FLOW_PIPELINE_H_
