﻿// Copyright (c) 2024 IndustrialTBB Framework
// Licensed under MIT License

#ifndef ITBB_CORE_FRAMEWORK_H_
#define ITBB_CORE_FRAMEWORK_H_

#include <itbb/devices/tbb_device_interface.h>
#include <oneapi/tbb.h>
#include <oneapi/tbb/concurrent_hash_map.h>
#include <oneapi/tbb/concurrent_priority_queue.h>
#include <oneapi/tbb/concurrent_vector.h>
#include <oneapi/tbb/enumerable_thread_specific.h>
#include <oneapi/tbb/flow_graph.h>
#include <oneapi/tbb/global_control.h>
#include <oneapi/tbb/info.h>
#include <oneapi/tbb/task_arena.h>
#include <oneapi/tbb/task_group.h>
#include <oneapi/tbb/task_scheduler_observer.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <functional>
#include <future>
#include <iostream>
#include <limits>
#include <memory>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>

namespace itbb {
// TBB原生的配置系统 - 运行时动态调整
// TBB-native configuration system with runtime dynamic adjustment.
//
// Numeric entries are stored as std::atomic<double> inside a
// tbb::concurrent_hash_map, so once the bucket accessor is released all
// reads/writes are lock-free.  Callbacks registered for a key are invoked
// whenever that key is (re)written via SetNumericConfig().
class TbbDynamicConfig {
 public:
  // Concurrent storage for numeric and string configuration entries.
  using ConfigMap = tbb::concurrent_hash_map<std::string, std::atomic<double>>;
  using StringConfigMap = tbb::concurrent_hash_map<std::string, std::string>;

  // Process-wide singleton accessor.  The constructor is private; this is
  // the only way to obtain an instance (previously the class had a private
  // constructor but no accessor at all, making it unconstructible).
  static TbbDynamicConfig& GetInstance() {
    static TbbDynamicConfig instance;
    return instance;
  }

  TbbDynamicConfig(const TbbDynamicConfig&) = delete;
  TbbDynamicConfig& operator=(const TbbDynamicConfig&) = delete;

  // Sets (or creates) a numeric key, then notifies registered callbacks
  // with the previous value (0.0 on first insertion).  The bucket write
  // lock is released *before* user callbacks run so a callback that reads
  // the same key cannot deadlock on the accessor.
  void SetNumericConfig(const std::string& key, double value) {
    double old_value = 0.0;
    {
      ConfigMap::accessor accessor;
      const bool inserted = numeric_configs_.insert(accessor, key);
      if (!inserted) {
        old_value = accessor->second.load(std::memory_order_acquire);
      }
      accessor->second.store(value, std::memory_order_release);
    }  // release the write lock before invoking user code

    NotifyConfigChange(key, old_value, value);
  }

  // Returns the numeric value for |key|, or |default_value| if absent.
  double GetConfig(const std::string& key, double default_value = 0.0) const {
    ConfigMap::const_accessor accessor;
    if (numeric_configs_.find(accessor, key)) {
      return accessor->second.load(std::memory_order_acquire);
    }
    return default_value;
  }

  // Sets (or creates) a string-valued key.
  void SetStringConfig(const std::string& key, const std::string& value) {
    StringConfigMap::accessor accessor;
    string_configs_.insert(accessor, key);
    accessor->second = value;
  }

  // Returns the string value for |key|, or |default_value| if absent.
  std::string GetStringConfig(const std::string& key,
                              const std::string& default_value = "") const {
    StringConfigMap::const_accessor accessor;
    if (string_configs_.find(accessor, key)) {
      return accessor->second;
    }
    return default_value;
  }

  // Change-notification callback: (key, old_value, new_value).
  using ConfigChangeCallback =
      std::function<void(const std::string&, double, double)>;

  // Registers |callback| to fire whenever |key| is written.
  // NOTE(review): callbacks run under a read lock on the callback map --
  // do not call RegisterConfigChangeCallback for the same key from inside
  // a callback, or the write lock will deadlock against the held read lock.
  void RegisterConfigChangeCallback(const std::string& key,
                                    ConfigChangeCallback callback) {
    CallbackMap::accessor accessor;
    callbacks_.insert(accessor, key);
    accessor->second.push_back(std::move(callback));
  }

 private:
  TbbDynamicConfig() = default;

  ConfigMap numeric_configs_;
  StringConfigMap string_configs_;

  using CallbackVector = tbb::concurrent_vector<ConfigChangeCallback>;
  using CallbackMap = tbb::concurrent_hash_map<std::string, CallbackVector>;
  CallbackMap callbacks_;

  // Invokes every registered callback for |key| with the real old value
  // (previously old_value was passed as a copy of new_value).
  void NotifyConfigChange(const std::string& key, double old_value,
                          double new_value) {
    CallbackMap::const_accessor accessor;
    if (callbacks_.find(accessor, key)) {
      for (const auto& callback : accessor->second) {
        if (callback) {
          callback(key, old_value, new_value);
        }
      }
    }
  }
};

// TBB原生的性能观察器
// TBB-native performance observer.
//
// Hooks into the TBB scheduler via task_scheduler_observer and tracks
// (a) how many worker threads are currently inside the scheduler and
// (b) the accumulated wall time each worker spends between scheduler
// entry and exit (i.e. scheduler residency, not per-task CPU time).
//
// Uses steady_clock rather than high_resolution_clock: the latter is not
// guaranteed to be monotonic, so interval measurements could go negative.
class IndustrialTaskObserver : public tbb::task_scheduler_observer {
 public:
  IndustrialTaskObserver() : tbb::task_scheduler_observer() {
    observe(true);  // start receiving scheduler callbacks
  }

  ~IndustrialTaskObserver() {
    observe(false);  // stop observing before members are destroyed
  }

  // TBB callback: a thread entered the scheduler.
  void on_scheduler_entry(bool is_worker) override {
    if (is_worker) {
      worker_thread_count_.fetch_add(1, std::memory_order_relaxed);

      // Remember when this worker entered; paired with the read on exit.
      thread_start_times_.local() = std::chrono::steady_clock::now();
    }
  }

  // TBB callback: a thread left the scheduler.
  void on_scheduler_exit(bool is_worker) override {
    if (is_worker) {
      worker_thread_count_.fetch_sub(1, std::memory_order_relaxed);

      // Accumulate the time this worker spent inside the scheduler.
      const auto start_time = thread_start_times_.local();
      const auto end_time = std::chrono::steady_clock::now();
      const auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(
          end_time - start_time);

      total_execution_time_.fetch_add(duration.count(),
                                      std::memory_order_relaxed);
    }
  }

  // Number of worker threads currently inside the scheduler.
  size_t GetActiveWorkerCount() const {
    return worker_thread_count_.load(std::memory_order_acquire);
  }

  // Total residency time accumulated by workers that have exited so far.
  std::chrono::nanoseconds GetTotalExecutionTime() const {
    return std::chrono::nanoseconds(
        total_execution_time_.load(std::memory_order_acquire));
  }

 private:
  std::atomic<size_t> worker_thread_count_{0};
  std::atomic<uint64_t> total_execution_time_{0};
  // Per-thread entry timestamp (one slot per worker thread).
  tbb::enumerable_thread_specific<std::chrono::steady_clock::time_point>
      thread_start_times_;
};

// TBB原生的周期性任务调度器 - 零锁设计
class TbbPeriodicScheduler {
 public:
  struct PeriodicTask {
    std::function<void()> task_function;
    std::atomic<std::chrono::nanoseconds> period;
    std::atomic<std::chrono::steady_clock::time_point> next_execution;
    std::atomic<bool> enabled{true};
    std::atomic<uint64_t> execution_count{0};
    std::string name;

    PeriodicTask(const std::string& task_name,
                 std::chrono::nanoseconds task_period,
                 std::function<void()> func)
        : task_function(std::move(func)),
          period(task_period),
          next_execution(std::chrono::steady_clock::now() + task_period),
          name(task_name) {}
  };

  TbbPeriodicScheduler()
      : scheduler_arena_(1)  // 单线程调度器
        ,
        running_(false) {}

  ~TbbPeriodicScheduler() { Stop(); }

  // 添加周期性任务 - 使用TBB concurrent_vector
  void AddPeriodicTask(const std::string& name, std::chrono::nanoseconds period,
                       std::function<void()> task) {
    auto periodic_task =
        std::make_shared<PeriodicTask>(name, period, std::move(task));
    tasks_.push_back(periodic_task);
  }

  void Start() {
    bool expected = false;
    if (running_.compare_exchange_strong(expected, true,
                                         std::memory_order_acq_rel)) {
      scheduler_task_group_.run([this]() { SchedulerLoop(); });
    }
  }

  void Stop() {
    running_.store(false, std::memory_order_release);
    scheduler_task_group_.wait();
  }

  // 获取任务统计 - 无锁访问
  struct TaskStats {
    std::string name;
    uint64_t execution_count = 0;
    std::chrono::nanoseconds period;
    bool enabled = false;
  };

  std::vector<TaskStats> GetTaskStats() const {
    std::vector<TaskStats> stats;
    stats.reserve(tasks_.size());

    for (const auto& task : tasks_) {
      if (task) {
        TaskStats stat;
        stat.name = task->name;
        stat.execution_count =
            task->execution_count.load(std::memory_order_acquire);
        stat.period = task->period.load(std::memory_order_acquire);
        stat.enabled = task->enabled.load(std::memory_order_acquire);
        stats.push_back(stat);
      }
    }

    return stats;
  }

 private:
  void SchedulerLoop() {
    while (running_.load(std::memory_order_acquire)) {
      auto now = std::chrono::steady_clock::now();

      // 检查所有任务
      for (auto& task : tasks_) {
        if (!task || !task->enabled.load(std::memory_order_acquire)) {
          continue;
        }

        auto next_exec = task->next_execution.load(std::memory_order_acquire);
        if (now >= next_exec) {
          // 执行任务
          scheduler_arena_.execute([task, this]() {
            try {
              task->task_function();
              task->execution_count.fetch_add(1, std::memory_order_relaxed);

              // 计算下次执行时间
              auto period = task->period.load(std::memory_order_acquire);
              auto next_time = std::chrono::steady_clock::now() + period;
              task->next_execution.store(next_time, std::memory_order_release);
            } catch (const std::exception& e) {
              HandleTaskError(task->name, e);
            }
          });
        }
      }

      // 短暂休眠，避免忙等待
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
  }

  void HandleTaskError(const std::string& task_name, const std::exception& e) {
    error_count_.fetch_add(1, std::memory_order_relaxed);
    // 可以添加日志记录或其他错误处理逻辑
  }

  // 使用TBB的并发容器
  tbb::concurrent_vector<std::shared_ptr<PeriodicTask>> tasks_;

  // TBB任务管理
  tbb::task_arena scheduler_arena_;
  tbb::task_group scheduler_task_group_;

  std::atomic<bool> running_{false};
  std::atomic<uint64_t> error_count_{0};
};

// TBB原生的数据流管道
// TBB-native industrial data pipeline built on tbb::flow::graph.
//
// Items (itbb::TbbDataValue) are pushed into a concurrent queue; a
// flow-graph input node drains the queue and feeds validation, processing,
// storage and alarm-handling nodes.  Statistics are kept in an internal
// atomic struct and exposed to callers as a plain copyable snapshot.
class TbbIndustrialPipeline {
 public:
  // Pipeline statistics -- non-atomic snapshot, safe to copy between
  // threads once produced.
  struct PipelineStats {
    size_t queue_size;
    double throughput;
    uint64_t total_processed;
    size_t peak_queue_size;
    uint64_t total_input_items;
    uint64_t total_output_items;
    std::chrono::steady_clock::time_point start_time;

    // Default constructor: zeroed counters, start_time = now.
    PipelineStats()
        : queue_size(0),
          throughput(0.0),
          total_processed(0),
          peak_queue_size(0),
          total_input_items(0),
          total_output_items(0),
          start_time(std::chrono::steady_clock::now()) {}

    // Field-wise constructor used by AtomicStats::ToPipelineStats().
    PipelineStats(size_t qs, double tp, uint64_t tproc, size_t pqs,
                  uint64_t tin, uint64_t tout,
                  std::chrono::steady_clock::time_point st)
        : queue_size(qs),
          throughput(tp),
          total_processed(tproc),
          peak_queue_size(pqs),
          total_input_items(tin),
          total_output_items(tout),
          start_time(st) {}

    // Explicit copy constructor.
    // NOTE(review): all members are trivially copyable, so this and the
    // assignment operator below match the compiler-generated versions
    // (Rule of Zero would allow removing them).
    PipelineStats(const PipelineStats& other)
        : queue_size(other.queue_size),
          throughput(other.throughput),
          total_processed(other.total_processed),
          peak_queue_size(other.peak_queue_size),
          total_input_items(other.total_input_items),
          total_output_items(other.total_output_items),
          start_time(other.start_time) {}

    // Explicit copy-assignment operator (see note above).
    PipelineStats& operator=(const PipelineStats& other) {
      if (this != &other) {
        queue_size = other.queue_size;
        throughput = other.throughput;
        total_processed = other.total_processed;
        peak_queue_size = other.peak_queue_size;
        total_input_items = other.total_input_items;
        total_output_items = other.total_output_items;
        start_time = other.start_time;
      }
      return *this;
    }

    // Items processed per second since start_time (0.0 during the first
    // second, because the duration is truncated to whole seconds).
    double GetCurrentThroughput() const {
      auto now = std::chrono::steady_clock::now();
      auto duration =
          std::chrono::duration_cast<std::chrono::seconds>(now - start_time);

      if (duration.count() > 0) {
        return static_cast<double>(total_processed) / duration.count();
      }
      return 0.0;
    }
  };

 private:
  // Internal atomic statistics -- the only form mutated concurrently.
  struct AtomicStats {
    std::atomic<size_t> queue_size{0};
    std::atomic<double> throughput{0.0};
    std::atomic<uint64_t> total_processed{0};
    std::atomic<size_t> peak_queue_size{0};
    std::atomic<uint64_t> total_input_items{0};
    std::atomic<uint64_t> total_output_items{0};
    std::atomic<std::chrono::steady_clock::time_point> start_time{
        std::chrono::steady_clock::now()};

    // Reset every counter individually (atomics are not copyable, so a
    // whole-struct assignment is not possible).
    void Reset() {
      queue_size.store(0, std::memory_order_release);
      throughput.store(0.0, std::memory_order_release);
      total_processed.store(0, std::memory_order_release);
      peak_queue_size.store(0, std::memory_order_release);
      total_input_items.store(0, std::memory_order_release);
      total_output_items.store(0, std::memory_order_release);
      start_time.store(std::chrono::steady_clock::now(),
                       std::memory_order_release);
    }

    // Produce a plain snapshot.  Each field is loaded independently, so
    // the snapshot is not a single atomic cut across all counters.
    PipelineStats ToPipelineStats() const {
      return PipelineStats(queue_size.load(std::memory_order_acquire),
                           throughput.load(std::memory_order_acquire),
                           total_processed.load(std::memory_order_acquire),
                           peak_queue_size.load(std::memory_order_acquire),
                           total_input_items.load(std::memory_order_acquire),
                           total_output_items.load(std::memory_order_acquire),
                           start_time.load(std::memory_order_acquire));
    }
  };

  tbb::flow::graph flow_graph_;
  // Type-erased owners keeping the flow-graph nodes alive.
  std::vector<std::shared_ptr<void>> nodes_;

  // Ingress queue drained by the flow-graph source node.
  tbb::concurrent_queue<itbb::TbbDataValue> data_queue_;

  // mutable: GetPipelineStats() (const) refreshes the throughput field.
  mutable AtomicStats atomic_stats_;

  // Control flags.
  std::atomic<bool> running_{false};
  std::atomic<size_t> queue_capacity_limit_{10000};

  // Background performance-monitoring task.
  tbb::task_group monitoring_task_group_;

 public:
  TbbIndustrialPipeline()
      : flow_graph_(), data_queue_(), atomic_stats_(), running_(false) {}

  ~TbbIndustrialPipeline() { Stop(); }

  // Builds the flow-graph pipeline for DataType, marks the pipeline
  // running, activates the source and starts the monitoring loop.
  // NOTE(review): calling this twice rebuilds nodes over the same graph;
  // previous edges are not removed -- confirm single-call usage.
  template <typename DataType>
  void SetupDeviceDataPipeline() {
    using namespace tbb::flow;

    // Reset statistics.
    atomic_stats_.Reset();

    // Source node -- pulls items from the ingress queue; stops the graph
    // when the queue is empty or the pipeline is no longer running.
    auto device_source = std::make_shared<input_node<DataType>>(
        flow_graph_, [this](tbb::flow_control& fc) -> DataType {
          DataType data;
          if (!this->PopDataInternal(data)) {
            fc.stop();
            return DataType{};
          }
          // Update input statistics.
          // NOTE(review): total_input_items is also incremented in
          // PushData(), so items may be counted twice -- confirm intent.
          this->atomic_stats_.total_input_items.fetch_add(
              1, std::memory_order_relaxed);
          return data;
        });

    // Validation node -- parallel processing.
    auto validator = std::make_shared<function_node<DataType, DataType>>(
        flow_graph_,
        unlimited,  // let TBB balance the load automatically
        [this](const DataType& input) -> DataType {
          // Validate the data.
          auto validated = this->ValidateData(input);

          //// Update validation statistics (disabled).
          // if (this->IsValidData(validated)) {
          //   this->atomic_stats_.valid_items.fetch_add(
          //       1, std::memory_order_relaxed);
          // } else {
          //   this->atomic_stats_.invalid_items.fetch_add(
          //       1, std::memory_order_relaxed);
          // }

          return validated;
        });

    // Processing node -- uses TBB parallel algorithms per item.
    auto processor = std::make_shared<function_node<DataType, DataType>>(
        flow_graph_, unlimited, [this](const DataType& input) -> DataType {
          auto start_time = std::chrono::high_resolution_clock::now();

          auto result = this->ProcessDataWithTBB(input);

          // Update processing statistics (timing currently disabled).
          // auto end_time = std::chrono::high_resolution_clock::now();
          // auto duration =
          // std::chrono::duration_cast<std::chrono::microseconds>(
          //                    end_time - start_time)
          //                    .count();

          this->atomic_stats_.total_processed.fetch_add(
              1, std::memory_order_relaxed);
          // this->atomic_stats_.total_processing_time_us.fetch_add(
          //     duration, std::memory_order_relaxed);

          return result;
        });

    // Storage node -- terminal consumer of the main branch.
    auto storage =
        std::make_shared<function_node<DataType, tbb::flow::continue_msg>>(
            flow_graph_, unlimited,
            [this](const DataType& data) -> tbb::flow::continue_msg {
              this->StoreData(data);
              this->atomic_stats_.total_output_items.fetch_add(
                  1, std::memory_order_relaxed);
              return tbb::flow::continue_msg{};
            });

    // Alarm branch: condition check ...
    auto alarm_checker = std::make_shared<function_node<DataType, bool>>(
        flow_graph_, unlimited, [this](const DataType& data) -> bool {
          return this->CheckAlarmConditions(data);
        });

    // ... followed by the alarm handler.
    auto alarm_handler =
        std::make_shared<function_node<bool, tbb::flow::continue_msg>>(
            flow_graph_, unlimited,
            [this](bool alarm_triggered) -> tbb::flow::continue_msg {
              if (alarm_triggered) {
                this->HandleAlarm();
              }
              return tbb::flow::continue_msg{};
            });

    // Wire up the main path of the flow graph.
    make_edge(*device_source, *validator);
    make_edge(*validator, *processor);
    make_edge(*processor, *storage);

    // Wire up the alarm branch off the validator.
    make_edge(*validator, *alarm_checker);
    make_edge(*alarm_checker, *alarm_handler);

    // Keep the nodes alive for the lifetime of the pipeline.
    nodes_.clear();
    nodes_.push_back(device_source);
    nodes_.push_back(validator);
    nodes_.push_back(processor);
    nodes_.push_back(storage);
    nodes_.push_back(alarm_checker);
    nodes_.push_back(alarm_handler);

    running_.store(true, std::memory_order_release);

    // Start pulling data through the graph.
    device_source->activate();

    // Start the background performance monitor.
    StartPerformanceMonitoring();
  }

  // Pushes a typed item into the pipeline.  Silently drops the item when
  // the pipeline is stopped or the queue is at its capacity limit.
  template <typename DataType>
  void PushData(const DataType& data) {
    if (!running_.load(std::memory_order_acquire)) {
      return;
    }

    // Enforce the (approximate) capacity limit.  unsafe_size() is only an
    // estimate under concurrent access.
    size_t current_size = data_queue_.unsafe_size();
    if (current_size >= queue_capacity_limit_.load(std::memory_order_acquire)) {
      return;  // queue full -- drop the item
    }

    // Wrap the value in TbbDataValue and enqueue it.
    itbb::TbbDataValue data_value(data);
    data_queue_.push(data_value);

    // Update statistics.
    current_size = data_queue_.unsafe_size();
    atomic_stats_.queue_size.store(current_size, std::memory_order_release);
    atomic_stats_.total_input_items.fetch_add(1, std::memory_order_relaxed);

    // Maintain the peak queue size via a CAS loop.
    size_t current_peak =
        atomic_stats_.peak_queue_size.load(std::memory_order_acquire);
    while (current_size > current_peak &&
           !atomic_stats_.peak_queue_size.compare_exchange_weak(current_peak,
                                                                current_size)) {
    }
  }

  // Pushes an already-wrapped TbbDataValue.  Same drop semantics as the
  // templated overload above.
  void PushData(const itbb::TbbDataValue& data) {
    if (!running_.load(std::memory_order_acquire)) {
      return;
    }

    // Enforce the (approximate) capacity limit.
    size_t current_size = data_queue_.unsafe_size();
    if (current_size >= queue_capacity_limit_.load(std::memory_order_acquire)) {
      return;  // queue full -- drop the item
    }

    data_queue_.push(data);

    // Update statistics.
    current_size = data_queue_.unsafe_size();
    atomic_stats_.queue_size.store(current_size, std::memory_order_release);
    atomic_stats_.total_input_items.fetch_add(1, std::memory_order_relaxed);

    // Maintain the peak queue size via a CAS loop.
    size_t current_peak =
        atomic_stats_.peak_queue_size.load(std::memory_order_acquire);
    while (current_size > current_peak &&
           !atomic_stats_.peak_queue_size.compare_exchange_weak(current_peak,
                                                                current_size)) {
    }
  }

  // Pops one item from the ingress queue; returns a default-constructed
  // TbbDataValue when the queue is empty (indistinguishable from a real
  // empty value -- callers should check IsEmpty() first if that matters).
  itbb::TbbDataValue PopData() {
    itbb::TbbDataValue data;
    if (data_queue_.try_pop(data)) {
      // Refresh the queue-size statistic.
      atomic_stats_.queue_size.store(data_queue_.unsafe_size(),
                                     std::memory_order_release);
      return data;
    }

    // Queue empty: return an empty value.
    return itbb::TbbDataValue();
  }

  // Returns a snapshot of the pipeline statistics with a freshly computed
  // throughput (also written back into the atomic stats as a side effect).
  PipelineStats GetPipelineStats() const {
    // Take a snapshot of the atomic counters.
    auto stats_snapshot = atomic_stats_.ToPipelineStats();

    // Compute the current throughput and place it in the snapshot.
    double current_throughput = stats_snapshot.GetCurrentThroughput();
    stats_snapshot.throughput = current_throughput;

    // Also publish it to the atomic stats (hence `mutable atomic_stats_`).
    atomic_stats_.throughput.store(current_throughput,
                                   std::memory_order_release);

    return stats_snapshot;
  }

  // True when the ingress queue holds no items.
  bool IsEmpty() const { return data_queue_.empty(); }

  // Last published queue size (may lag the true size slightly).
  size_t GetQueueSize() const {
    return atomic_stats_.queue_size.load(std::memory_order_acquire);
  }

  // Blocks until all in-flight flow-graph work has drained.
  void WaitForCompletion() { flow_graph_.wait_for_all(); }

  // Stops the pipeline: flips the running flag, joins the monitor thread,
  // drains the flow graph and empties the ingress queue.
  void Stop() {
    running_.store(false, std::memory_order_release);

    // Stop the performance monitor (its loop exits once running_ is false).
    StopPerformanceMonitoring();

    // Wait for the flow graph to finish outstanding work.
    flow_graph_.wait_for_all();

    // Drain any items left in the queue.
    itbb::TbbDataValue dummy;
    while (data_queue_.try_pop(dummy)) {
      // discard
    }
  }

  // Stops the pipeline and clears statistics and node ownership.
  void Reset() {
    Stop();

    // Reset statistics.
    atomic_stats_.Reset();

    // Release the flow-graph nodes.
    nodes_.clear();
  }

  // Records a desired per-node concurrency in the global config.
  // NOTE(review): this only stores the value; nothing visible here applies
  // it to the live flow-graph nodes -- confirm a consumer exists.
  void SetConcurrency(const std::string& node_name, size_t concurrency) {
    auto& config = itbb::GlobalConfigManager::GetInstance();
    config.SetConfig("pipeline." + node_name + ".concurrency",
                     static_cast<double>(concurrency));
  }

  // Sets the soft capacity limit of the ingress queue.
  void SetQueueCapacityLimit(size_t limit) {
    queue_capacity_limit_.store(limit, std::memory_order_release);
  }

  // Returns the current soft capacity limit.
  size_t GetQueueCapacityLimit() const {
    return queue_capacity_limit_.load(std::memory_order_acquire);
  }

  // True between SetupDeviceDataPipeline() and Stop().
  bool IsRunning() const { return running_.load(std::memory_order_acquire); }

 private:
  // Pops one item and converts it to DataType via TbbDataValue::Visit.
  // Returns false when stopped, the queue is empty, or the stored type
  // does not match DataType.
  template <typename DataType>
  bool PopDataInternal(DataType& data) {
    if (!running_.load(std::memory_order_acquire)) {
      return false;
    }

    itbb::TbbDataValue queue_data;
    if (data_queue_.try_pop(queue_data)) {
      // Attempt to extract the value as DataType.
      try {
        bool conversion_success = false;
        queue_data.Visit([&data, &conversion_success](const auto& value) {
          using T = std::decay_t<decltype(value)>;
          if constexpr (std::is_same_v<T, DataType>) {
            data = value;
            conversion_success = true;
          }
        });

        if (conversion_success) {
          // Refresh the queue-size statistic.
          atomic_stats_.queue_size.store(data_queue_.unsafe_size(),
                                         std::memory_order_release);
          return true;
        }
      } catch (const std::exception& e) {
        // Conversion failed; the popped item is dropped.
        return false;
      }
    }

    return false;
  }

  // Validation hook -- currently a pass-through.
  template <typename DataType>
  DataType ValidateData(const DataType& input) {
    // Basic validation logic (placeholder).
    return input;
  }

  // Processing hook: for vector<int32_t> applies x -> 2x + 1 in parallel
  // via tbb::parallel_for; all other types pass through unchanged.
  template <typename DataType>
  DataType ProcessDataWithTBB(const DataType& input) {
    // Use TBB parallel algorithms to process the data.
    if constexpr (std::is_same_v<DataType, std::vector<int32_t>>) {
      auto result = input;

      // Parallel element-wise transform.
      tbb::parallel_for(tbb::blocked_range<size_t>(0, result.size()),
                        [&result](const tbb::blocked_range<size_t>& range) {
                          for (size_t i = range.begin(); i != range.end();
                               ++i) {
                            // Example transformation.
                            result[i] = result[i] * 2 + 1;
                          }
                        });

      return result;
    } else {
      return input;
    }
  }

  // Storage hook -- records the store timestamp in the global config.
  template <typename DataType>
  void StoreData(const DataType& data) {
    // Publish the last-stored timestamp (ms since steady_clock epoch).
    auto& config = itbb::GlobalConfigManager::GetInstance();
    config.SetConfig(
        "pipeline.last_stored_timestamp",
        static_cast<double>(
            std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::steady_clock::now().time_since_epoch())
                .count()));
  }

  // Alarm condition: for vector<int32_t>, triggers when any element
  // exceeds 10000; all other types never trigger.
  template <typename DataType>
  bool CheckAlarmConditions(const DataType& data) {
    if constexpr (std::is_same_v<DataType, std::vector<int32_t>>) {
      if (!data.empty()) {
        auto max_element = *std::max_element(data.begin(), data.end());
        return max_element > 10000;  // threshold check
      }
    }
    return false;
  }

  // Alarm handler -- bumps the alarm counter and timestamp in the config.
  // NOTE(review): the read-modify-write of alarm_count is not atomic
  // across concurrent alarms; increments may be lost under contention.
  void HandleAlarm() {
    auto& config = itbb::GlobalConfigManager::GetInstance();
    config.SetConfig("pipeline.alarm_count",
                     config.GetConfig("pipeline.alarm_count", 0.0) + 1.0);
    config.SetConfig(
        "pipeline.last_alarm_timestamp",
        static_cast<double>(
            std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::steady_clock::now().time_since_epoch())
                .count()));
  }

  // Publishes throughput/queue statistics once per second while running.
  // NOTE(review): this loop occupies one TBB worker thread (sleeping) for
  // the lifetime of the pipeline.
  void StartPerformanceMonitoring() {
    monitoring_task_group_.run([this]() {
      while (running_.load(std::memory_order_acquire)) {
        // Compute and publish the current throughput.
        auto stats = atomic_stats_.ToPipelineStats();
        double current_throughput = stats.GetCurrentThroughput();
        atomic_stats_.throughput.store(current_throughput,
                                       std::memory_order_release);

        // Mirror key metrics into the global config.
        auto& config = itbb::GlobalConfigManager::GetInstance();
        config.SetConfig("pipeline.throughput", current_throughput);
        config.SetConfig("pipeline.queue_size",
                         static_cast<double>(atomic_stats_.queue_size.load(
                             std::memory_order_acquire)));
        config.SetConfig("pipeline.total_processed",
                         static_cast<double>(atomic_stats_.total_processed.load(
                             std::memory_order_acquire)));

        std::this_thread::sleep_for(std::chrono::seconds(1));
      }
    });
  }

  // Joins the monitoring task; running_ must already be false or this
  // blocks until it becomes false (see Stop() for the correct order).
  void StopPerformanceMonitoring() { monitoring_task_group_.wait(); }
};

// TBB原生的错误恢复系统
class TbbErrorRecoveryManager {
 public:
  struct DeviceErrorState {
    std::atomic<int> consecutive_errors{0};
    std::atomic<std::chrono::steady_clock::time_point> last_error{
        std::chrono::steady_clock::now()};
    std::atomic<std::chrono::milliseconds> backoff_delay{
        std::chrono::milliseconds(100)};
    std::atomic<bool> in_recovery{false};
  };

  TbbErrorRecoveryManager() = default;

  ~TbbErrorRecoveryManager() { recovery_task_group_.wait(); }

  void HandleError(const std::string& device_id, const std::exception& error) {
    // 使用TBB concurrent_hash_map
    DeviceErrorMap::accessor accessor;
    error_states_.insert(accessor, device_id);

    auto& state = accessor->second;
    int current_errors =
        state.consecutive_errors.fetch_add(1, std::memory_order_acq_rel) + 1;
    state.last_error.store(std::chrono::steady_clock::now(),
                           std::memory_order_release);

    // 动态调整退避延时
    auto new_delay =
        std::chrono::milliseconds(100 * (1 << std::min(current_errors, 10)));
    state.backoff_delay.store(new_delay, std::memory_order_release);

    // 启动恢复过程 - 使用TBB任务
    if (current_errors >= 3 &&
        !state.in_recovery.exchange(true, std::memory_order_acq_rel)) {
      recovery_task_group_.run(
          [this, device_id]() { StartRecoveryProcess(device_id); });
    }
  }

  void NotifySuccess(const std::string& device_id) {
    DeviceErrorMap::accessor accessor;
    if (error_states_.find(accessor, device_id)) {
      accessor->second.consecutive_errors.store(0, std::memory_order_release);
      accessor->second.in_recovery.store(false, std::memory_order_release);
    }
  }

 private:
  using DeviceErrorMap =
      tbb::concurrent_hash_map<std::string, DeviceErrorState>;
  DeviceErrorMap error_states_;
  tbb::task_group recovery_task_group_;

  void StartRecoveryProcess(const std::string& device_id) {
    // 恢复逻辑 - 可以使用TBB的并行算法
    try {
      // 恢复步骤
      RecoverDevice(device_id);
      NotifySuccess(device_id);
    } catch (const std::exception& e) {
      // 延时后重试
      DeviceErrorMap::const_accessor accessor;
      if (error_states_.find(accessor, device_id)) {
        auto delay =
            accessor->second.backoff_delay.load(std::memory_order_acquire);
        std::this_thread::sleep_for(delay);

        // 递归重试
        recovery_task_group_.run(
            [this, device_id]() { StartRecoveryProcess(device_id); });
      }
    }
  }

  void RecoverDevice(const std::string& device_id) {
    // 设备恢复逻辑
    // 这里应该实现具体的设备恢复步骤
  }
};

// 主框架类 - 完全TBB化
class IndustrialFramework {
 public:
  IndustrialFramework()
      : task_arena_(tbb::task_arena::automatic),
        global_control_(tbb::global_control::max_allowed_parallelism,
                        tbb::info::default_concurrency()) {}

  ~IndustrialFramework() { Shutdown(); }

  // 初始化 - 完全基于TBB
  void Initialize() {
    // 启动周期性调度器
    periodic_scheduler_.Start();

    // 更新性能指标的周期性任务
    AddPeriodicTask("performance_metrics_update", std::chrono::seconds(1),
                    [this]() { UpdatePerformanceMetrics(); });
  }

  void Shutdown() {
    // 停止周期性调度器
    periodic_scheduler_.Stop();

    // 等待所有任务完成
    main_task_group_.wait();
  }

  // 添加周期性任务 - 直接委托给TBB调度器
  void AddPeriodicTask(const std::string& name,
                       std::chrono::milliseconds period,
                       std::function<void()> task) {
    periodic_scheduler_.AddPeriodicTask(
        name, std::chrono::duration_cast<std::chrono::nanoseconds>(period),
        std::move(task));
  }

  // 提交任务 - 使用TBB任务组
  void SubmitTask(std::function<void()> task) {
    auto wrapper = [this, task_ = std::move(task)]() {
      try {
        task_();
      } catch (const std::exception& e) {
        HandleTaskException(e);
      } catch (...) {
        HandleUnknownException();
      }
    };

    main_task_group_.run(std::move(wrapper));
  }

  // 模板便利接口：自动转换为 std::function
  template <typename Task>
  void SubmitTask(Task&& task) {
    static_assert(std::is_invocable_v<std::decay_t<Task>>,
                  "Task must be invocable with no arguments");

    // 🔧 核心思路：让 std::function 处理所有复杂的类型转换
    SubmitTask(std::function<void()>{std::forward<Task>(task)});
  }

  // 带返回值的任务提交
  template <typename Task>
  auto SubmitTaskWithResult(Task&& task)
      -> std::future<std::invoke_result_t<Task>> {
    using ReturnType = std::invoke_result_t<Task>;

    // 创建 promise/future 对
    auto promise = std::make_shared<std::promise<ReturnType>>();
    auto future = promise->get_future();

    // 提交任务
    main_task_group_.run(
        [task = std::forward<Task>(task), promise]() mutable -> void {
          try {
            if constexpr (std::is_void_v<ReturnType>) {
              task();
              promise->set_value();
            } else {
              auto result = task();
              promise->set_value(std::move(result));
            }
          } catch (...) {
            promise->set_exception(std::current_exception());
          }
        });

    return future;
  }

  // 批量提交任务
  template <typename TaskContainer>
  void SubmitTaskBatch(TaskContainer&& tasks) {
    for (auto&& task : tasks) {
      SubmitTask(std::forward<decltype(task)>(task));
    }
  }

  // 并行执行任务组
  template <typename... Tasks>
  void SubmitTaskGroup(Tasks&&... tasks) {
    (SubmitTask(std::forward<Tasks>(tasks)), ...);
  }

  // 带优先级的任务提交
  enum class TaskPriority { Low, Normal, High, Critical };

  template <typename Task>
  void SubmitTaskWithPriority(Task&& task,
                              TaskPriority priority = TaskPriority::Normal) {
    switch (priority) {
      case TaskPriority::Critical:
      case TaskPriority::High:
        // 高优先级任务立即执行
        task_arena_.execute([this, task = std::forward<Task>(task)]() mutable {
          SubmitTask(std::move(task));
        });
        break;

      case TaskPriority::Normal:
      case TaskPriority::Low:
      default:
        // 普通优先级任务
        SubmitTask(std::forward<Task>(task));
        break;
    }
  }

  // 延迟任务提交
  template <typename Task>
  void SubmitDelayedTask(Task&& task, std::chrono::milliseconds delay) {
    std::thread([this, task = std::forward<Task>(task), delay]() mutable {
      std::this_thread::sleep_for(delay);
      SubmitTask(std::move(task));
    }).detach();
  }

  // 周期性任务提交
  template <typename Task>
  void SubmitPeriodicTask(
      Task&& task, std::chrono::milliseconds period,
      size_t max_iterations = std::numeric_limits<size_t>::max()) {
    auto shared_task =
        std::make_shared<std::decay_t<Task>>(std::forward<Task>(task));
    auto counter = std::make_shared<std::atomic<size_t>>(0);

    std::thread([this, shared_task, period, max_iterations, counter]() {
      while (counter->fetch_add(1) < max_iterations) {
        SubmitTask([shared_task]() { (*shared_task)(); });
        std::this_thread::sleep_for(period);
      }
    }).detach();
  }

  // 条件任务提交
  template <typename Task, typename Condition>
  void SubmitConditionalTask(Task&& task, Condition&& condition) {
    SubmitTask([task = std::forward<Task>(task),
                condition = std::forward<Condition>(condition)]() mutable {
      if (condition()) {
        task();
      }
    });
  }

  // 等待所有任务完成
  void WaitForAllTasks() { main_task_group_.wait(); }

  // 取消所有未完成的任务
  void CancelAllTasks() { main_task_group_.cancel(); }

  // 获取任务组状态
  bool IsTaskGroupActive() const {
    // 这里可能需要额外的状态跟踪
    return true;  // 简化实现
  }

  // 获取性能指标 - 集成TBB监控
  struct PerformanceMetrics {
    size_t active_threads;
    std::chrono::nanoseconds total_execution_time;
    size_t tasks_executed;
    double cpu_utilization;
  };

  PerformanceMetrics GetPerformanceMetrics() const {
    PerformanceMetrics metrics{};
    metrics.active_threads = task_observer_.GetActiveWorkerCount();
    metrics.total_execution_time = task_observer_.GetTotalExecutionTime();

    // 从配置中获取其他指标
    auto& config = GlobalConfigManager::GetInstance();
    metrics.tasks_executed =
        static_cast<size_t>(config.GetConfig("framework.tasks_executed", 0.0));
    metrics.cpu_utilization =
        config.GetConfig("framework.cpu_utilization", 0.0);

    return metrics;
  }

  // 动态配置接口
  void SetMaxThreads(size_t max_threads) {
    // 注意：在现代TBB中，global_control需要重新创建来改变线程数
    auto& config = GlobalConfigManager::GetInstance();
    config.SetConfig("framework.max_threads", static_cast<double>(max_threads));
  }

  void SetSchedulerPrecision(std::chrono::milliseconds precision) {
    auto& config = GlobalConfigManager::GetInstance();
    config.SetConfig("framework.scheduler_precision_ms",
                     static_cast<double>(precision.count()));
  }

  // 获取各个组件的引用
  TbbPeriodicScheduler& GetScheduler() { return periodic_scheduler_; }
  TbbIndustrialPipeline& GetPipeline() { return data_pipeline_; }
  TbbErrorRecoveryManager& GetErrorManager() { return error_manager_; }

 private:
  // TBB组件
  IndustrialTaskObserver task_observer_;
  TbbPeriodicScheduler periodic_scheduler_;
  TbbIndustrialPipeline data_pipeline_;
  TbbErrorRecoveryManager error_manager_;
  tbb::task_group main_task_group_;
  tbb::task_arena task_arena_;
  tbb::global_control global_control_;

  void UpdatePerformanceMetrics() {
    auto& config = GlobalConfigManager::GetInstance();

    // 更新CPU利用率等指标
    auto stats = periodic_scheduler_.GetTaskStats();
    size_t total_executions = 0;
    for (const auto& stat : stats) {
      total_executions += static_cast<size_t>(stat.execution_count);
    }

    config.SetConfig("framework.tasks_executed",
                     static_cast<double>(total_executions));

    // 简化的CPU利用率计算
    double cpu_util = std::min(
        100.0, static_cast<double>(task_observer_.GetActiveWorkerCount()) /
                   tbb::info::default_concurrency() * 100.0);
    config.SetConfig("framework.cpu_utilization", cpu_util);
  }

  void HandleTaskException(const std::exception& e) {
    // 记录异常日志
    std::cerr << "Task exception: " << e.what() << std::endl;
  }

  void HandleUnknownException() {
    // 记录未知异常
    std::cerr << "Unknown task exception" << std::endl;
  }
};
}  // namespace itbb

#endif  // ITBB_CORE_FRAMEWORK_H_
