// Copyright (c) 2024 IndustrialTBB Framework
// Licensed under MIT License

#ifndef ITBB_CORE_TBB_CONCURRENT_CONTAINERS_H_
#define ITBB_CORE_TBB_CONCURRENT_CONTAINERS_H_

#include <oneapi/tbb.h>
#include <oneapi/tbb/concurrent_hash_map.h>
#include <oneapi/tbb/concurrent_priority_queue.h>
#include <oneapi/tbb/concurrent_queue.h>
#include <oneapi/tbb/concurrent_vector.h>

#include <atomic>
#include <chrono>
#include <functional>
#include <memory>

namespace itbb {
// TBB-native time-series data container.
//
// Append-only tbb::concurrent_vector of timestamped samples. Overflowing
// max_size_ only flags the oldest entries as invalid; Cleanup() must be
// called (while no other thread uses the container) to actually reclaim
// the storage.
template <typename DataType>
class TbbTimeSeriesContainer {
 public:
  struct TimeSeriesEntry {
    std::chrono::steady_clock::time_point timestamp;
    DataType data;
    std::atomic<bool> is_valid{true};

    TimeSeriesEntry(const DataType& d)
        : timestamp(std::chrono::steady_clock::now()), data(d) {}

    // std::atomic implicitly deletes the copy operations, which would make
    // this struct unusable with concurrent_vector::push_back and with the
    // copying pass in Cleanup(). Define them explicitly, transferring the
    // validity flag by value.
    TimeSeriesEntry(const TimeSeriesEntry& other)
        : timestamp(other.timestamp),
          data(other.data),
          is_valid(other.is_valid.load(std::memory_order_acquire)) {}

    TimeSeriesEntry& operator=(const TimeSeriesEntry& other) {
      if (this != &other) {
        timestamp = other.timestamp;
        data = other.data;
        is_valid.store(other.is_valid.load(std::memory_order_acquire),
                       std::memory_order_release);
      }
      return *this;
    }
  };

  TbbTimeSeriesContainer(size_t max_size = 10000) : max_size_(max_size) {}

  // Appends one data point stamped with the current steady-clock time.
  void AddDataPoint(const DataType& data) {
    entries_.push_back(TimeSeriesEntry(data));

    // Past the size limit, flag the oldest entries as invalid (they are
    // reclaimed later by Cleanup()).
    if (entries_.size() > max_size_) {
      size_t excess = entries_.size() - max_size_;
      for (size_t i = 0; i < excess; ++i) {
        if (i < entries_.size()) {
          entries_[i].is_valid.store(false, std::memory_order_release);
        }
      }
    }
  }

  // Returns copies of all valid data points whose timestamps fall inside
  // [start, end] (inclusive). Order of the result is unspecified because
  // the filter runs in parallel.
  std::vector<DataType> GetDataInRange(
      std::chrono::steady_clock::time_point start,
      std::chrono::steady_clock::time_point end) const {
    std::vector<DataType> result;

    // Filter the entries with a TBB parallel algorithm.
    tbb::concurrent_vector<DataType> filtered_data;

    tbb::parallel_for_each(
        entries_.begin(), entries_.end(), [&](const TimeSeriesEntry& entry) {
          if (entry.is_valid.load(std::memory_order_acquire) &&
              entry.timestamp >= start && entry.timestamp <= end) {
            filtered_data.push_back(entry.data);
          }
        });

    result.assign(filtered_data.begin(), filtered_data.end());
    return result;
  }

  // Returns up to |count| of the most recently added data points (fewer if
  // some of the newest entries were flagged invalid).
  std::vector<DataType> GetLatestData(size_t count) const {
    std::vector<DataType> result;
    result.reserve(count);

    size_t start_index = entries_.size() > count ? entries_.size() - count : 0;

    for (size_t i = start_index; i < entries_.size(); ++i) {
      if (entries_[i].is_valid.load(std::memory_order_acquire)) {
        result.push_back(entries_[i].data);
      }
    }

    return result;
  }

  // Rebuilds the storage keeping only valid entries.
  // NOTE(review): replaces entries_ wholesale, so this must not run
  // concurrently with readers or AddDataPoint.
  void Cleanup() {
    // Build a new vector containing only the valid entries.
    tbb::concurrent_vector<TimeSeriesEntry> new_entries;

    tbb::parallel_for_each(
        entries_.begin(), entries_.end(), [&](const TimeSeriesEntry& entry) {
          if (entry.is_valid.load(std::memory_order_acquire)) {
            new_entries.push_back(entry);
          }
        });

    entries_ = std::move(new_entries);
  }

  size_t Size() const { return entries_.size(); }
  bool Empty() const { return entries_.empty(); }

 private:
  tbb::concurrent_vector<TimeSeriesEntry> entries_;
  size_t max_size_;
};

// TBB原生的LRU缓存
template <typename KeyType, typename ValueType>
class TbbLRUCache {
 public:
  explicit TbbLRUCache(size_t max_size) : max_size_(max_size) {}

  // 获取缓存项
  bool Get(const KeyType& key, ValueType& value) {
    CacheMap::const_accessor accessor;
    if (cache_map_.find(accessor, key)) {
      value = accessor->second.value;

      // 更新访问时间
      accessor->second.last_access.store(std::chrono::steady_clock::now(),
                                         std::memory_order_release);
      accessor->second.access_count.fetch_add(1, std::memory_order_relaxed);

      return true;
    }
    return false;
  }

  // 设置缓存项
  void Set(const KeyType& key, const ValueType& value) {
    CacheMap::accessor accessor;

    if (cache_map_.insert(accessor, key)) {
      // 新插入的项
      accessor->second.value = value;
      accessor->second.last_access.store(std::chrono::steady_clock::now(),
                                         std::memory_order_release);
      accessor->second.access_count.store(1, std::memory_order_release);

      // 检查是否需要清理
      if (cache_map_.size() > max_size_) {
        CleanupOldEntries();
      }
    } else {
      // 更新现有项
      accessor->second.value = value;
      accessor->second.last_access.store(std::chrono::steady_clock::now(),
                                         std::memory_order_release);
      accessor->second.access_count.fetch_add(1, std::memory_order_relaxed);
    }
  }

  // 删除缓存项
  bool Remove(const KeyType& key) { return cache_map_.erase(key); }

  // 清空缓存
  void Clear() { cache_map_.clear(); }

  // 获取缓存统计
  struct CacheStats {
    size_t total_items;
    size_t total_accesses;
    double hit_rate;
    std::chrono::steady_clock::time_point oldest_access;
    std::chrono::steady_clock::time_point newest_access;
  };

  CacheStats GetStats() const {
    CacheStats stats{};
    stats.total_items = cache_map_.size();

    auto oldest = std::chrono::steady_clock::now();
    auto newest = std::chrono::steady_clock::time_point::min();

    for (CacheMap::const_iterator it = cache_map_.begin();
         it != cache_map_.end(); ++it) {
      stats.total_accesses +=
          it->second.access_count.load(std::memory_order_acquire);

      auto access_time = it->second.last_access.load(std::memory_order_acquire);
      if (access_time < oldest) oldest = access_time;
      if (access_time > newest) newest = access_time;
    }

    stats.oldest_access = oldest;
    stats.newest_access = newest;

    // 计算命中率需要额外的统计信息
    stats.hit_rate = 0.0;  // 简化实现

    return stats;
  }

 private:
  struct CacheEntry {
    ValueType value;
    std::atomic<std::chrono::steady_clock::time_point> last_access{
        std::chrono::steady_clock::now()};
    std::atomic<uint64_t> access_count{0};
  };

  using CacheMap = tbb::concurrent_hash_map<KeyType, CacheEntry>;
  CacheMap cache_map_;
  size_t max_size_;

  void CleanupOldEntries() {
    if (cache_map_.size() <= max_size_) return;

    // 收集所有条目的访问时间
    std::vector<std::pair<KeyType, std::chrono::steady_clock::time_point>>
        entries;

    for (CacheMap::const_iterator it = cache_map_.begin();
         it != cache_map_.end(); ++it) {
      entries.emplace_back(
          it->first, it->second.last_access.load(std::memory_order_acquire));
    }

    // 按访问时间排序
    tbb::parallel_sort(
        entries.begin(), entries.end(),
        [](const auto& a, const auto& b) { return a.second < b.second; });

    // 删除最旧的条目
    size_t to_remove = cache_map_.size() - max_size_;
    for (size_t i = 0; i < to_remove && i < entries.size(); ++i) {
      cache_map_.erase(entries[i].first);
    }
  }
};

// TBB-native priority queue wrapper.
//
// Thin facade over tbb::concurrent_priority_queue adding a best-effort
// capacity bound and batch retrieval.
template <typename DataType, typename Compare = std::less<DataType>>
class TbbPriorityBuffer {
 public:
  explicit TbbPriorityBuffer(size_t max_size = SIZE_MAX)
      : max_size_(max_size) {}

  // Inserts an element; returns false when the buffer is full.
  // NOTE(review): the size check and the push are not one atomic step, so
  // under concurrent use the bound is approximate — confirm callers accept
  // a best-effort limit.
  bool Push(const DataType& item) {
    const bool full = queue_.size() >= max_size_;
    if (full) {
      return false;  // buffer is full
    }
    queue_.push(item);
    return true;
  }

  // Removes the highest-priority element into |item|; false when empty.
  bool Pop(DataType& item) { return queue_.try_pop(item); }

  // Removes up to |max_count| elements, highest priority first.
  std::vector<DataType> PopBatch(size_t max_count) {
    std::vector<DataType> drained;
    drained.reserve(max_count);

    DataType next;
    while (drained.size() < max_count && queue_.try_pop(next)) {
      drained.push_back(std::move(next));
    }

    return drained;
  }

  // Current number of queued elements.
  size_t Size() const { return queue_.size(); }

  bool Empty() const { return queue_.empty(); }

  // Discards every queued element via try_pop, which stays safe even if
  // other threads are pushing or popping concurrently.
  void Clear() {
    DataType discarded;
    while (queue_.try_pop(discarded)) {
      // drain until empty
    }
  }

 private:
  tbb::concurrent_priority_queue<DataType, Compare> queue_;
  size_t max_size_;
};

// TBB-native ring buffer.
//
// Fixed-capacity circular buffer over a tbb::concurrent_vector. The head_,
// tail_, and size_ indices are three independent atomics updated without any
// combined synchronization, so Push() performs a non-atomic load-then-store
// sequence on tail_.
// NOTE(review): this looks safe only for a single producer — concurrent
// Push() calls can race on tail_, and readers (Get/GetAllData) may observe
// an element while a wrapped-around Push() overwrites it. Confirm the
// intended concurrency contract with callers.
// NOTE(review): assumes capacity > 0 (indices are computed modulo
// capacity_) — verify at the construction sites.
template <typename DataType>
class TbbRingBuffer {
 public:
  explicit TbbRingBuffer(size_t capacity)
      : buffer_(capacity), capacity_(capacity), head_(0), tail_(0), size_(0) {}

  // Appends an element, overwriting the oldest data once the buffer is full.
  void Push(const DataType& item) {
    size_t current_tail = tail_.load(std::memory_order_acquire);
    buffer_[current_tail] = item;

    size_t next_tail = (current_tail + 1) % capacity_;
    tail_.store(next_tail, std::memory_order_release);

    // Update the size and, once full, advance the head past the element
    // that was just overwritten.
    size_t current_size = size_.load(std::memory_order_acquire);
    if (current_size < capacity_) {
      size_.fetch_add(1, std::memory_order_release);
    } else {
      // Buffer is full: move the head pointer forward.
      head_.store((head_.load(std::memory_order_acquire) + 1) % capacity_,
                  std::memory_order_release);
    }
  }

  // Copies the element at logical |index| (0 = oldest) into |item| without
  // removing it; returns false when the index is out of range.
  bool Get(size_t index, DataType& item) const {
    size_t current_size = size_.load(std::memory_order_acquire);
    if (index >= current_size) {
      return false;
    }

    size_t current_head = head_.load(std::memory_order_acquire);
    size_t actual_index = (current_head + index) % capacity_;
    item = buffer_[actual_index];

    return true;
  }

  // Copies the most recently pushed element into |item|; false when empty.
  bool GetLatest(DataType& item) const {
    size_t current_size = size_.load(std::memory_order_acquire);
    if (current_size == 0) {
      return false;
    }

    // The newest element sits one slot behind the tail.
    size_t current_tail = tail_.load(std::memory_order_acquire);
    size_t latest_index = (current_tail + capacity_ - 1) % capacity_;
    item = buffer_[latest_index];

    return true;
  }

  // Returns a copy of all stored elements, oldest first.
  std::vector<DataType> GetAllData() const {
    std::vector<DataType> result;
    size_t current_size = size_.load(std::memory_order_acquire);
    result.reserve(current_size);

    size_t current_head = head_.load(std::memory_order_acquire);
    for (size_t i = 0; i < current_size; ++i) {
      size_t index = (current_head + i) % capacity_;
      result.push_back(buffer_[index]);
    }

    return result;
  }

  size_t Size() const { return size_.load(std::memory_order_acquire); }

  size_t Capacity() const { return capacity_; }

  bool Empty() const { return size_.load(std::memory_order_acquire) == 0; }

  bool Full() const {
    return size_.load(std::memory_order_acquire) == capacity_;
  }

 private:
  tbb::concurrent_vector<DataType> buffer_;
  size_t capacity_;
  std::atomic<size_t> head_;
  std::atomic<size_t> tail_;
  std::atomic<size_t> size_;
};

// TBB-native multi-producer multi-consumer queue.
//
// Unbounded tbb::concurrent_queue plus relaxed atomic counters that track
// how many items have been produced and consumed.
template <typename DataType>
class TbbMPMCQueue {
 public:
  TbbMPMCQueue() = default;

  // --- Producer side ---

  // Enqueues one item.
  void Produce(const DataType& item) {
    queue_.push(item);
    produced_count_.fetch_add(1, std::memory_order_relaxed);
  }

  // Enqueues every item in |items|, then bumps the counter once.
  void ProduceBatch(const std::vector<DataType>& items) {
    for (const DataType& element : items) {
      queue_.push(element);
    }
    produced_count_.fetch_add(items.size(), std::memory_order_relaxed);
  }

  // --- Consumer side ---

  // Dequeues one item into |item|; returns false when the queue is empty.
  bool Consume(DataType& item) {
    if (!queue_.try_pop(item)) {
      return false;
    }
    consumed_count_.fetch_add(1, std::memory_order_relaxed);
    return true;
  }

  // Dequeues up to |max_count| items, stopping early when the queue drains.
  std::vector<DataType> ConsumeBatch(size_t max_count) {
    std::vector<DataType> batch;
    batch.reserve(max_count);

    DataType popped;
    while (batch.size() < max_count && queue_.try_pop(popped)) {
      batch.push_back(std::move(popped));
    }

    consumed_count_.fetch_add(batch.size(), std::memory_order_relaxed);
    return batch;
  }

  // Snapshot of the queue counters.
  struct QueueStats {
    uint64_t produced_count;
    uint64_t consumed_count;
    uint64_t pending_count;
    double throughput;  // items per second
  };

  QueueStats GetStats() const {
    const uint64_t produced = produced_count_.load(std::memory_order_acquire);
    const uint64_t consumed = consumed_count_.load(std::memory_order_acquire);

    QueueStats stats{};
    stats.produced_count = produced;
    stats.consumed_count = consumed;
    stats.pending_count = produced - consumed;
    stats.throughput = 0.0;  // would require timing data to compute
    return stats;
  }

  bool Empty() const { return queue_.empty(); }

 private:
  tbb::concurrent_queue<DataType> queue_;
  std::atomic<uint64_t> produced_count_{0};
  std::atomic<uint64_t> consumed_count_{0};
};
}  // namespace itbb

#endif  // ITBB_CORE_TBB_CONCURRENT_CONTAINERS_H_
