#include <iostream>
#include <list>
#include <deque>
#include <vector>
#include <thread>
#include <atomic>
#include <mutex>
#include <chrono>
#include "concurrentqueue.h"  // Include ConcurrentQueue library

// 定义小数据结构
struct SmallItem {
  int data[20];
};

// 定义大数据结构
struct LargeItem {
  int data[1024];
};

// 测试参数
const int NUM_THREADS = 30;
const int NUM_ITEMS = 10000;

// 测试函数模板
template<typename T>
void testPerformance(std::atomic_flag& lock, std::list<T>& container, std::vector<T>& data) {
  auto start = std::chrono::high_resolution_clock::now();

  std::vector<std::thread> threads;
  for (int i = 0; i < NUM_THREADS; ++i) {
    threads.push_back(std::thread([&]() {
      for (const auto& item : data) {
        while (lock.test_and_set(std::memory_order_acquire));
        container.push_back(item);
        lock.clear(std::memory_order_release);
      }
      }));
  }

  for (auto& t : threads) {
    t.join();
  }

  auto end = std::chrono::high_resolution_clock::now();
  std::chrono::duration<double> elapsed = end - start;
  std::cout << "Push time: " << elapsed.count() << " seconds\n";

  start = std::chrono::high_resolution_clock::now();
  threads.clear();
  for (int i = 0; i < NUM_THREADS; ++i) {
    threads.push_back(std::thread([&]() {
      for (int j = 0; j < NUM_ITEMS / NUM_THREADS; ++j) {
        while (lock.test_and_set(std::memory_order_acquire));
        if (!container.empty()) {
          container.pop_front();
        }
        lock.clear(std::memory_order_release);
      }
      }));
  }

  for (auto& t : threads) {
    t.join();
  }

  end = std::chrono::high_resolution_clock::now();
  elapsed = end - start;
  std::cout << "Pop time: " << elapsed.count() << " seconds\n";
}

template<typename T>
void testPerformance(std::mutex& mtx, std::list<T>& container, std::vector<T>& data) {
  auto start = std::chrono::high_resolution_clock::now();

  std::vector<std::thread> threads;
  for (int i = 0; i < NUM_THREADS; ++i) {
    threads.push_back(std::thread([&]() {
      for (const auto& item : data) {
        std::lock_guard<std::mutex> lock(mtx);
        container.push_back(item);
      }
      }));
  }

  for (auto& t : threads) {
    t.join();
  }

  auto end = std::chrono::high_resolution_clock::now();
  std::chrono::duration<double> elapsed = end - start;
  std::cout << "Push time: " << elapsed.count() << " seconds\n";

  start = std::chrono::high_resolution_clock::now();
  threads.clear();
  for (int i = 0; i < NUM_THREADS; ++i) {
    threads.push_back(std::thread([&]() {
      for (int j = 0; j < NUM_ITEMS / NUM_THREADS; ++j) {
        std::lock_guard<std::mutex> lock(mtx);
        if (!container.empty()) {
          container.pop_front();
        }
      }
      }));
  }

  for (auto& t : threads) {
    t.join();
  }

  end = std::chrono::high_resolution_clock::now();
  elapsed = end - start;
  std::cout << "Pop time: " << elapsed.count() << " seconds\n";
}

template<typename T>
void testPerformance(moodycamel::ConcurrentQueue<T>& queue, std::vector<T>& data) {
  auto start = std::chrono::high_resolution_clock::now();

  std::vector<std::thread> threads;
  for (int i = 0; i < NUM_THREADS; ++i) {
    threads.push_back(std::thread([&]() {
      for (const auto& item : data) {
        queue.enqueue(item);
      }
      }));
  }

  for (auto& t : threads) {
    t.join();
  }

  auto end = std::chrono::high_resolution_clock::now();
  std::chrono::duration<double> elapsed = end - start;
  std::cout << "Push time: " << elapsed.count() << " seconds\n";

  start = std::chrono::high_resolution_clock::now();
  threads.clear();
  for (int i = 0; i < NUM_THREADS; ++i) {
    threads.push_back(std::thread([&]() {
      T item;
      for (int j = 0; j < NUM_ITEMS / NUM_THREADS; ++j) {
        queue.try_dequeue(item);
      }
      }));
  }

  for (auto& t : threads) {
    t.join();
  }

  end = std::chrono::high_resolution_clock::now();
  elapsed = end - start;
  std::cout << "Pop time: " << elapsed.count() << " seconds\n";
}

int main() {
  std::vector<SmallItem> smallData(NUM_ITEMS);
  std::vector<LargeItem> largeData(NUM_ITEMS);

  std::cout << "Testing with std::atomic_flag and SmallItem\n";
  std::list<SmallItem> smallList;
  std::atomic_flag atomicFlag = ATOMIC_FLAG_INIT;
  testPerformance(atomicFlag, smallList, smallData);

  std::cout << "Testing with std::mutex and SmallItem\n";
  std::list<SmallItem> smallListMutex;
  std::mutex mtx;
  testPerformance(mtx, smallListMutex, smallData);

  std::cout << "Testing with ConcurrentQueue and SmallItem\n";
  moodycamel::ConcurrentQueue<SmallItem> smallQueue;
  testPerformance(smallQueue, smallData);

  std::cout << "Testing with std::atomic_flag and LargeItem\n";
  std::list<LargeItem> largeList;
  testPerformance(atomicFlag, largeList, largeData);

  std::cout << "Testing with std::mutex and LargeItem\n";
  std::list<LargeItem> largeListMutex;
  testPerformance(mtx, largeListMutex, largeData);

  std::cout << "Testing with ConcurrentQueue and LargeItem\n";
  moodycamel::ConcurrentQueue<LargeItem> largeQueue;
  testPerformance(largeQueue, largeData);

  return 0;
}
