#pragma once
#include <thread>
#include <mutex>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <vector>
#include <queue>



#include <mutex>
#include <condition_variable>
#include <functional>
#include <queue>
#include <thread>


// class fixed_thread_pool {
// public:
// 	explicit fixed_thread_pool(size_t thread_count)
// 		: data_(std::make_shared<data>()) {
// 		for (size_t i = 0; i < thread_count; ++i) {
// 			std::thread([data = data_] {
// 				std::unique_lock<std::mutex> lk(data->mtx_);
// 				for (;;) {
// 					if (!data->tasks_.empty()) {
// 						auto current = std::move(data->tasks_.front());
// 						data->tasks_.pop();
// 						lk.unlock();
// 						current();
// 						lk.lock();
// 					}
// 					else if (data->is_shutdown_) {
// 						break;
// 					}
// 					else {
// 						data->cond_.wait(lk);
// 					}
// 				}
// 				}).detach();
// 		}
// 	}
// 
// 	fixed_thread_pool() = default;
// 	fixed_thread_pool(fixed_thread_pool&&) = default;
// 
// 	~fixed_thread_pool() {
// 		int a;
// 		while ((bool)data_) {
// 			{
// 				std::lock_guard<std::mutex> lk(data_->mtx_);
// 				data_->is_shutdown_ = true;
// 			}
// 			
// 			data_->cond_.notify_all();
// 		}
// 	}
// 
// 	template <class F>
// 	void execute(F&& task) {
// 		{
// 			std::lock_guard<std::mutex> lk(data_->mtx_);
// 			data_->tasks_.emplace(std::forward<F>(task));
// 		}
// 		data_->cond_.notify_one();
// 	}
// 
// private:
// 	struct data {
// 		std::mutex mtx_;
// 		std::condition_variable cond_;
// 		bool is_shutdown_ = false;
// 		std::queue<std::function<void()>> tasks_;
// 	};
// 	std::shared_ptr<data> data_;
// };

class GGiThreadPool
{
public:
	using Task = std::function<void()>;

	/// Fixed-size thread pool: call start() to spawn workers, appendTask()
	/// to queue work, and stop() (or let the destructor) drain and join.
	///
	/// @param num desired worker count. hardware_concurrency() may return 0,
	///            so any value < 1 is clamped to 1 to guarantee progress.
	/// NOTE: initializer order matches declaration order (_is_running first)
	/// to avoid the -Wreorder hazard the old code had.
	explicit GGiThreadPool(int num = std::thread::hardware_concurrency())
		: _is_running(false), _thread_num(num > 0 ? num : 1)
	{}

	~GGiThreadPool()
	{
		if (_is_running)
			stop();
	}

	/// Spawn _thread_num worker threads. Call once before queuing tasks.
	void start()
	{
		_is_running = true;

		for (int i = 0; i < _thread_num; i++)
			_threads.emplace_back(&GGiThreadPool::work, this);
	}

	/// Block until every queued task has been picked up and executed,
	/// then shut the workers down and join them.
	void stop()
	{
		{
			std::unique_lock<std::mutex> lk(_mtx);
			// Wait for the queue to drain *under the lock*; workers signal
			// _drained when they pop the last task. (The previous version
			// spun on _tasks.empty() with no mutex — a data race — and
			// slept via the Windows-only, undeclared Sleep().)
			_drained.wait(lk, [this] { return _tasks.empty(); });
			_is_running = false;
		}
		_cond.notify_all(); // wake every parked worker so it can exit

		// terminate every thread job
		for (std::thread& t : _threads)
		{
			if (t.joinable())
				t.join();
		}
		_threads.clear(); // drop dead handles so a later start() is safe
	}

	/// Enqueue a task; silently ignored if the pool is not running.
	void appendTask(const Task& task)
	{
		if (_is_running)
		{
			{
				std::lock_guard<std::mutex> lk(_mtx);
				_tasks.push(task);
			}
			// Notify after releasing the lock so the woken worker does
			// not immediately block on _mtx.
			_cond.notify_one();
		}
	}

private:
	// Worker loop: every thread competes to pop tasks from the queue.
	void work()
	{
		while (_is_running)
		{
			Task task;
			{
				std::unique_lock<std::mutex> lk(_mtx);
				// Predicate wait handles spurious wakeups and re-checks
				// the shutdown flag atomically with the queue state.
				_cond.wait(lk, [this] { return !_tasks.empty() || !_is_running; });
				if (!_tasks.empty())
				{
					// A claimed task is always finished, even if stop()
					// flips _is_running while we run it below.
					task = std::move(_tasks.front());
					_tasks.pop();
					if (_tasks.empty())
						_drained.notify_all(); // unblock stop()'s drain wait
				}
			}

			// Execute outside the lock so other workers stay unblocked.
			if (task)
				task();
		}
	}

public:
	GGiThreadPool(const GGiThreadPool&) = delete;
	GGiThreadPool& operator=(const GGiThreadPool& other) = delete;

private:
	std::atomic_bool _is_running;     // true between start() and stop()
	std::mutex _mtx;                  // guards _tasks
	std::condition_variable _cond;    // signaled on enqueue and on shutdown
	std::condition_variable _drained; // signaled when the queue becomes empty
	int _thread_num;                  // worker count, always >= 1
	std::vector<std::thread> _threads;
	std::queue<Task> _tasks;
};
 
