/**
 *  Multithreading demo (Windows):
 *  1. Creating a thread with the raw Windows API (CreateThread).
 *  2. Creating threads with C++11 (std::thread, mutex, atomic,
 *     condition_variable, and a small thread pool).
 */

#include <iostream>
#include <windows.h>
#include <stdio.h>
#include <vector>
#include <queue>

#include <thread>
#include <mutex>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <future>


// Thread entry point for the Windows CreateThread API.
// Prints the new thread's id and returns exit code 0.
DWORD WINAPI thread_function(LPVOID lpParam) {
    const DWORD tid = GetCurrentThreadId();
    printf("This is a new thread. Current thread ID: %lu\n", tid);
    return 0;
}

// C++11 创建多线程//////////////////////////////////////////////
// 1. Function pointer: a plain free function used as a thread entry point.
// Fix: the original streamed "...\n" followed by std::endl, which printed a
// stray blank line after the message; one newline (with flush) is intended.
void func() {
    std::cout << "Thread created with function pointer." << std::endl;
}

// 3. Member function: a std::thread can run a non-static member function
// when it is also given a pointer (or reference) to an instance.
class MyClass {
public:
    // Reports that the thread was started from a member function.
    void memberFunction() {
        const char* msg = "MyClass Thread created with member function.";
        std::cout << msg << std::endl;
    }
};

// Thread safety
// 1. A std::mutex protects the shared counter.
std::mutex mtx;
int shared_data = 0;

// Increments shared_data 30 times, taking mtx for each increment.
// Fixes vs. the original:
//  - std::lock_guard (RAII) replaces manual lock()/unlock(), so the mutex
//    is released even if an exception is thrown while it is held;
//  - std::thread::id is not an integer, so passing it to printf("%d") was
//    undefined behavior; the id is streamed with std::cout instead.
void increment() {
    for (int i = 0; i < 30; ++i) {
        std::lock_guard<std::mutex> lock(mtx);
        ++shared_data;
        // Print the current thread id together with the new counter value.
        std::cout << "lock id:" << std::this_thread::get_id()
                  << " -> shared_data:" << shared_data << "\n";
    }
}

// Increments shared_data 10 times, holding mtx via std::lock_guard (RAII),
// which releases the mutex automatically at the end of each iteration.
// Fix: std::thread::id is not an int, so printf("%d", ...) on it was
// undefined behavior; the id is now streamed with std::cout.
void increment1() {
    for (int i = 0; i < 10; ++i) {
        std::lock_guard<std::mutex> lock(mtx);
        ++shared_data;
        // Print the current thread id together with the new counter value.
        std::cout << "lock_guard id:" << std::this_thread::get_id()
                  << " -> shared_data:" << shared_data << "\n";
    }
}

// Atomic operations: std::atomic makes the increment itself thread-safe
// without an explicit mutex.
std::atomic<int> atomic_data(0);

// Increments atomic_data 20 times.
// Fix: std::thread::id is not an int, so printf("%d", ...) on it was
// undefined behavior; the whole line is now built with std::cout.
// NOTE: the increment is atomic, but the printed line can still interleave
// with output from other threads.
void atomic_increment() {
    for (int i = 0; i < 20; ++i) {
        ++atomic_data;
        std::cout << "atomic id:" << std::this_thread::get_id()
                  << " -> shared_data:" << atomic_data << std::endl;
    }
}


// condition_variable: minimal producer/consumer handshake.
std::condition_variable cv;
bool ready = false;

// Blocks on cv until the main thread sets `ready` to true; the predicate
// guards against spurious wakeups. Reports entry and resumption.
void worker() {
    const auto tid = std::this_thread::get_id();
    std::unique_lock<std::mutex> guard(mtx);
    std::cout << "thread Id:" << tid << " Worker thread enter." << std::endl;
    auto is_ready = [] { return ready; };
    cv.wait(guard, is_ready);
    std::cout << "thread Id:" << tid << " Worker thread is working." << std::endl;
}


// A fixed-size thread pool: worker threads pull std::function<void()> tasks
// off a queue that is protected by a mutex and signalled by a condition
// variable. Destroying the pool drains the queue, then joins all workers.
class ThreadPool
{
public:
    // Spawns numThreads worker threads. Each worker loops: wait until a task
    // is queued (or shutdown is requested), pop one task while holding the
    // lock, then execute it with the lock released so other workers proceed.
    ThreadPool(size_t numThreads) : m_stop(false)
    {
        for (size_t i = 0; i < numThreads; ++i)
        {
            // Create the worker thread. (Fix: the original lambda captured
            // the loop index `i` without ever using it.)
            m_vecWorkers.emplace_back(
                [this] {
                while (true)
                {
                    std::function<void()> task;
                    {
                        std::unique_lock<std::mutex> lock(this->m_queueMutex);

                        // Wake when stopping or when the queue is non-empty.
                        this->m_condition.wait(lock, [this] { return this->m_stop || !this->m_queTasks.empty(); });

                        // Exit only after the queue is drained, so tasks
                        // already enqueued still run during shutdown.
                        if (this->m_stop && this->m_queTasks.empty())
                        {
                            std::cout << "thread Id:"<< std::this_thread::get_id() << " 线程运行停止并且任务结束" <<std::endl;
                            return;
                        }

                        task = std::move(this->m_queTasks.front());
                        this->m_queTasks.pop();
                    }
                    // Run the task outside the critical section.
                    task();
                }
            }
        );
        }
    }

    // Requests shutdown, wakes every worker, and joins them all. Pending
    // tasks are completed before the workers exit (see the loop above).
    ~ThreadPool()
    {
        {
            std::unique_lock<std::mutex> lock(m_queueMutex);
            std::cout << "thread Id:"<< std::this_thread::get_id() << " ThreadPool 析构" << std::endl;
            // m_stop is written under the mutex so the workers' predicate
            // check cannot miss the transition.
            m_stop = true;
        }
        m_condition.notify_all();
        for (std::thread& worker : m_vecWorkers)
        {
            worker.join();
        }
    }

    // Queues a callable with its arguments; returns a std::future for the
    // result. Throws std::runtime_error if the pool is already stopping.
    template<class F, class... Args>
    auto enqueue(F&& f, Args&&... args) -> std::future<typename std::result_of<F(Args...)>::type>
    {
            using return_type = typename std::result_of<F(Args...)>::type;
            // packaged_task is move-only; holding it via shared_ptr lets the
            // copyable std::function wrapper below own it.
            auto task = std::make_shared< std::packaged_task<return_type()>>(std::bind(std::forward<F>(f), std::forward<Args>(args)...));

            std::future<return_type> res = task->get_future();
            {
                std::unique_lock<std::mutex> lock(m_queueMutex);
                if (m_stop)
                {
                    throw std::runtime_error("enqueue on stopped ThreadPool");
                }
                // Append the type-erased task to the queue tail.
                m_queTasks.emplace([task]() { (*task)(); });
            }
            // Notify outside the lock so the woken worker can acquire it.
            m_condition.notify_one();
            return res;
    }

private:
    std::vector<std::thread> m_vecWorkers;               // worker threads
    // std::function (from <functional>) is a general polymorphic wrapper
    // used here to type-erase the queued tasks.
    std::queue<std::function<void()>> m_queTasks;        // pending tasks

    std::mutex m_queueMutex;                             // guards m_queTasks and m_stop
    std::condition_variable m_condition;                 // signals task-available / stop
    bool m_stop;                                         // set once, under m_queueMutex
};

// Demo task for the thread-pool example: reports which worker thread
// executed the task with the given id.
void exampleTask(int id) {
    std::cout << "thread Id:" << std::this_thread::get_id()
              << " Task " << id << " is running." << std::endl;
}


// Demo driver. Each section is gated by if(0) so individual examples can be
// enabled one at a time; only the thread-pool section at the bottom runs
// as written.
int main() {
    
    if(0)
    {
        // Create a thread with the raw Windows API.
        HANDLE hThread;
        DWORD threadId;
        
        // Print the main thread's id.
        printf("Main thread ID: %lu\n", GetCurrentThreadId());
        // CreateThread() is the Windows API for spawning a thread; it
        // returns a thread handle (NULL on failure).
        hThread = CreateThread(NULL, 0, thread_function, NULL, 0, &threadId);
        if (hThread == NULL) {
            printf("Thread creation failed.\n");
            return 1;
        }
        else{
            printf("Child thread ID: %lu\n", threadId);
        }
        
        WaitForSingleObject(hThread, INFINITE);
        CloseHandle(hThread);
    } 

    { // C++11 thread creation (preferred).
        // In join mode the main thread waits for the child to finish before
        // continuing. This guarantees the child's resources are released
        // correctly and avoids leaks.
        if(0)
        {
            // 1. Function pointer
            std::thread t(func);
            // joinable() returns true while the thread has been neither
            // joined nor detached.
            if(t.joinable())
            {
                t.join();
            }
        }

        if(0)
        {
            // 2. Lambda expression
            std::thread t([]() {
                std::cout << "Thread created with lambda expression." << std::endl;
            });
            t.join();
        }

        if(0)
        {
            // 3. Class member function (needs an instance pointer).
            MyClass obj;
            std::thread t(&MyClass::memberFunction, &obj);
            t.join();
        }
         
        // detach mode: independent background task; main does not wait.
        if(0)
        {
            std::thread t(func);
            t.detach();
            std::cout << "Main thread continues without waiting." << std::endl;
        }

        // Thread safety
        if(0)
        {
            // std::mutex with explicit locking
            if(0)
            {
                std::thread t1(increment);
                std::thread t2(increment);

                t1.join();
                t2.join();
            }

            {
                std::thread t1(increment1);
                std::thread t2(increment1);

                t1.join();
                t2.join();
            }
            std::cout << "Shared data: " << shared_data << std::endl;
        }

        // Thread safety via atomic operations
        if(0)
        {
            // std::atomic makes the increments thread-safe without a mutex.
            std::thread t1(atomic_increment);
            std::thread t2(atomic_increment);
        
            t1.join();
            t2.join();
        
            std::cout << "Atomic data: " << atomic_data << std::endl;
            // NOTE(review): dead code while the enclosing if(0) is disabled;
            // if enabled, this early return skips every section below.
            return 0;
        }

        // condition_variable: producer/consumer style handshake
        if(0)
        {
            std::thread t(worker);
            t.detach();
            {
                // Set the flag under the same mutex the worker waits on.
                std::lock_guard<std::mutex> lock(mtx);
                ready = true;
                std::cout << "Main thread Id:"<< std::this_thread::get_id() << " state changed:" << ready << std::endl;
            }
            // Notify after releasing the lock so the worker can acquire it.
            cv.notify_all();
            //
            std::cout << "condition_variable Main thread working" <<std::endl;
        }

        // Thread pool
        {
            // A pool managing four worker threads; eight tasks are queued,
            // so each worker runs about two of them.
            ThreadPool pool(4);
            std::vector< std::future<void> > futures;

            for (int i = 0; i < 8; ++i) {
                futures.emplace_back(
                    pool.enqueue(exampleTask, i)
                );
            }

            // Wait for all tasks before the pool is destroyed.
            for (auto& future : futures) {
                future.wait();
            }
        }
    }

    return 0;
}