#include <iostream>
#include <thread>
#include <future>
#include <cmath>
#include <memory>

/// Divides num by denom, acting as the worker task for std::async.
/// Prints the id of the thread it executes on, simulates half a second
/// of work, and throws std::runtime_error when denom is zero.
double divideNumber(double num, double denom)
{
    using namespace std::chrono_literals;

    // show which thread actually runs this computation
    std::cout << "Worker thread id = " << std::this_thread::get_id() << std::endl;

    std::this_thread::sleep_for(500ms); // simulate work

    // guard against division by zero before computing the quotient
    if (denom == 0)
        throw std::runtime_error("Exception from thread#: Division by zero!");

    return num / denom;
}

int main()
{
    // show the id of the main thread, to compare against the worker's id
    std::cout << "Main thread id = " << std::this_thread::get_id() << std::endl;

    // create the task; std::launch::deferred postpones execution until get()
    const double numerator = 42.0;
    const double denominator = 2.0;
    std::future<double> task = std::async(std::launch::deferred, divideNumber, numerator, denominator);

    // get() runs the deferred task on this thread and rethrows any
    // exception the task raised, so the call sits inside try/catch
    try
    {
        const double quotient = task.get();
        std::cout << "Result = " << quotient << std::endl;
    }
    catch (const std::exception& e)
    {
        std::cout << e.what() << std::endl;
    }

    return 0;
}

/* [Output]
 * 
 * Main thread id = 140269526951744
 * Worker thread id = 140269526951744
 * Result = 21
 * 
 * [Explain]
 * Line 27 enforces the synchronous execution of divideNumber by passing "std::launch::deferred",
 * which produces the output above, where the thread ids of the main and worker thread are identical.
 * 
 * [Outro]
 * At this point, let us compare std::thread with std::async: Internally, std::async creates a promise, 
 * gets a future from it and runs a template function that takes the promise, calls our function and then
 * either sets the value or the exception of that promise - depending on function behavior. 
 * The code used internally by std::async is more or less identical to the code we used in the previous example, 
 * except that this time it has been generated by the compiler and it is hidden from us - 
 * which means that the code we write appears much cleaner and leaner. 
 * Also, std::async makes it possible to control the amount of concurrency by passing an optional launch parameter, 
 * which enforces either synchronous or asynchronous behavior. 
 * This ability, especially when left to the system, allows us to prevent an overload of threads, 
 * which would eventually slow down the system as threads consume resources for both management and communication. 
 * If we were to use too many threads, the increased resource consumption would outweigh the advantages of parallelism 
 * and slow down the program. By leaving the decision to the system, we can ensure that the number of threads is 
 * chosen in a carefully balanced way that optimizes runtime performance by looking at the current workload of the 
 * system and the multi-core architecture of the system.
 *
 */