/***
* ==++==
*
* Copyright (c) Microsoft Corporation.  All rights reserved.
*
* ==--==
* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
*
* pplx.h
*
* Parallel Patterns Library
*
* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
****/

#pragma once

#ifndef _PPLX_H
#define _PPLX_H

//#define _CRTDBG_MAP_ALLOC
#include <stdexcept>
#include <iterator>
#include <functional>
#include <memory>
#include <type_traits>
#include <algorithm>
#include <sal.h>
#include <limits.h>
#include <crtdbg.h>
#include <intrin.h>

#include "pplxdefs.h"

  
#pragma pack(push,_CRT_PACKING)

/// <summary>
///     The <c>pplx</c> namespace provides classes and functions that give you access to the Concurrency Runtime,
///     a concurrent programming framework for C++. For more information, see <see cref="Concurrency Runtime"/>.
/// </summary>
/**/
namespace pplx
{

/// <summary>
///     An elementary abstraction for a task, defined as <c>void (__cdecl * TaskProc)(void *)</c>. A <c>TaskProc</c> is called to
///     invoke the body of a task.
/// </summary>
/**/
typedef void (__cdecl * TaskProc)(void *);

/// <summary>
///     Describes the execution status of a <c>task_group</c> or <c>structured_task_group</c> object.  A value of this type is returned
///     by numerous methods that wait on tasks scheduled to a task group to complete.
/// </summary>
/// <seealso cref="task_group Class"/>
/// <seealso cref="task_group::wait Method"/>
/// <seealso cref="task_group::run_and_wait Method"/>
/// <seealso cref="structured_task_group Class"/>
/// <seealso cref="structured_task_group::wait Method"/>
/// <seealso cref="structured_task_group::run_and_wait Method"/>
/**/
// Enumerators take the implicit values 0 (not_complete), 1 (completed),
// 2 (canceled); keep the declaration order stable.
enum task_group_status
{
    /// <summary>
    ///     The tasks queued to the <c>task_group</c> object have not completed.  Note that this value is not presently returned by
    ///     the Concurrency Runtime.
    /// </summary>
    /**/
    not_complete,

    /// <summary>
    ///     The tasks queued to the <c>task_group</c> or <c>structured_task_group</c> object completed successfully.
    /// </summary>
    /**/
    completed,

    /// <summary>
    ///     The <c>task_group</c> or <c>structured_task_group</c> object was canceled.  One or more tasks may not have executed.
    /// </summary>
    /**/
    canceled
};

/// <summary>
///     This class describes an exception thrown by the PPL tasks layer in order to force the current task
///     to cancel. It is also thrown by the <c>get()</c> method on <see cref="task Class">task</see>, for a
///     canceled task.
/// </summary>
/// <seealso cref="task::get Method"/>
/// <seealso cref="cancel_current_task Method"/>
/**/
class task_canceled : public std::exception
{
public:
    /// <summary>
    ///     Constructs a <c>task_canceled</c> object.
    /// </summary>
    /// <param name="_Message">
    ///     A descriptive message of the error.
    /// </param>
    /**/
    // NOTE(review): std::exception(const char*) is a Microsoft STL extension
    // (consistent with this header's use of <sal.h>/<crtdbg.h>); it is not
    // portable to other standard libraries.
    explicit task_canceled(_In_z_ const char * _Message) throw()
        : exception(_Message)
    {
    }

    /// <summary>
    ///     Constructs a <c>task_canceled</c> object with no message.
    /// </summary>
    /**/
    task_canceled() throw()
        : exception()
    {
    }
};

/// <summary>
///     This class describes an exception thrown when an invalid operation is performed that is not more accurately
///     described by another exception type thrown by the Concurrency Runtime.
/// </summary>
/// <remarks>
///     The various methods which throw this exception will generally document under what circumstances they will throw it.
/// </remarks>
/**/
class invalid_operation : public std::exception
{
public:
    /// <summary>
    ///     Constructs an <c>invalid_operation</c> object.
    /// </summary>
    /// <param name="_Message">
    ///     A descriptive message of the error.
    /// </param>
    /**/
    // Marked explicit (matching task_canceled above) so a plain string cannot
    // be implicitly converted into an invalid_operation.
    explicit invalid_operation(_In_z_ const char * _Message) throw()
        : exception(_Message)
    {
    }

    /// <summary>
    ///     Constructs an <c>invalid_operation</c> object with no message.
    /// </summary>
    /**/
    invalid_operation() throw()
        : exception()
    {
    }
};

/// <summary>
///     Generic RAII guard for any lock type that exposes the critical_section
///     interface (lock()/unlock()). The lock is acquired on construction and
///     released on destruction. Instances are non-copyable.
/// </summary>
template<class _Lock>
class scoped_lock_impl
{
public:
    // Acquire the given lock for the lifetime of this guard.
    explicit scoped_lock_impl(_Lock& _Critical_section)
        : _M_held_lock(_Critical_section)
    {
        _M_held_lock.lock();
    }

    // Release the lock when the guard goes out of scope.
    ~scoped_lock_impl()
    {
        _M_held_lock.unlock();
    }

private:
    _Lock& _M_held_lock;

    // Copying a guard would double-release the lock; copy operations are
    // declared private and left unimplemented.
    scoped_lock_impl(const scoped_lock_impl&);                    // no copy constructor
    scoped_lock_impl const & operator=(const scoped_lock_impl&);  // no assignment operator
};

// Interfaces

/// <summary>
///     Scheduler Interface. Implementations queue the given TaskProc for
///     asynchronous execution with the supplied context pointer.
/// </summary>
struct __declspec(novtable) scheduler
{
    virtual void schedule( TaskProc, void* ) = 0;

    // Instances are owned and destroyed polymorphically (e.g. through the
    // std::unique_ptr<pplx::scheduler> held by the scheduler anchor), so a
    // virtual destructor is required to avoid undefined behavior when
    // deleting through the base pointer. This matches event/critical_section.
    virtual ~scheduler() {}
};

/// <summary>
///     Manual reset event Interface
/// </summary>
struct __declspec(novtable) event
{
    // Signal the event; as a manual-reset event it stays signalled until reset() is called.
    virtual void set() = 0;
    // Return the event to the non-signalled state.
    virtual void reset() = 0;

    // Blocks until the event is signalled or 'timeout' elapses.
    // Returns 0 if the event is signalled.
    // Returns timeout_infinite if the wait timed out.
    virtual unsigned int wait(unsigned int timeout) = 0;

    virtual ~event() {}

    // Sentinel timeout meaning "wait forever"; also the value wait() returns on timeout.
    static const unsigned int timeout_infinite = 0xFFFFFFFF;
};

/// <summary>
///     critical section Interface
/// </summary>
struct __declspec(novtable) critical_section
{
    // RAII guard type for this interface.
    typedef scoped_lock_impl<critical_section> scoped_lock;

    // Acquire exclusive ownership.
    virtual void lock() = 0;
    // Release ownership.
    virtual void unlock() = 0;
    virtual ~critical_section() {}
};

/// <summary>
///     reader writer lock Interface
/// </summary>
struct __declspec(novtable) reader_writer_lock
{
    // RAII guard type that takes the lock in exclusive (write) mode.
    typedef scoped_lock_impl<reader_writer_lock> scoped_lock;

    // Acquire in exclusive (write) mode.
    virtual void lock() = 0;
    // Acquire in shared (read) mode.
    virtual void lock_read() = 0;
    // Release the lock.
    virtual void unlock() = 0;

    // Implementations are destroyed through this interface (e.g. the
    // std::unique_ptr<reader_writer_lock> produced by factory), so the
    // destructor must be virtual to avoid undefined behavior on deletion
    // through the base pointer. This matches event and critical_section.
    virtual ~reader_writer_lock() {}

    /// <summary>
    ///     RAII guard that holds the lock in shared (read) mode for its lifetime.
    /// </summary>
    class scoped_lock_read
    {
    public:
        explicit scoped_lock_read(pplx::reader_writer_lock &_Reader_writer_lock) : _M_reader_writer_lock(_Reader_writer_lock)
        {
            _M_reader_writer_lock.lock_read();
        }

        ~scoped_lock_read()
        {
            _M_reader_writer_lock.unlock();
        }

    private:
        pplx::reader_writer_lock& _M_reader_writer_lock;
        scoped_lock_read(const scoped_lock_read&);                    // no copy constructor
        scoped_lock_read const & operator=(const scoped_lock_read&);  // no assignment operator
    };
};

/// <summary>
///     Factory Interface. Creates the platform-specific synchronization
///     primitives (event, critical section, reader-writer lock) for a given
///     scheduler.
/// </summary>
struct factory
{
    virtual std::unique_ptr<pplx::event> create_event(const pplx::scheduler&) = 0;
    virtual std::unique_ptr<pplx::critical_section> create_critical_section(const pplx::scheduler&) = 0;
    virtual std::unique_ptr<pplx::reader_writer_lock> create_reader_writer_lock(const pplx::scheduler&) = 0;

    // Concrete factories are owned and destroyed polymorphically (through the
    // std::unique_ptr<pplx::factory> held by the scheduler anchor), so a
    // virtual destructor is required to avoid undefined behavior on deletion
    // through the base pointer.
    virtual ~factory() {}
};

namespace platform
{
    // Returns an identifier for the calling thread (library-provided).
    // Used below for lock-ownership tracking in _Reentrant_critcal_section.
    _PPLXIMP long GetCurrentThreadId();
};

namespace details
{
    //
    // An internal exception that is used for cancellation. Users do not "see" this exception except through the
    // resulting stack unwind. This exception should never be intercepted by user code. It is intended
    // for use by the runtime only.
    //
    class _Interruption_exception : public std::exception
    {
    public:
        // Default-constructed only; carries no message payload.
        _Interruption_exception(){}
    };

    // Back-off helper used while spinning on a lock. Each call to _SpinOnce()
    // performs one unit of waiting and advances _M_state; the state names
    // suggest a spin -> yield -> block progression (implementation is in the
    // library, not visible here).
    class _Spin_wait
    {
    public:

        _Spin_wait()
            : _M_state(_StateInitial)
        {
        }

        // Perform one back-off step (library-provided).
        _PPLXIMP void _SpinOnce();

    protected:

        // Progression of back-off aggressiveness.
        enum _SpinState
        {
            _StateInitial,
            _StateSpin,
            _StateYield,
            _StateBlock,
            _StateSingle
        };

        _SpinState _M_state;
    };

    // Non-reentrant spin lock implementing the critical_section interface.
    // Relocking from the owning thread deadlocks; see _Reentrant_critcal_section
    // below for a recursive variant.
    class _Spin_lock : public ::pplx::critical_section
    {
    public:

        typedef scoped_lock_impl<_Spin_lock> scoped_lock;

        // Starts unlocked (0 == free, 1 == held).
        _Spin_lock()
            : _M_lock(0)
        {
        }

        // Acquire: attempt an atomic 0 -> 1 transition; on contention, back off
        // via _Spin_wait and retry until the compare-exchange succeeds.
        virtual void lock()
        {
            if ( _InterlockedCompareExchange(&_M_lock, 1, 0) != 0 )
            {
                _Spin_wait spinWait;
                do 
                {
                    spinWait._SpinOnce();
                } while ( _InterlockedCompareExchange(&_M_lock, 1, 0) != 0 );
            }
        }

        // Release: interlocked store of 0.
        virtual void unlock()
        {
            // fence for release semantics
            _InterlockedExchange(&_M_lock, 0);
        }

    private:

        // 0 when free, 1 when held.
        volatile long _M_lock;
    };

    // Process-wide anchor ("ancor" [sic] -- name kept for source compatibility)
    // that owns the ambient scheduler and factory. Either is installed once via
    // _Init, or lazily created from the library defaults on first use.
    class _SchedulerAncor
    {
        std::unique_ptr<pplx::scheduler> _M_Scheduler;
        std::unique_ptr<pplx::factory> _M_Factory;
        _Spin_lock _M_SpinLock;

        // Private: instances are only obtainable through _GetInstance().
        _SchedulerAncor() :
            _M_Scheduler(nullptr), 
            _M_Factory(nullptr)
        {
        }
        
        // Library-provided fallbacks used when nothing was installed via _Init.
        _PPLXIMP static pplx::scheduler * __cdecl _GetDefaultScheduler();
        _PPLXIMP static pplx::factory * __cdecl _GetDefaultFactory();
           
        _SchedulerAncor(_SchedulerAncor const&); // Don't Implement
        void operator=(_SchedulerAncor const&);  // Don't implement

    public:

        // Meyers-style singleton accessor (function-local static).
        static _SchedulerAncor & _GetInstance()
        {
            static _SchedulerAncor    _Instance;
            return _Instance;
        }

        // Returns the ambient scheduler, creating the default on first use.
        // NOTE(review): this is double-checked initialization -- the first
        // _M_Scheduler read happens without holding _M_SpinLock, which is a
        // data race under the C++11 memory model; presumably tolerated on the
        // MSVC/x86 targets this header supports. Verify before porting.
        pplx::scheduler & _GetScheduler()
        {
            if ( _M_Scheduler == nullptr )
            {
                pplx::details::_Spin_lock::scoped_lock _Lock(_M_SpinLock);
                 if (_M_Scheduler == nullptr)
                 {
                    _M_Scheduler.reset(_GetDefaultScheduler());
                 }
            }

            return *_M_Scheduler;
        }

        // Returns the ambient factory, creating the default on first use.
        // NOTE(review): same unlocked first read as _GetScheduler above.
        pplx::factory & _GetFactory()
        {
            if (_M_Factory == nullptr)
            {
                pplx::details::_Spin_lock::scoped_lock _Lock(_M_SpinLock);
                if (_M_Factory == nullptr)
                {
                    _M_Factory.reset(_GetDefaultFactory());
                }
            }

            return *_M_Factory;
        }

        // Installs the ambient scheduler and factory. May be called at most
        // once, and only before either lazy getter has run; otherwise throws.
        void _Init(std::unique_ptr<pplx::scheduler> _Scheduler, std::unique_ptr<pplx::factory> _Factory)
        {
            pplx::details::_Spin_lock::scoped_lock _Lock(_M_SpinLock);

            if ((_M_Factory != nullptr) || (_M_Scheduler != nullptr))
            {
                throw std::exception("Factory and/or Scheduler is already initialized");
            }

            _M_Factory = std::move(_Factory);
            _M_Scheduler = std::move(_Scheduler);
        }
    };
}

// Init Once - installs the ambient scheduler and factory for the process.
// Throws if either has already been initialized (see _SchedulerAncor::_Init).
inline void init_pplx(std::unique_ptr<pplx::scheduler> _Scheduler, std::unique_ptr<pplx::factory> _Factory)
{
    details::_SchedulerAncor & _Ancor = details::_SchedulerAncor::_GetInstance();
    _Ancor._Init(std::move(_Scheduler), std::move(_Factory));
}

// Returns the process-wide (ambient) scheduler, creating the default on first use.
inline pplx::scheduler& get_ambient_scheduler()
{
    details::_SchedulerAncor & _Ancor = details::_SchedulerAncor::_GetInstance();
    return _Ancor._GetScheduler();
}

// Returns the process-wide factory, creating the default on first use.
inline pplx::factory& get_factory()
{
    details::_SchedulerAncor & _Ancor = details::_SchedulerAncor::_GetInstance();
    return _Ancor._GetFactory();
}

class eventx : public ::pplx::event
{
public:

    eventx(pplx::scheduler& _Scheduler = get_ambient_scheduler(), pplx::factory& _Factory = get_factory())
        : _M_ev(_Factory.create_event(_Scheduler))
    {
    }

    virtual void set()
    {
        _M_ev->set();
    }

    virtual void reset()
    {
        _M_ev->reset();
    }

    unsigned int wait()
    {
        return wait(event::timeout_infinite);
    }

    virtual unsigned int wait(unsigned int timeout)
    {
        return _M_ev->wait(timeout);
    }

private:
    std::unique_ptr<pplx::event> _M_ev;

    eventx(const eventx&);                  // no copy constructor
    eventx const & operator=(const eventx&); // no assignment operator
};

class critical_sectionx : public ::pplx::critical_section
{
public:

    typedef ::pplx::critical_section::scoped_lock scoped_lock;

    critical_sectionx(pplx::scheduler& _Scheduler = get_ambient_scheduler(), pplx::factory& _Factory = get_factory())
        : _M_cs(_Factory.create_critical_section(_Scheduler))
    {
    }

    virtual void lock()
    {
        _M_cs->lock();
    }

    virtual void unlock()
    {
        _M_cs->unlock();
    }

private:

    std::unique_ptr<pplx::critical_section> _M_cs;

    critical_sectionx(const critical_sectionx&);                  // no copy constructor
    critical_sectionx const & operator=(const critical_sectionx&); // no assignment operator
};

class reader_writer_lockx : public ::pplx::reader_writer_lock
{
public:

    typedef ::pplx::reader_writer_lock::scoped_lock scoped_lock;
    typedef ::pplx::reader_writer_lock::scoped_lock_read scoped_lock_read;

    reader_writer_lockx(pplx::scheduler& _Scheduler = get_ambient_scheduler(), pplx::factory& _Factory = get_factory())
        : _M_lock(_Factory.create_reader_writer_lock(_Scheduler))
    {
    }

    virtual void lock()
    {
        _M_lock->lock();
    }

    virtual void lock_read()
    {
        _M_lock->lock_read();
    }

    virtual void unlock()
    {
        _M_lock->unlock();
    }

private:
    std::unique_ptr<pplx::reader_writer_lock> _M_lock;
};

namespace details
{
    // Atomically increments *_Target and returns the incremented value.
    // On 32-bit targets (x86/ARM) size_t is 32 bits, so the long intrinsic is
    // used; otherwise the 64-bit intrinsic.
    inline size_t _AtomicIncrementSizeT(volatile size_t * _Target)
    {
#if (defined(_M_IX86) || defined(_M_ARM))
        return static_cast<size_t>(_InterlockedIncrement(reinterpret_cast<long volatile *>(_Target)));
#else
        return static_cast<size_t>(_InterlockedIncrement64(reinterpret_cast<__int64 volatile *>(_Target)));
#endif
    }

    // Atomically decrements *_Target and returns the decremented value.
    // Width selection mirrors _AtomicIncrementSizeT: 32-bit intrinsic on
    // x86/ARM, 64-bit intrinsic elsewhere.
    inline size_t _AtomicDecrementSizeT(volatile size_t * _Target)
    {
#if (defined(_M_IX86) || defined(_M_ARM))
        return static_cast<size_t>(_InterlockedDecrement(reinterpret_cast<long volatile *>(_Target)));
#else
        return static_cast<size_t>(_InterlockedDecrement64(reinterpret_cast<__int64 volatile *>(_Target)));
#endif
    }

    // Atomically stores _Exchange into *_Target if *_Target equals _Comparand;
    // returns the value of *_Target before the operation. Width selection
    // mirrors the helpers above.
    inline size_t _AtomicCompareExchangeSizeT(volatile size_t * _Target, size_t _Exchange, size_t _Comparand)
    {
#if (defined(_M_IX86) || defined(_M_ARM))
        return static_cast<size_t>(_InterlockedCompareExchange(reinterpret_cast<long volatile *>(_Target), static_cast<long>(_Exchange), static_cast<long>(_Comparand)));
#else
        return static_cast<size_t>(_InterlockedCompareExchange64(reinterpret_cast<__int64 volatile *>(_Target), static_cast<__int64>(_Exchange), static_cast<__int64>(_Comparand)));
#endif
    }

    // Reentrant (recursive) critical section layered on the factory-provided
    // critical section. ("critcal" [sic] -- name kept for source compatibility.)
    // Tracks the owning thread id so the owner may re-lock without deadlocking.
    class _Reentrant_critcal_section : public ::pplx::critical_section
    {
    public:

        typedef ::pplx::critical_section::scoped_lock scoped_lock;

        _Reentrant_critcal_section(pplx::scheduler& _Scheduler = get_ambient_scheduler(), pplx::factory& _Factory = get_factory())
            : _M_cs(_Scheduler, _Factory)
        {
            // -1 == no owning thread.
            _M_owner = -1;
            _M_recursionCount = 0;
        }

        ~_Reentrant_critcal_section()
        {
            // Must not be destroyed while held.
            _PPLX_ASSERT(_M_owner == -1);
            _PPLX_ASSERT(_M_recursionCount == 0);
        }

        // Acquire the lock. If the calling thread already owns it, only the
        // recursion count is bumped; the underlying lock is not re-entered.
        virtual void lock()
        {
            auto id = ::pplx::platform::GetCurrentThreadId();

            // NOTE(review): _M_owner is read here without holding _M_cs; this
            // is only safe if a concurrent writer can never make it equal the
            // calling thread's id -- confirm on non-MSVC memory models.
            if ( _M_owner == id )
            {
                _M_recursionCount++;
            }
            else
            {
                _M_cs.lock();
                _M_owner = id;
                _M_recursionCount = 1;
            }            
        }

        // Release one level of recursion; the underlying lock is released only
        // when the outermost unlock() is reached.
        virtual void unlock()
        {
            _PPLX_ASSERT(_M_owner == ::pplx::platform::GetCurrentThreadId());
            _PPLX_ASSERT(_M_recursionCount >= 1);

            _M_recursionCount--;

            if ( _M_recursionCount == 0 )
            {
                // Clear ownership before releasing so the next acquirer never
                // observes a stale owner id.
                _M_owner = -1;
                _M_cs.unlock();
            }           
        }

    private:
        pplx::critical_sectionx _M_cs;
        long _M_recursionCount;  // depth of recursive acquisition by the owner
        volatile long _M_owner;  // thread id of the owner, -1 when free
    };

    struct _Chore
    {
    protected:
        // Constructors.
        explicit _Chore(TaskProc _PFunction) : m_pFunction(_PFunction)
        {
        }

        _Chore()
        {
        }

        virtual ~_Chore()
        {
        }

    public:

        // The function which invokes the work of the chore.
        TaskProc m_pFunction;
    };

    // Represents possible results of waiting on a task collection.
    // Parallels the public task_group_status enum (not_complete / completed /
    // canceled) declared earlier in this header.
    enum _TaskCollectionStatus
    {
        _NotComplete,
        _Completed,
        _Canceled
    };

    // _UnrealizedChore represents an unrealized chore -- a unit of work that scheduled in a work
    // stealing capacity. Some higher level construct (language or library) will map atop this to provide
    // an usable abstraction to clients.
    class _UnrealizedChore : public _Chore
    {
    public:
        // Constructor for an unrealized chore. Explicitly clears the
        // runtime-owns-lifetime flag: it was previously left uninitialized, so
        // _GetRuntimeOwnsLifetime() could return an indeterminate value unless
        // _SetRuntimeOwnsLifetime() or _InternalAlloc() had run first.
        _UnrealizedChore()
            : _M_fRuntimeOwnsLifetime(false)
        {
        }

        // Sets the attachment state of the chore at the time of stealing.
        void _SetDetached(bool _FDetached);

        // Set flag that indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it.
        // The flag is ignored by _StructuredTaskCollection
        void _SetRuntimeOwnsLifetime(bool fValue) 
        { 
            _M_fRuntimeOwnsLifetime = fValue; 
        }

        // Returns the flag that indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it.
        // The flag is ignored by _StructuredTaskCollection
        bool _GetRuntimeOwnsLifetime() const
        {
            return _M_fRuntimeOwnsLifetime;
        }

        // Allocator to be used when runtime owns lifetime.
        template <typename _ChoreType, typename _Function>
        static _ChoreType * _InternalAlloc(const _Function& _Func)
        {
            // This is always invoked from the PPL layer by the user and can never be attached to the default scheduler. Therefore '_concrt_new' is not required here
            _ChoreType * _Chore = new _ChoreType(_Func);
            _Chore->_M_fRuntimeOwnsLifetime = true;
            return _Chore;
        }

    protected:
        // Invocation bridge between the _UnrealizedChore and PPL.
        template <typename _ChoreType>
        static void __cdecl _InvokeBridge(_ChoreType * _PChore)
        {
            (*_PChore)();
        }

    private:

        typedef void (__cdecl * CHOREFUNC)(_UnrealizedChore * _PChore);

        // Indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it.
        // This flag is ignored by _StructuredTaskCollection
        bool _M_fRuntimeOwnsLifetime;

#if 0 // PPLX
        // Cancellation via token to a stolen chore
        static void __cdecl _CancelViaToken(::pplx::details::ContextBase *pContext);
#endif // PPLX
    };

} // namespace details

} // namespace pplx

#pragma pack(pop)

#endif // _PPLX_H
