﻿#if !NET40
using System;
using System.Collections.Concurrent;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Runtime.InteropServices;
using System.Threading;

namespace CuteAnt.Threading
{
  // This class has been taken from https://github.com/dotnet/coreclr/blob/e07292d009d47a4920c19c669796b6893b307ec4/src/mscorlib/src/System/Threading/ThreadPool.cs#L42
  // This class has been taken from https://github.com/dotnet/coreclr/blob/e07292d009d47a4920c19c669796b6893b307ec4/src/mscorlib/src/System/Threading/ThreadPool.cs#L42
  /// <summary>
  /// A work-stealing thread-pool queue: one shared global FIFO queue plus one
  /// work-stealing queue per worker thread, with an <c>UnfairSemaphore</c> used
  /// to wake workers when items are enqueued.
  /// </summary>
  [StructLayout(LayoutKind.Sequential)] // enforce layout so that padding reduces false sharing
  public sealed class ThreadPoolWorkQueue
  {
    internal bool loggingEnabled;
    // Global queue, used when an item cannot go to a thread-local queue.
    internal readonly ConcurrentQueue<WorkItem> workItems = new ConcurrentQueue<WorkItem>();

    // The PaddingFor32 fields separate the hot fields onto distinct cache lines.
    private PaddingFor32 pad1;

    internal readonly WorkStealingQueueList workStealingQueues = new WorkStealingQueueList();
    private static readonly int processorCount = Environment.ProcessorCount;
    private const int CACHE_LINE_SIZE = 64;
    private const int CompletedState = 1;
    // 0 until CompleteAdding() runs, then CompletedState.
    private int isAddingCompleted;

    private PaddingFor32 pad2;

    // Number of workers asked to wake that have not yet picked up the request;
    // bounded above by processorCount (see EnsureThreadRequested).
    private volatile int numOutstandingThreadRequests = 0;

    private PaddingFor32 pad3;

    private readonly UnfairSemaphore semaphore = new UnfairSemaphore();

    /// <summary>
    /// Returns the calling thread's <see cref="ThreadPoolWorkQueueThreadLocals"/>,
    /// creating one bound to this queue on first use and caching it in the
    /// thread-static slot.
    /// </summary>
    public ThreadPoolWorkQueueThreadLocals EnsureCurrentThreadHasQueue()
    {
      var locals = ThreadPoolWorkQueueThreadLocals.threadLocals;
      if (locals == null)
      {
        locals = new ThreadPoolWorkQueueThreadLocals(this);
        ThreadPoolWorkQueueThreadLocals.threadLocals = locals;
      }
      return locals;
    }

    /// <summary>
    /// Requests that one worker thread be woken to process queued work, unless
    /// there are already as many outstanding requests as processors.
    /// </summary>
    internal void EnsureThreadRequested()
    {
      // Cap outstanding wake-ups at the processor count; more would only make
      // workers contend for items that aren't there.
      for (var observed = numOutstandingThreadRequests; observed < processorCount; )
      {
        var witnessed = Interlocked.CompareExchange(ref numOutstandingThreadRequests, observed + 1, observed);
        if (witnessed == observed)
        {
          semaphore.Release();
          return;
        }

        // Another thread changed the count; retry from its value.
        observed = witnessed;
      }
    }

    /// <summary>
    /// Blocks the calling worker until the semaphore signals that work may be
    /// available, then decrements the outstanding-thread-request count so new
    /// requests can be issued.
    /// </summary>
    internal void WaitForWork()
    {
      semaphore.Wait();
      MarkThreadRequestSatisfied();
    }

    /// <summary>
    /// Schedules a work item. Items go to the calling thread's local
    /// work-stealing queue when one exists for this work queue; otherwise (or
    /// when <paramref name="forceGlobal"/> is set) to the shared global queue.
    /// A thread wake-up is requested either way.
    /// </summary>
    /// <param name="callback">The work item to schedule.</param>
    /// <param name="forceGlobal">True to bypass the thread-local queue.</param>
    public void Enqueue(WorkItem callback, bool forceGlobal)
    {
      var locals = forceGlobal ? null : ThreadPoolWorkQueueThreadLocals.threadLocals;

      // Only use the local queue if it actually belongs to this work queue.
      if (locals != null && locals.workQueue == this)
      {
        locals.workStealingQueue.LocalPush(callback);
      }
      else
      {
        workItems.Enqueue(callback);
      }

      EnsureThreadRequested();
    }

    /// <summary>
    /// Records that one of our outstanding thread requests has been satisfied,
    /// decrementing the count so that future calls to
    /// <see cref="EnsureThreadRequested"/> can issue new requests.
    /// </summary>
    internal void MarkThreadRequestSatisfied()
    {
      for (var observed = numOutstandingThreadRequests; observed > 0; )
      {
        var witnessed = Interlocked.CompareExchange(ref numOutstandingThreadRequests, observed - 1, observed);
        if (witnessed == observed)
        {
          return;
        }

        // Lost the race; retry from the value another thread installed.
        observed = witnessed;
      }
    }

    /// <summary>
    /// Attempts to find and remove the given item from the calling thread's
    /// local queue. Returns false when this thread has no local queue or the
    /// item is not found there.
    /// </summary>
    internal bool LocalFindAndPop(WorkItem callback)
    {
      var locals = ThreadPoolWorkQueueThreadLocals.threadLocals;
      if (locals == null)
      {
        return false;
      }

      return locals.workStealingQueue.LocalFindAndPop(callback);
    }

    /// <summary>
    /// Attempts to obtain a work item for the calling worker, trying in order:
    /// the thread's own local queue, the global queue, and finally stealing
    /// from other threads' local queues.
    /// </summary>
    /// <param name="tl">The calling thread's thread-local queue state.</param>
    /// <param name="workItem">The dequeued item, or null when none was found.</param>
    /// <returns>True when a work item was obtained.</returns>
    public bool TryDequeue(ThreadPoolWorkQueueThreadLocals tl, out WorkItem workItem)
    {
      WorkStealingQueue localWsq = tl.workStealingQueue;

      if ((workItem = localWsq.LocalPop()) == null && // first try the local queue
          !workItems.TryDequeue(out workItem)) // then try the global queue
      {
        // finally try to steal from another thread's local queue
        WorkStealingQueue[] queues = tl.workQueue.workStealingQueues.Queues;
        int c = queues.Length;
        Debug.Assert(c > 0, "There must at least be a queue for this thread.");
        int maxIndex = c - 1;
        // Start at a random victim so concurrent thieves spread across queues.
        int i = tl.random.Next(c);
        while (c > 0)
        {
          // Advance cyclically through the snapshot of queues.
          i = (i < maxIndex) ? i + 1 : 0;
          WorkStealingQueue otherQueue = queues[i];
          if (otherQueue != localWsq && otherQueue.CanSteal)
          {
            // A missed steal would normally prompt a later retry; this
            // implementation deliberately ignores it.
            var ignoredMissedSteal = false;
            workItem = otherQueue.TrySteal(ref ignoredMissedSteal);
            if (workItem != null)
            {
              break;
            }
          }
          c--;
        }
      }

      return workItem != null;
    }

    /// <summary>
    /// Marks the queue as complete for adding and wakes every worker (up to
    /// <see cref="UnfairSemaphore.MaxWorker"/>) so they drain the remaining
    /// items and exit. Safe to call more than once; only the first call acts.
    /// </summary>
    internal void CompleteAdding()
    {
      // Only the first caller performs the mass wake-up.
      if (Interlocked.Exchange(ref isAddingCompleted, CompletedState) == CompletedState)
      {
        return;
      }

      // When CompleteAdding() is called, we fill up the _outstandingRequests and the semaphore
      // This will ensure that all threads will unblock and try to execute the remaining item in
      // the queue. When IsAddingCompleted is set, all threads will exit once the queue is empty.

      while (true)
      {
        var observed = numOutstandingThreadRequests;
        var toRelease = UnfairSemaphore.MaxWorker - observed;

        if (Interlocked.CompareExchange(ref numOutstandingThreadRequests, UnfairSemaphore.MaxWorker, observed) == observed)
        {
          semaphore.Release((short)toRelease);
          return;
        }
      }
    }

    /// <summary>
    /// A copy-on-write list of all per-thread work-stealing queues. Readers
    /// obtain a stable snapshot via <see cref="Queues"/>; writers swap in a new
    /// array with an interlocked compare-exchange and retry on contention.
    /// </summary>
    internal class WorkStealingQueueList
    {
      private volatile WorkStealingQueue[] _queues = new WorkStealingQueue[0];

      /// <summary>A snapshot of the current queues; never null, possibly empty.</summary>
      public WorkStealingQueue[] Queues => _queues;

      /// <summary>Appends a queue to the list (copy-on-write).</summary>
      public void Add(WorkStealingQueue queue)
      {
        Debug.Assert(queue != null);
        while (true)
        {
          WorkStealingQueue[] snapshot = _queues;
          Debug.Assert(Array.IndexOf(snapshot, queue) == -1);

          var grown = new WorkStealingQueue[snapshot.Length + 1];
          Array.Copy(snapshot, 0, grown, 0, snapshot.Length);
          grown[snapshot.Length] = queue;
          if (Interlocked.CompareExchange(ref _queues, grown, snapshot) == snapshot)
          {
            return;
          }
        }
      }

      /// <summary>Removes a queue from the list (copy-on-write); no-op when absent.</summary>
      public void Remove(WorkStealingQueue queue)
      {
        Debug.Assert(queue != null);
        while (true)
        {
          WorkStealingQueue[] snapshot = _queues;
          if (snapshot.Length == 0)
          {
            return;
          }

          int pos = Array.IndexOf(snapshot, queue);
          if (pos == -1)
          {
            Debug.Fail("Should have found the queue");
            return;
          }

          // Copy the elements before and after the removed slot; either copy is
          // zero-length when the queue sits at an end of the array, which
          // Array.Copy permits.
          var shrunk = new WorkStealingQueue[snapshot.Length - 1];
          Array.Copy(snapshot, 0, shrunk, 0, pos);
          Array.Copy(snapshot, pos + 1, shrunk, pos, shrunk.Length - pos);

          if (Interlocked.CompareExchange(ref _queues, shrunk, snapshot) == snapshot)
          {
            return;
          }
        }
      }
    }

    /// <summary>
    /// A per-thread deque of work items. The owning thread pushes and pops at
    /// the tail, lock-free on the fast path; other threads steal from the head
    /// while holding <see cref="m_foreignLock"/>.
    /// </summary>
    public sealed class WorkStealingQueue
    {
      private const int INITIAL_SIZE = 32;
      // Circular buffer; slots may be nulled in place by LocalFindAndPop and
      // are filtered out by the pop/steal loops.
      internal volatile WorkItem[] m_array = new WorkItem[INITIAL_SIZE];
      // Always m_array.Length - 1 with a power-of-two length, so
      // (index & m_mask) maps the ever-growing indexes into the buffer.
      private volatile int m_mask = INITIAL_SIZE - 1;

#if DEBUG
      // in debug builds, start at the end so we exercise the index reset logic.
      private const int START_INDEX = int.MaxValue;
#else
      private const int START_INDEX = 0;
#endif

      // Steal end (head) and owner end (tail). The raw values grow
      // monotonically until reset; only masked values index the array.
      private volatile int m_headIndex = START_INDEX;
      private volatile int m_tailIndex = START_INDEX;

      // Taken by stealers, and by the owner only when the queue is nearly
      // empty/full or the indexes overflow.
      private SpinLock m_foreignLock = new SpinLock(enableThreadOwnerTracking: false);

      /// <summary>
      /// Pushes an item at the tail. Must only be called by the owning thread.
      /// </summary>
      public void LocalPush(WorkItem obj)
      {
        int tail = m_tailIndex;

        // We're going to increment the tail; if we'll overflow, then we need to reset our counts
        if (tail == int.MaxValue)
        {
          bool lockTaken = false;
          try
          {
            m_foreignLock.Enter(ref lockTaken);

            if (m_tailIndex == int.MaxValue)
            {
              //
              // Rather than resetting to zero, we'll just mask off the bits we don't care about.
              // This way we don't need to rearrange the items already in the queue; they'll be found
              // correctly exactly where they are.  One subtlety here is that we need to make sure that
              // if head is currently < tail, it remains that way.  This happens to just fall out from
              // the bit-masking, because we only do this if tail == int.MaxValue, meaning that all
              // bits are set, so all of the bits we're keeping will also be set.  Thus it's impossible
              // for the head to end up > than the tail, since you can't set any more bits than all of 
              // them.
              //
              m_headIndex = m_headIndex & m_mask;
              m_tailIndex = tail = m_tailIndex & m_mask;
              Debug.Assert(m_headIndex <= m_tailIndex);
            }
          }
          finally
          {
            if (lockTaken)
              m_foreignLock.Exit(useMemoryBarrier: true);
          }
        }

        // When there are at least 2 elements' worth of space, we can take the fast path.
        if (tail < m_headIndex + m_mask)
        {
          Volatile.Write(ref m_array[tail & m_mask], obj);
          m_tailIndex = tail + 1;
        }
        else
        {
          // We need to contend with foreign pops, so we lock.
          bool lockTaken = false;
          try
          {
            m_foreignLock.Enter(ref lockTaken);

            int head = m_headIndex;
            int count = m_tailIndex - m_headIndex;

            // If there is still space (one left), just add the element.
            if (count >= m_mask)
            {
              // We're full; expand the queue by doubling its size.
              var newArray = new WorkItem[m_array.Length << 1];
              for (int i = 0; i < m_array.Length; i++)
                newArray[i] = m_array[(i + head) & m_mask];

              // Reset the field values, incl. the mask.
              m_array = newArray;
              m_headIndex = 0;
              m_tailIndex = tail = count;
              m_mask = (m_mask << 1) | 1;
            }

            Volatile.Write(ref m_array[tail & m_mask], obj);
            m_tailIndex = tail + 1;
          }
          finally
          {
            if (lockTaken)
              m_foreignLock.Exit(useMemoryBarrier: false);
          }
        }
      }

      /// <summary>
      /// Attempts to find and remove a specific item from this queue, nulling
      /// out its slot in place if it is not at the tail. Owner thread only.
      /// </summary>
      [SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
      public bool LocalFindAndPop(WorkItem obj)
      {
        // Fast path: check the tail. If equal, we can skip the lock.
        if (m_array[(m_tailIndex - 1) & m_mask] == obj)
        {
          WorkItem unused = LocalPop();
          Debug.Assert(unused == null || unused == obj);
          return unused != null;
        }

        // Else, do an O(N) search for the work item. The theory of work stealing and our
        // inlining logic is that most waits will happen on recently queued work.  And
        // since recently queued work will be close to the tail end (which is where we
        // begin our search), we will likely find it quickly.  In the worst case, we
        // will traverse the whole local queue; this is typically not going to be a
        // problem (although degenerate cases are clearly an issue) because local work
        // queues tend to be somewhat shallow in length, and because if we fail to find
        // the work item, we are about to block anyway (which is very expensive).
        for (int i = m_tailIndex - 2; i >= m_headIndex; i--)
        {
          if (m_array[i & m_mask] == obj)
          {
            // If we found the element, block out steals to avoid interference.
            bool lockTaken = false;
            try
            {
              m_foreignLock.Enter(ref lockTaken);

              // If we encountered a race condition, bail.
              if (m_array[i & m_mask] == null)
                return false;

              // Otherwise, null out the element.
              Volatile.Write(ref m_array[i & m_mask], null);

              // And then check to see if we can fix up the indexes (if we're at
              // the edge).  If we can't, we just leave nulls in the array and they'll
              // get filtered out eventually (but may lead to superfluous resizing).
              if (i == m_tailIndex)
                m_tailIndex -= 1;
              else if (i == m_headIndex)
                m_headIndex += 1;

              return true;
            }
            finally
            {
              if (lockTaken)
                m_foreignLock.Exit(useMemoryBarrier: false);
            }
          }
        }

        return false;
      }

      /// <summary>Pops the item at the tail, or returns null when the queue looks empty. Owner thread only.</summary>
      public WorkItem LocalPop() => m_headIndex < m_tailIndex ? LocalPopCore() : null;

      [SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
      private WorkItem LocalPopCore()
      {
        while (true)
        {
          int tail = m_tailIndex;
          if (m_headIndex >= tail)
          {
            return null;
          }

          // Decrement the tail using a fence to ensure subsequent read doesn't come before.
          tail -= 1;
          Interlocked.Exchange(ref m_tailIndex, tail);

          // If there is no interaction with a take, we can head down the fast path.
          if (m_headIndex <= tail)
          {
            int idx = tail & m_mask;
            WorkItem obj = Volatile.Read(ref m_array[idx]);

            // Check for nulls in the array.
            if (obj == null) continue;

            m_array[idx] = null;
            return obj;
          }
          else
          {
            // Interaction with takes: 0 or 1 elements left.
            bool lockTaken = false;
            try
            {
              m_foreignLock.Enter(ref lockTaken);

              if (m_headIndex <= tail)
              {
                // Element still available. Take it.
                int idx = tail & m_mask;
                WorkItem obj = Volatile.Read(ref m_array[idx]);

                // Check for nulls in the array.
                if (obj == null) continue;

                m_array[idx] = null;
                return obj;
              }
              else
              {
                // If we encountered a race condition and element was stolen, restore the tail.
                m_tailIndex = tail + 1;
                return null;
              }
            }
            finally
            {
              if (lockTaken)
                m_foreignLock.Exit(useMemoryBarrier: false);
            }
          }
        }
      }

      /// <summary>True when the queue appears non-empty to a would-be stealer.</summary>
      public bool CanSteal => m_headIndex < m_tailIndex;

      /// <summary>
      /// Attempts to steal the item at the head on behalf of another thread.
      /// Returns null on failure; sets <paramref name="missedSteal"/> when a
      /// potential item could not be taken (lock busy or lost a race). The loop
      /// only repeats when a nulled-out slot is skipped.
      /// </summary>
      public WorkItem TrySteal(ref bool missedSteal)
      {
        while (true)
        {
          if (CanSteal)
          {
            bool taken = false;
            try
            {
              m_foreignLock.TryEnter(ref taken);
              if (taken)
              {
                // Increment head, and ensure read of tail doesn't move before it (fence).
                int head = m_headIndex;
                Interlocked.Exchange(ref m_headIndex, head + 1);

                if (head < m_tailIndex)
                {
                  int idx = head & m_mask;
                  WorkItem obj = Volatile.Read(ref m_array[idx]);

                  // Check for nulls in the array.
                  if (obj == null) continue;

                  m_array[idx] = null;
                  return obj;
                }
                else
                {
                  // Failed, restore head.
                  m_headIndex = head;
                }
              }
            }
            finally
            {
              if (taken)
                m_foreignLock.Exit(useMemoryBarrier: false);
            }

            missedSteal = true;
          }

          return null;
        }
      }
    }

    #region UnfairSemaphore implementation

    // This class has been copied from:
    // https://github.com/akkadotnet/akka.net/blob/45533db309d2c5aea0be59918ecd92f655535462/src/core/Akka/Helios.Concurrency.DedicatedThreadPool.cs#L575
    // which was translated from https://github.com/dotnet/coreclr/blob/97433b9d153843492008652ff6b7c3bf4d9ff31c/src/vm/win32threadpool.h#L124

    // UnfairSemaphore is a more scalable semaphore than Semaphore.  It prefers to release threads that have more recently begun waiting,
    // to preserve locality.  Additionally, very recently-waiting threads can be released without an additional kernel transition to unblock
    // them, which reduces latency.
    //
    // UnfairSemaphore is only appropriate in scenarios where the order of unblocking threads is not important, and where threads frequently
    // need to be woken.

    /// <summary>
    /// A scalable semaphore that prefers to release recently-arrived waiters
    /// (see the explanatory comments above this class). Threads first spin,
    /// then fall back to blocking on a kernel semaphore.
    /// </summary>
    [StructLayout(LayoutKind.Sequential)]
    private sealed class UnfairSemaphore
    {
      public const int MaxWorker = 0x7FFF;

      private static readonly int ProcessorCount = Environment.ProcessorCount;

      // We track everything we care about in a single 64-bit struct to allow us to 
      // do CompareExchanges on this for atomic updates.
      [StructLayout(LayoutKind.Explicit)]
      private struct SemaphoreState
      {
        //how many threads are currently spin-waiting for this semaphore?
        [FieldOffset(0)]
        public short Spinners;

        //how much of the semaphore's count is available to spinners?
        [FieldOffset(2)]
        public short CountForSpinners;

        //how many threads are blocked in the OS waiting for this semaphore?
        [FieldOffset(4)]
        public short Waiters;

        //how much count is available to waiters?
        [FieldOffset(6)]
        public short CountForWaiters;

        // Overlays the four shorts above as one long for atomic reads/CAS.
        [FieldOffset(0)]
        public long RawData;
      }

      [StructLayout(LayoutKind.Explicit, Size = 64)]
      private struct CacheLinePadding
      { }

      // Kernel semaphore used only by threads that have given up spinning.
      private readonly Semaphore m_semaphore;

      // padding to ensure we get our own cache line
#pragma warning disable 169
      private readonly CacheLinePadding m_padding1;
      private SemaphoreState m_state;
      private readonly CacheLinePadding m_padding2;
#pragma warning restore 169

      public UnfairSemaphore()
      {
        m_semaphore = new Semaphore(0, short.MaxValue);
      }

      /// <summary>Waits indefinitely for the semaphore; always returns true.</summary>
      public bool Wait()
      {
        return Wait(Timeout.InfiniteTimeSpan);
      }

      /// <summary>
      /// Waits for the semaphore: first tries to take available spinner count,
      /// then spins, then blocks on the kernel semaphore with the given timeout.
      /// Returns false only when the kernel wait times out.
      /// </summary>
      public bool Wait(TimeSpan timeout)
      {
        while (true)
        {
          SemaphoreState currentCounts = GetCurrentState();
          SemaphoreState newCounts = currentCounts;

          // First, just try to grab some count.
          if (currentCounts.CountForSpinners > 0)
          {
            --newCounts.CountForSpinners;
            if (TryUpdateState(newCounts, currentCounts))
              return true;
          }
          else
          {
            // No count available, become a spinner
            ++newCounts.Spinners;
            if (TryUpdateState(newCounts, currentCounts))
              break;
          }
        }

        //
        // Now we're a spinner.  
        //
        int numSpins = 0;
        const int spinLimitPerProcessor = 50;
        while (true)
        {
          SemaphoreState currentCounts = GetCurrentState();
          SemaphoreState newCounts = currentCounts;

          if (currentCounts.CountForSpinners > 0)
          {
            --newCounts.CountForSpinners;
            --newCounts.Spinners;
            if (TryUpdateState(newCounts, currentCounts))
              return true;
          }
          else
          {
            // Spin budget shrinks as more spinners compete for the same count.
            double spinnersPerProcessor = (double)currentCounts.Spinners / ProcessorCount;
            int spinLimit = (int)((spinLimitPerProcessor / spinnersPerProcessor) + 0.5);
            if (numSpins >= spinLimit)
            {
              --newCounts.Spinners;
              ++newCounts.Waiters;
              if (TryUpdateState(newCounts, currentCounts))
                break;
            }
            else
            {
              //
              // We yield to other threads using Thread.Sleep(0) rather than the more traditional Thread.Yield().
              // This is because Thread.Yield() does not yield to threads currently scheduled to run on other
              // processors.  On a 4-core machine, for example, this means that Thread.Yield() is only ~25% likely
              // to yield to the correct thread in some scenarios.
              // Thread.Sleep(0) has the disadvantage of not yielding to lower-priority threads.  However, this is ok because
              // once we've called this a few times we'll become a "waiter" and wait on the Semaphore, and that will
              // yield to anything that is runnable.
              //
              Thread.Sleep(0);
              numSpins++;
            }
          }
        }

        //
        // Now we're a waiter
        //
        bool waitSucceeded = m_semaphore.WaitOne(timeout);

        while (true)
        {
          SemaphoreState currentCounts = GetCurrentState();
          SemaphoreState newCounts = currentCounts;

          --newCounts.Waiters;

          if (waitSucceeded)
            --newCounts.CountForWaiters;

          if (TryUpdateState(newCounts, currentCounts))
            return waitSucceeded;
        }
      }

      /// <summary>Releases one count.</summary>
      public void Release()
      {
        Release(1);
      }

      /// <summary>
      /// Releases <paramref name="count"/> counts, preferring current spinners
      /// (no kernel transition), then blocked waiters, banking any remainder
      /// for future spinners.
      /// </summary>
      public void Release(short count)
      {
        while (true)
        {
          SemaphoreState currentState = GetCurrentState();
          SemaphoreState newState = currentState;

          short remainingCount = count;

          // First, prefer to release existing spinners,
          // because a) they're hot, and b) we don't need a kernel
          // transition to release them.
          short spinnersToRelease = Math.Max((short)0, Math.Min(remainingCount, (short)(currentState.Spinners - currentState.CountForSpinners)));
          newState.CountForSpinners += spinnersToRelease;
          remainingCount -= spinnersToRelease;

          // Next, prefer to release existing waiters
          short waitersToRelease = Math.Max((short)0, Math.Min(remainingCount, (short)(currentState.Waiters - currentState.CountForWaiters)));
          newState.CountForWaiters += waitersToRelease;
          remainingCount -= waitersToRelease;

          // Finally, release any future spinners that might come our way
          newState.CountForSpinners += remainingCount;

          // Try to commit the transaction
          if (TryUpdateState(newState, currentState))
          {
            // Now we need to release the waiters we promised to release
            if (waitersToRelease > 0)
              m_semaphore.Release(waitersToRelease);

            break;
          }
        }
      }

      /// <summary>
      /// Atomically installs <paramref name="newState"/> if the state still
      /// equals <paramref name="currentState"/>; asserts invariants on success.
      /// </summary>
      private bool TryUpdateState(SemaphoreState newState, SemaphoreState currentState)
      {
        if (Interlocked.CompareExchange(ref m_state.RawData, newState.RawData, currentState.RawData) == currentState.RawData)
        {
          Debug.Assert(newState.CountForSpinners <= MaxWorker, "CountForSpinners is greater than MaxWorker");
          Debug.Assert(newState.CountForSpinners >= 0, "CountForSpinners is lower than zero");
          Debug.Assert(newState.Spinners <= MaxWorker, "Spinners is greater than MaxWorker");
          Debug.Assert(newState.Spinners >= 0, "Spinners is lower than zero");
          Debug.Assert(newState.CountForWaiters <= MaxWorker, "CountForWaiters is greater than MaxWorker");
          Debug.Assert(newState.CountForWaiters >= 0, "CountForWaiters is lower than zero");
          Debug.Assert(newState.Waiters <= MaxWorker, "Waiters is greater than MaxWorker");
          Debug.Assert(newState.Waiters >= 0, "Waiters is lower than zero");
          Debug.Assert(newState.CountForSpinners + newState.CountForWaiters <= MaxWorker, "CountForSpinners + CountForWaiters is greater than MaxWorker");

          return true;
        }

        return false;
      }

      /// <summary>Takes a snapshot of the packed semaphore state.</summary>
      private SemaphoreState GetCurrentState()
      {
        // Volatile.Read of a long can get a partial read in x86 but the invalid
        // state will be detected in TryUpdateState with the CompareExchange.

        SemaphoreState state = new SemaphoreState();
        state.RawData = Volatile.Read(ref m_state.RawData);
        return state;
      }
    }

    #endregion

    // Padding struct sized so that, together with a preceding int-sized field,
    // it fills a whole cache line, reducing false sharing between hot fields.
    [StructLayout(LayoutKind.Explicit, Size = CACHE_LINE_SIZE - sizeof(int))]
    private struct PaddingFor32
    {
    }
  }

  // Simple random number generator. We don't need great randomness, we just need a little and for it to be fast.
  public struct FastRandom // xorshift prng
  {
    private uint _w, _x, _y, _z;

    /// <summary>Seeds the generator; identical seeds yield identical sequences.</summary>
    public FastRandom(int seed)
    {
      _x = (uint)seed;
      _w = 88675123;
      _y = 362436069;
      _z = 521288629;
    }

    /// <summary>
    /// Returns a pseudo-random value in [0, maxValue) using the xorshift
    /// recurrence. <paramref name="maxValue"/> must be positive.
    /// </summary>
    public int Next(int maxValue)
    {
      Debug.Assert(maxValue > 0);

      // Standard xorshift step: fold the oldest word into the feedback term,
      // shift the pipeline, then mix the feedback into the newest word.
      uint feedback = _x ^ (_x << 11);
      feedback ^= feedback >> 8;
      _x = _y;
      _y = _z;
      _z = _w;
      _w ^= (_w >> 19) ^ feedback;

      return (int)(_w % (uint)maxValue);
    }
  }

  // Holds a WorkStealingQueue, and removes it from the list when this object is no longer referenced.
  public sealed class ThreadPoolWorkQueueThreadLocals
  {
    // One instance per thread; created lazily by EnsureCurrentThreadHasQueue.
    [ThreadStatic]
    public static ThreadPoolWorkQueueThreadLocals threadLocals;

    public readonly ThreadPoolWorkQueue workQueue;
    public readonly ThreadPoolWorkQueue.WorkStealingQueue workStealingQueue;
    public FastRandom random = new FastRandom(Thread.CurrentThread.ManagedThreadId); // mutable struct, do not copy or make readonly

    /// <summary>
    /// Creates per-thread state for <paramref name="tpq"/> and registers a new
    /// work-stealing queue with it.
    /// </summary>
    public ThreadPoolWorkQueueThreadLocals(ThreadPoolWorkQueue tpq)
    {
      workQueue = tpq;
      workStealingQueue = new ThreadPoolWorkQueue.WorkStealingQueue();
      workQueue.workStealingQueues.Add(workStealingQueue);
    }

    // Drains any items left in the local queue back into the global queue so
    // another thread can execute them, then unregisters the local queue.
    private void CleanUp()
    {
      if (workStealingQueue == null || workQueue == null)
      {
        return;
      }

      WorkItem item;
      while ((item = workStealingQueue.LocalPop()) != null)
      {
        Debug.Assert(null != item);
        workQueue.Enqueue(item, forceGlobal: true);
      }

      workQueue.workStealingQueues.Remove(workStealingQueue);
    }

    ~ThreadPoolWorkQueueThreadLocals()
    {
      // Since the purpose of calling CleanUp is to transfer any pending workitems into the global
      // queue so that they will be executed by another thread, there's no point in doing this cleanup
      // if we're in the process of shutting down or unloading the AD.  In those cases, the work won't
      // execute anyway.  And there are subtle race conditions involved there that would lead us to do the wrong
      // thing anyway.  So we'll only clean up if this is a "normal" finalization.
      if (!(Environment.HasShutdownStarted || AppDomain.CurrentDomain.IsFinalizingForUnload()))
        CleanUp();
    }
  }
}
#endif
