using BepuUtilities;
using BepuUtilities.Collections;
using BepuUtilities.Memory;
using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Threading;

namespace BepuPhysics
{
    public partial class Solver
    {

        // This looks a bit more complicated than the sequence of forloops you might expect.
        // A simple implementation would look something like:
        // 1) PRESTEP: Parallel dispatch over all constraints, regardless of batch. (The prestep doesn't write shared data, so there's no need to redispatch per batch.)
        // 2) WARMSTART: Loop over all constraint batches, parallel dispatch over each batch's constraints. (Warm starts read and write velocities, so batch boundaries must be respected.)
        // 3) SOLVE ITERATIONS: Loop over iterations, loop over all constraint batches, parallel dispatch over each batch's constraints. (Solve iterations also read/write.)

        // There are a few problems with this approach:
        // 1) Fork-join dispatches are not free. Expect on the order of 2us of overhead on the main thread per dispatch, regardless of the workload.
        // With 10 constraint batches and 10 iterations, that's up to 1 + 10 + 10 * 10 = 111 dispatches. Over a fifth of a millisecond of pure overhead.
        // That's just a byproduct of a generic scheduler being unable to exploit extremely fine-grained application knowledge.
        // Every dispatch has to wake the threads and schedule them, and then the threads have to figure out when to return to a blocked state
        // once no more work is available, and so on, over and over again.

        // 2) A forloop provider is not guaranteed to maintain the relationship between forloop index and underlying hardware thread across multiple dispatches.
        // In fact, we should expect the opposite. Work stealing is an important feature of thread pools for avoiding pointless idle time.
        // Unfortunately, it can destroy potential cache locality across solver iterations. For smaller simulations on a single processor this is a minor concern-
        // if a single core's solver iteration data fits in L2, 'sticky' scheduling would help across iterations. With 256 KiB of L2 per core,
        // that would be a simulation with only about 700 large constraints per core. (That's actually quite a lot back in BEPUphysics v1 land... not so much in v2.)

        // For larger simulations, sticky scheduling becomes more important. Consider L3; an 8 MiB L3 cache can hold over 20000 heavy constraints'
        // worth of solver iteration data. It's usually shared across all cores of a processor, so stickiness isn't always useful there. However, consider
        // a multiprocessor system. With multiple processors there are multiple L3 caches. Limiting the amount of communication between processors
        // (and with potentially remote parts of system memory) matters, since those accesses tend to have much higher latency and lower total bandwidth than direct L3 accesses.
        // You don't have to resort to big servers to see such effects, either- some processors, notably the recent Ryzen line, effectively behave a bit like
        // multiple processors that happen to be stuck on the same chip. If the application demands a lot of intercore communication, performance suffers.
        // And of course, cache misses are just plain bad.

        // 3) Work stealing implementations that lack application knowledge tend to make a given worker operate across noncontiguous regions, harming locality and forcing cache misses.

        // So what do we do instead? We have some special guarantees:
        // 1) We have to run a series of solver iterations in sequence, covering the exact same data over and over. Even the prestep and warm start cover much of the same data.
        // 2) We control the dispatch sizes within a frame. They will be the same, over and over, and the next dispatch follows immediately after the last.
        // 3) We can guarantee individual work blocks are fairly small. (A handful of microseconds.)

        // So the implemented solution has several parts:
        // 1) Dispatch *once* at the constraint batch boundaries, with fine-grained synchronization and busy-wait blocking. Unless the OS
        // reschedules a thread (which is very possible, but not frequent), a work index stays associated with the same underlying hardware.
        // 2) Worker start locations are spread out across the work blocks, so there's a high probability that each worker claims multiple blocks contiguously.
        // 3) Workers track the largest contiguous region they were able to claim within an iteration. That's used to provide a better start guess for the next iteration.

        // As a result, the same core/processor tends to work on the same data over the course of the solve, for the most part. Hooray!

        // A couple of notes:
        // 1) We explicitly don't care about maintaining the worker-data relationship between frames. The caches will likely be trashed by the rest of the program anyway- even other parts
        // of the physics simulation will evict things. We're mainly concerned with scheduling within the solver.
        // 2) Note that neither the prestep nor the warm start is used to adjust the solve's work distribution- neither of those phases is proportional to the solve iteration load.

        // 3) Per-core data stickiness doesn't really provide much value for L1/L2 caches. It doesn't take much to evict all the old data- a 3770K has only 256KB in its L2.
        // Even if we optimized every constraint so hard that the heaviest constraints required no more than 350B per iteration
        // (602B per iteration at the time of writing), a single core's L2 could only hold roughly 750 constraints.
        // So, in the ideal case, a 3770K would only avoid per-iteration evictions if the simulation had fewer than about 3000 such constraints in total.
        // A 3770K at 4.5GHz could chew through the prestep-warmstart-8-iterations of that in about 2.6 milliseconds. In other words, that's a pretty small simulation.

        // Sticky scheduling only becomes more valuable with multiprocessor systems (or multiprocessor-ish systems like Ryzen) and large data sets, as you might find in an MMO server.
        // The 3770K's 8MB of L3 cache shared across all cores is enough to hold a bit under 24000 large constraints' worth of data between solve iterations, which is a pretty big chunk.
        // With four similar processors, you could ideally handle almost 100,000 constraints without suffering significant evictions in each processor's L3 during iterations.
        // Without sticky scheduling, memory bandwidth use could skyrocket during iterations as L3 gets missed over and over.


        // Describes a contiguous range of bundles within a single type batch that one worker can execute as a unit.
        struct WorkBlock
        {
            public int BatchIndex;
            public int TypeBatchIndex;
            /// <summary>
            /// Index of the first bundle in the block.
            /// </summary>
            public int StartBundle;
            /// <summary>
            /// Exclusive end index of the bundle range. The index of the last bundle in the block is End - 1.
            /// </summary>
            public int End;
        }

        // Describes a contiguous range of fallback batch bodies whose jacobi results get scattered back to velocities as a unit.
        struct FallbackScatterWorkBlock
        {
            // Inclusive start of the body range covered by this block.
            public int Start;
            // Exclusive end of the body range covered by this block.
            public int End;
        }

        // Filter used by work block construction and validation to select which type batches (and whether the fallback batch) participate in a solve pass.
        interface ITypeBatchSolveFilter
        {
            // True if fallback work blocks should be built for the fallback batch.
            bool AllowFallback { get; }
            // Returns true if type batches with the given type id should be included.
            bool AllowType(int typeId);
        }

        /// <summary>
        /// Filter for the main solve; it excludes nothing, so both properties unconditionally permit everything.
        /// </summary>
        struct MainSolveFilter : ITypeBatchSolveFilter
        {
            public bool AllowFallback
            {
                [MethodImpl(MethodImplOptions.AggressiveInlining)]
                get => true;
            }

            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public bool AllowType(int typeId) => true;
        }

        // Partitions the active set's type batches (those accepted by the filter) into work blocks sized in bundles,
        // filling context.ConstraintBlocks and context.BatchBoundaries, plus context.FallbackBlocks when a fallback batch exists.
        private unsafe void BuildWorkBlocks<TTypeBatchFilter>(BufferPool pool, int minimumBlockSizeInBundles, int targetBlocksPerBatch, ref TTypeBatchFilter typeBatchFilter) where TTypeBatchFilter : ITypeBatchSolveFilter
        {
            ref var activeSet = ref ActiveSet;
            context.ConstraintBlocks.Blocks = new QuickList<WorkBlock>(targetBlocksPerBatch * activeSet.Batches.Count, pool);
            pool.Take(activeSet.Batches.Count, out context.BatchBoundaries);
            for (int batchIndex = 0; batchIndex < activeSet.Batches.Count; ++batchIndex)
            {
                ref var typeBatches = ref activeSet.Batches[batchIndex].TypeBatches;
                // Count the bundles across the batch's allowed type batches so each type batch gets a proportional share of the target block count.
                var bundleCount = 0;
                for (int typeBatchIndex = 0; typeBatchIndex < typeBatches.Count; ++typeBatchIndex)
                {
                    if (typeBatchFilter.AllowType(typeBatches[typeBatchIndex].TypeId))
                    {
                        bundleCount += typeBatches[typeBatchIndex].BundleCount;
                    }
                }

                for (int typeBatchIndex = 0; typeBatchIndex < typeBatches.Count; ++typeBatchIndex)
                {
                    ref var typeBatch = ref typeBatches[typeBatchIndex];
                    if (!typeBatchFilter.AllowType(typeBatch.TypeId))
                    {
                        continue;
                    }
                    // Block count is the type batch's proportional share of the batch target, capped so blocks don't fall below the minimum size, and always at least 1.
                    var typeBatchSizeFraction = typeBatch.BundleCount / (float)bundleCount;
                    var typeBatchMaximumBlockCount = typeBatch.BundleCount / (float)minimumBlockSizeInBundles;
                    var typeBatchBlockCount = Math.Max(1, (int)Math.Min(typeBatchMaximumBlockCount, targetBlocksPerBatch * typeBatchSizeFraction));
                    int previousEnd = 0;
                    // Leftover bundles are distributed across the leading blocks, one extra bundle apiece.
                    var baseBlockSizeInBundles = typeBatch.BundleCount / typeBatchBlockCount;
                    var remainder = typeBatch.BundleCount - baseBlockSizeInBundles * typeBatchBlockCount;
                    for (int newBlockIndex = 0; newBlockIndex < typeBatchBlockCount; ++newBlockIndex)
                    {
                        ref var block = ref context.ConstraintBlocks.Blocks.Allocate(pool);
                        var blockBundleCount = newBlockIndex < remainder ? baseBlockSizeInBundles + 1 : baseBlockSizeInBundles;
                        block.BatchIndex = batchIndex;
                        block.TypeBatchIndex = typeBatchIndex;
                        block.StartBundle = previousEnd;
                        block.End = previousEnd + blockBundleCount;
                        previousEnd = block.End;
                        Debug.Assert(block.StartBundle >= 0 && block.StartBundle < typeBatch.BundleCount);
                        Debug.Assert(block.End >= block.StartBundle + Math.Min(minimumBlockSizeInBundles, typeBatch.BundleCount) && block.End <= typeBatch.BundleCount);
                    }
                }
                // BatchBoundaries stores the exclusive end of each batch's span within the block list.
                context.BatchBoundaries[batchIndex] = context.ConstraintBlocks.Blocks.Count;
            }
            if (typeBatchFilter.AllowFallback && activeSet.Batches.Count > FallbackBatchThreshold)
            {
                // There is a fallback batch, so we need to create fallback work blocks for it.
                var blockCount = Math.Min(targetBlocksPerBatch, ActiveSet.Fallback.BodyCount);
                context.FallbackBlocks.Blocks = new QuickList<FallbackScatterWorkBlock>(blockCount, pool);
                var baseBodiesPerBlock = activeSet.Fallback.BodyCount / blockCount;
                var remainder = activeSet.Fallback.BodyCount - baseBodiesPerBlock * blockCount;
                int previousEnd = 0;
                for (int i = 0; i < blockCount; ++i)
                {
                    var bodiesInBlock = i < remainder ? baseBodiesPerBlock + 1 : baseBodiesPerBlock;
                    context.FallbackBlocks.Blocks.AllocateUnsafely() = new FallbackScatterWorkBlock { Start = previousEnd, End = previousEnd = previousEnd + bodiesInBlock };
                }
            }
        }


        // Tracks the interval of work blocks known to have been claimed by some worker; intervals grow monotonically within a stage.
        struct WorkerBounds
        {
            /// <summary>
            /// Inclusive start of the blocks known to be claimed by any worker.
            /// </summary>
            public int Min;
            /// <summary>
            /// Exclusive end of the blocks known to be claimed by any worker.
            /// </summary>
            public int Max;

            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public static void Merge(ref WorkerBounds current, ref WorkerBounds mergeSource)
            {
                // The merged interval is simply the union's bounding interval.
                current.Min = Math.Min(current.Min, mergeSource.Min);
                current.Max = Math.Max(current.Max, mergeSource.Max);
            }
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public static bool BoundsTouch(ref WorkerBounds a, ref WorkerBounds b)
            {
                // Note that touching is sufficient reason to merge. They don't have to actually intersect.
                // (Deliberately kept in subtraction form; cleared bounds use int.MaxValue/int.MinValue sentinels, so the overflow behavior here is part of the contract.)
                return a.Min - b.Max <= 0 && b.Min - a.Max <= 0;
            }
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public static void MergeIfTouching(ref WorkerBounds current, ref WorkerBounds other)
            {
                // Could be a little more clever here if it matters.
                if (!BoundsTouch(ref current, ref other))
                    return;
                Merge(ref current, ref other);
            }
        }
        // Pairs a list of work blocks with a parallel array of claim flags that workers compare-exchange to claim blocks.
        struct WorkBlocks<T> where T : unmanaged
        {
            public QuickList<T> Blocks;
            // One claim slot per block; toggled between the claimed/unclaimed states by the traversal functions.
            public Buffer<int> Claims;

            // Allocates and zeroes one claim slot per block.
            public void CreateClaims(BufferPool pool)
            {
                pool.TakeAtLeast(Blocks.Count, out Claims);
                Claims.Clear(0, Blocks.Count);
            }
            // Returns the block list and the claims buffer to the pool.
            public void Dispose(BufferPool pool)
            {
                Blocks.Dispose(pool);
                pool.Return(ref Claims);
            }
        }

        // Just bundling these up to avoid polluting the 'this.' intellisense.
        struct MultithreadingParameters
        {
            // Timestep duration consumed by the prestep stage.
            public float Dt;
            public WorkBlocks<WorkBlock> ConstraintBlocks;
            // Exclusive end index into ConstraintBlocks for each constraint batch; batch i spans [BatchBoundaries[i-1], BatchBoundaries[i]).
            public Buffer<int> BatchBoundaries;
            public WorkBlocks<FallbackScatterWorkBlock> FallbackBlocks;
            // Incremented by each worker when it finishes a stage; InterstageSync spins on this until it reaches WorkerCount * stageIndex.
            public int WorkerCompletedCount;
            public int WorkerCount;
            public Buffer<FallbackTypeBatchResults> FallbackResults;

            // Two bounds buffers are ping-ponged between stages: one holds the current stage's worker bounds while the other is cleared for the next stage.
            public Buffer<WorkerBounds> WorkerBoundsA;
            public Buffer<WorkerBounds> WorkerBoundsB;

        }
        MultithreadingParameters context;


        // Folds every other worker's claimed interval into this worker's bounds; our own slot already is the target, so it's skipped.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        void MergeWorkerBounds(ref WorkerBounds bounds, ref Buffer<WorkerBounds> allWorkerBounds, int workerIndex)
        {
            for (int otherWorkerIndex = 0; otherWorkerIndex < context.WorkerCount; ++otherWorkerIndex)
            {
                if (otherWorkerIndex == workerIndex)
                    continue;
                WorkerBounds.MergeIfTouching(ref bounds, ref allWorkerBounds[otherWorkerIndex]);
            }
        }
        // Claims blocks at increasing indices starting from blockIndex, executing the stage on each claimed block,
        // until a block is already claimed or the batch end is reached. Returns the highest index claimed by this call.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        int TraverseForwardUntilBlocked<TStageFunction, TBlock>(ref TStageFunction stageFunction, ref WorkBlocks<TBlock> blocks, int blockIndex, ref WorkerBounds bounds, ref Buffer<WorkerBounds> allWorkerBounds, int workerIndex,
            int batchEnd, int claimedState, int unclaimedState)
            where TStageFunction : IStageFunction
            where TBlock : unmanaged
        {
            // Defaults to an invalid interval endpoint if no claim is made.
            int highestLocallyClaimedIndex = -1;
            while (true)
            {
                if (Interlocked.CompareExchange(ref blocks.Claims[blockIndex], claimedState, unclaimedState) == unclaimedState)
                {
                    highestLocallyClaimedIndex = blockIndex;
                    bounds.Max = blockIndex + 1; // Exclusive bound.
                    Debug.Assert(blockIndex < batchEnd);
                    stageFunction.Execute(this, blockIndex);
                    // Increment or exit.
                    if (++blockIndex == batchEnd)
                        break;
                }
                else
                {
                    // Already claimed.
                    bounds.Max = blockIndex + 1; // Exclusive bound.
                    break;
                }
            }
            Debug.Assert(bounds.Max <= batchEnd);
            MergeWorkerBounds(ref bounds, ref allWorkerBounds, workerIndex);
            Debug.Assert(bounds.Max <= batchEnd);
            return highestLocallyClaimedIndex;
        }
        // Claims blocks at decreasing indices starting from blockIndex, executing the stage on each claimed block,
        // until a block is already claimed or the batch start is reached. Returns the lowest index claimed by this call.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        int TraverseBackwardUntilBlocked<TStageFunction, TBlock>(ref TStageFunction stageFunction, ref WorkBlocks<TBlock> blocks, int blockIndex, ref WorkerBounds bounds, ref Buffer<WorkerBounds> allWorkerBounds, int workerIndex,
            int batchStart, int claimedState, int unclaimedState)
            where TStageFunction : IStageFunction
            where TBlock : unmanaged
        {
            // Defaults to an invalid interval endpoint (one past any valid block) if no claim is made.
            int lowestLocallyClaimedIndex = blocks.Blocks.Count;
            while (true)
            {
                if (Interlocked.CompareExchange(ref blocks.Claims[blockIndex], claimedState, unclaimedState) == unclaimedState)
                {
                    lowestLocallyClaimedIndex = blockIndex;
                    bounds.Min = blockIndex;
                    Debug.Assert(blockIndex >= batchStart);
                    stageFunction.Execute(this, blockIndex);
                    // Decrement or exit.
                    if (blockIndex == batchStart)
                        break;
                    --blockIndex;
                }
                else
                {
                    // Already claimed.
                    bounds.Min = blockIndex;
                    break;
                }
            }
            MergeWorkerBounds(ref bounds, ref allWorkerBounds, workerIndex);
            return lowestLocallyClaimedIndex;
        }
        // Per-block stage callback; implemented by structs used as generic type arguments in ExecuteStage.
        interface IStageFunction
        {
            void Execute(Solver solver, int blockIndex);
        }
        // Runs the prestep over one constraint work block's bundle range.
        struct PrestepStageFunction : IStageFunction
        {
            public float Dt;
            public float InverseDt;

            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public void Execute(Solver solver, int blockIndex)
            {
                ref var block = ref solver.context.ConstraintBlocks.Blocks[blockIndex];
                ref var typeBatch = ref solver.ActiveSet.Batches[block.BatchIndex].TypeBatches[block.TypeBatchIndex];
                var typeProcessor = solver.TypeProcessors[typeBatch.TypeId];
                // The prestep picks its path dynamically because it executes in parallel across all batches at once.
                // In contrast, warm start/solve have to dispatch once per batch, so the code path can be chosen at the entry point.
                if (block.BatchIndex < solver.FallbackBatchThreshold)
                    typeProcessor.Prestep(ref typeBatch, solver.bodies, Dt, InverseDt, block.StartBundle, block.End);
                else
                    typeProcessor.JacobiPrestep(ref typeBatch, solver.bodies, ref solver.ActiveSet.Fallback, Dt, InverseDt, block.StartBundle, block.End);
            }
        }
        // Runs the warm start over one constraint work block's bundle range.
        struct WarmStartStageFunction : IStageFunction
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public void Execute(Solver solver, int blockIndex)
            {
                // Resolve the block into its type batch, then hand the bundle range to the matching type processor.
                ref var workBlock = ref solver.context.ConstraintBlocks.Blocks[blockIndex];
                ref var batch = ref solver.ActiveSet.Batches[workBlock.BatchIndex];
                ref var typeBatch = ref batch.TypeBatches[workBlock.TypeBatchIndex];
                solver.TypeProcessors[typeBatch.TypeId].WarmStart(ref typeBatch, ref solver.bodies.ActiveSet.Velocities, workBlock.StartBundle, workBlock.End);
            }
        }
        // Runs one solve iteration over one constraint work block's bundle range.
        struct SolveStageFunction : IStageFunction
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public void Execute(Solver solver, int blockIndex)
            {
                // Resolve the block into its type batch, then hand the bundle range to the matching type processor.
                ref var workBlock = ref solver.context.ConstraintBlocks.Blocks[blockIndex];
                ref var batch = ref solver.ActiveSet.Batches[workBlock.BatchIndex];
                ref var typeBatch = ref batch.TypeBatches[workBlock.TypeBatchIndex];
                solver.TypeProcessors[typeBatch.TypeId].SolveIteration(ref typeBatch, ref solver.bodies.ActiveSet.Velocities, workBlock.StartBundle, workBlock.End);
            }
        }

        // Runs the jacobi fallback warm start over one constraint work block, accumulating into the per-type-batch fallback results.
        struct WarmStartFallbackStageFunction : IStageFunction
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public void Execute(Solver solver, int blockIndex)
            {
                ref var workBlock = ref solver.context.ConstraintBlocks.Blocks[blockIndex];
                ref var typeBatch = ref solver.ActiveSet.Batches[workBlock.BatchIndex].TypeBatches[workBlock.TypeBatchIndex];
                ref var jacobiResults = ref solver.context.FallbackResults[workBlock.TypeBatchIndex];
                solver.TypeProcessors[typeBatch.TypeId].JacobiWarmStart(ref typeBatch, ref solver.bodies.ActiveSet.Velocities, ref jacobiResults, workBlock.StartBundle, workBlock.End);
            }
        }
        // Runs one jacobi fallback solve iteration over one constraint work block, accumulating into the per-type-batch fallback results.
        struct SolveFallbackStageFunction : IStageFunction
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public void Execute(Solver solver, int blockIndex)
            {
                ref var workBlock = ref solver.context.ConstraintBlocks.Blocks[blockIndex];
                ref var typeBatch = ref solver.ActiveSet.Batches[workBlock.BatchIndex].TypeBatches[workBlock.TypeBatchIndex];
                ref var jacobiResults = ref solver.context.FallbackResults[workBlock.TypeBatchIndex];
                solver.TypeProcessors[typeBatch.TypeId].JacobiSolveIteration(ref typeBatch, ref solver.bodies.ActiveSet.Velocities, ref jacobiResults, workBlock.StartBundle, workBlock.End);
            }
        }

        // Scatters accumulated fallback results back into body velocities for one fallback block's body range.
        struct FallbackScatterStageFunction : IStageFunction
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public void Execute(Solver solver, int blockIndex)
            {
                ref var block = ref solver.context.FallbackBlocks.Blocks[blockIndex];
                solver.ActiveSet.Fallback.ScatterVelocities(solver.bodies, solver, ref solver.context.FallbackResults, block.Start, block.End);
            }
        }


        // TODO: This spin wait may well be a poor fit for newer systems like Threadripper.
        /// <summary>
        /// Behaves like the framework SpinWait, but never voluntarily yields its timeslice to off-core threads.
        /// </summary>
        /// <remarks>
        /// <para>There are three big reasons for using this over the regular framework SpinWait:</para>
        /// <para>1) The framework SpinWait relies on spinning for quite a while before resorting to any form of timeslice surrender.
        /// Empirically, that's not ideal for the solver- if the sync condition isn't satisfied within a few nanoseconds, it tends to be several microseconds away.
        /// This spin wait is far more aggressive about moving to yields.</para>
        /// <para>2) After a number of yields, the framework SpinWait will resort to calling Sleep.
        /// That expands the set of potentially schedulable threads to ones native to other cores. If we permit that transition, it will likely evict cached solver data.
        /// (The use of Sleep(0) is less concerning for very large simulations- each iteration can be big enough to evict everything from cache anyway-
        /// but using it in that context still wouldn't be much better than a yield.)</para>
        /// <para>3) After particularly long waits, the framework SpinWait resorts to Sleep(1). That is catastrophic for the solver- worse than merely disturbing cached data,
        /// it can simply prevent the thread from being rescheduled for a very long time (potentially most of a frame!) at the default clock resolution.</para>
        /// <para>Note that this is not a suggestion that the framework SpinWait should be changed; rather, the solver's requirements are very specific and don't match
        /// a good general purpose solution very well.</para></remarks>
        struct LocalSpinWait
        {
            public int WaitCount;

            // Empirically, being pretty aggressive about yielding produces the best results. This is pretty reasonable-
            // a single constraint bundle can take hundreds of nanoseconds to finish.
            // That would be a whole lot of spinning that some other thread could have used. At worst, we're being friendlier to other applications on the system.
            // This thread will likely be rescheduled on the same core, so it's unlikely that we'll lose any cache warmth (that we weren't going to lose anyway).
            public const int YieldThreshold = 3;

            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public void SpinOnce()
            {
                if (WaitCount >= YieldThreshold)
                {
                    Thread.Yield();
                }
                else
                {
                    // We're sacrificing an important feature of the newer framework spin waits- normalized spinning (RuntimeThread.OptimalMaxSpinWaitsPerSpinIteration).
                    // Different platforms can spin at significantly different rates, so a single constant value for maximum spin duration doesn't map well to all hardware.
                    // On the upside, we tend to care about two modes- very short waits, and medium length waits.
                    // The exact length of the 'short' waits doesn't matter much, so long as it's fairly short.
                    Thread.SpinWait(1 << WaitCount);
                    ++WaitCount;
                }

            }
        }


        // Synchronizes all workers at a stage boundary. No more work can be claimed, but not every thread will necessarily have finished the work it claimed,
        // so a dedicated sync is needed- after completing its local work, a worker increments the shared completion counter
        // and then spins until the counter reaches workerCount * stageIndex.
        void InterstageSync(ref int syncStageIndex)
        {
            ++syncStageIndex;
            var neededCompletionCount = context.WorkerCount * syncStageIndex;
            if (Interlocked.Increment(ref context.WorkerCompletedCount) != neededCompletionCount)
            {
                var spinWait = new LocalSpinWait();
                while (Volatile.Read(ref context.WorkerCompletedCount) < neededCompletionCount)
                {
                    spinWait.SpinOnce();
                }
            }
        }

        // Executes one stage over the block range [batchStart, batchEnd) for a single worker: claims contiguous runs of blocks via the claims buffer,
        // steals leftover blocks from neighbors, records the largest contiguous region as the worker's start guess for the next iteration,
        // and ends with an interstage sync plus a bounds-buffer swap.
        private void ExecuteStage<TStageFunction, TBlock>(ref TStageFunction stageFunction, ref WorkBlocks<TBlock> blocks,
            ref Buffer<WorkerBounds> allWorkerBounds, ref Buffer<WorkerBounds> previousWorkerBounds, int workerIndex,
            int batchStart, int batchEnd, ref int workerStart, ref int syncStage,
            int claimedState, int unclaimedState)
            where TStageFunction : IStageFunction
            where TBlock : unmanaged
        {
            // It is possible for a worker to not have any job available in a particular batch. This can only happen when there are more workers than work blocks in the batch.
            // The workers that would index beyond the available work blocks have their starts set to -1 by the scheduler.
            // All preceding workers will have tightly packed contiguous indices and won't be able to worksteal at all.
            if (workerStart > -1)
            {
                Debug.Assert(workerStart >= batchStart && workerStart < batchEnd);
                var blockIndex = workerStart;

                ref var bounds = ref allWorkerBounds[workerIndex];

                // Just assume the min will be claimed. The thread could get preempted, or the value could be read before the claim actually lands, but
                // that's a very small risk and doesn't affect long-term correctness. (It would only mildly reduce work stealing efficiency, and thus performance.)
                bounds.Min = blockIndex;
                Debug.Assert(bounds.Max <= batchEnd);

                // Note that initialization guarantees a start index within the batch; no test required.
                // Note that we track the largest contiguous region over the course of the stage. This worker's start for the batch will be set to the
                // minimum slot of that largest contiguous region, so that subsequent iterations tend to begin with a better work distribution and steal less.
                Debug.Assert(batchStart <= blockIndex && batchEnd > blockIndex);
                var highestLocalClaim = TraverseForwardUntilBlocked(ref stageFunction, ref blocks, blockIndex, ref bounds, ref allWorkerBounds, workerIndex, batchEnd, claimedState, unclaimedState);

                Debug.Assert(bounds.Max <= batchEnd);
                // We've reached the end of the contiguous region in the forward direction. Try going the other way.
                blockIndex = workerStart - 1;
                // Note that there's no guarantee the block will be within the batch- this could be the leftmost worker.
                int lowestLocalClaim;
                if (blockIndex >= batchStart)
                {
                    lowestLocalClaim = TraverseBackwardUntilBlocked(ref stageFunction, ref blocks, blockIndex, ref bounds, ref allWorkerBounds, workerIndex, batchStart, claimedState, unclaimedState);
                }
                else
                {
                    lowestLocalClaim = batchStart;
                }
                Debug.Assert(bounds.Max <= batchEnd);
                // These are actually two inclusive bounds, so this is count - 1, but as long as we're consistent it's fine.
                // For this first region, we need to check that it's actually a valid region- it might not be if the claims were blocked.
                var largestContiguousRegionSize = highestLocalClaim - lowestLocalClaim;
                if (largestContiguousRegionSize >= 0)
                    workerStart = lowestLocalClaim;
                else
                    largestContiguousRegionSize = 0; // It was an invalid region, but later invalid regions should be rejected by size. Setting this to zero ensures later regions must have at least one open slot.


                // All contiguous slots have been claimed. Now just traverse to the end in the forward direction.
                while (bounds.Max < batchEnd)
                {
                    // Each of these iterations may discover a contiguous region larger than our previous best.
                    lowestLocalClaim = bounds.Max;
                    highestLocalClaim = TraverseForwardUntilBlocked(ref stageFunction, ref blocks, bounds.Max, ref bounds, ref allWorkerBounds, workerIndex, batchEnd, claimedState, unclaimedState);
                    // If the claim at index lowestLocalClaim was blocked, highestLocalClaim will be -1, so the size will be negative.
                    var regionSize = highestLocalClaim - lowestLocalClaim; // Again, actually count - 1.
                    if (regionSize > largestContiguousRegionSize)
                    {
                        workerStart = lowestLocalClaim;
                        largestContiguousRegionSize = regionSize;
                    }
                    Debug.Assert(bounds.Max <= batchEnd);
                }

                // Traverse backwards.
                while (bounds.Min > batchStart)
                {
                    // Note bounds.Min - 1; Min is inclusive, so to visit a new location it must be pushed out.
                    // Note that the condition above uses > to handle this.
                    highestLocalClaim = bounds.Min - 1;
                    lowestLocalClaim = TraverseBackwardUntilBlocked(ref stageFunction, ref blocks, highestLocalClaim, ref bounds, ref allWorkerBounds, workerIndex, batchStart, claimedState, unclaimedState);
                    // If the claim at highestLocalClaim was blocked, lowestLocalClaim will be the total block count, so the size will be negative.
                    var regionSize = highestLocalClaim - lowestLocalClaim; // Again, actually count - 1.
                    if (regionSize > largestContiguousRegionSize)
                    {
                        workerStart = lowestLocalClaim;
                        largestContiguousRegionSize = regionSize;
                    }
                    Debug.Assert(bounds.Max <= batchEnd);
                }

                Debug.Assert(bounds.Min == batchStart && bounds.Max == batchEnd);

            }
            // Clear the previous bounds array before the sync so the next stage starts with fresh data.
            // Note that this clear is unconditional- the previous worker data must be cleared, or trash data could persist into the next stage.
            previousWorkerBounds[workerIndex].Min = int.MaxValue;
            previousWorkerBounds[workerIndex].Max = int.MinValue;

            InterstageSync(ref syncStage);
            // Swap the bounds buffers in use before proceeding.
            var tempWorkerBounds = allWorkerBounds;
            allWorkerBounds = previousWorkerBounds;
            previousWorkerBounds = tempWorkerBounds;


        }

        /// <summary>
        /// Computes a worker's initial work block index such that worker starts are distributed uniformly across the block range.
        /// </summary>
        /// <param name="workerIndex">Index of the worker to compute a start for.</param>
        /// <param name="blockCount">Number of work blocks being distributed.</param>
        /// <param name="workerCount">Total number of workers.</param>
        /// <param name="offset">Offset added to the computed local start index.</param>
        /// <returns>Start index for the worker, or -1 when there are fewer blocks than workers and this worker receives none.</returns>
        static int GetUniformlyDistributedStart(int workerIndex, int blockCount, int workerCount, int offset)
        {
            if (blockCount <= workerCount)
            {
                // Too few blocks to give every worker a job; give jobs only to the first blockCount workers.
                return workerIndex < blockCount ? offset + workerIndex : -1;
            }
            // Leading workers each absorb one block of the remainder, shifting later workers' starts accordingly.
            int blocksPerWorker = Math.DivRem(blockCount, workerCount, out int remainder);
            return offset + blocksPerWorker * workerIndex + Math.Min(remainder, workerIndex);
        }

        // Entry point for one solver worker thread: runs the prestep across all blocks, then per-batch warm starts (plus the fallback warm start/scatter
        // when a fallback batch exists), then all solve iterations, synchronizing with the other workers between stages.
        void SolveWorker(int workerIndex)
        {
            int prestepStart = GetUniformlyDistributedStart(workerIndex, context.ConstraintBlocks.Blocks.Count, context.WorkerCount, 0);
            int fallbackStart = GetUniformlyDistributedStart(workerIndex, context.FallbackBlocks.Blocks.Count, context.WorkerCount, 0);
            Buffer<int> batchStarts;
            ref var activeSet = ref ActiveSet;
            unsafe
            {
                // stackalloc is actually a little slow due to the zeroing forced by the localsinit behavior.
                // Fortunately, this executes once per thread, per frame. With 32 batches, it would add... a few nanoseconds per frame. We can accept that overhead.
                // This is preferable to preallocating on the heap- we may write to these values, and we don't want to risk false sharing for no reason.
                // A single instance of false sharing would cost far more than the overhead of zeroing out the array.
                var batchStartsData = stackalloc int[activeSet.Batches.Count];
                batchStarts = new Buffer<int>(batchStartsData, activeSet.Batches.Count);
            }
            for (int batchIndex = 0; batchIndex < activeSet.Batches.Count; ++batchIndex)
            {
                var batchOffset = batchIndex > 0 ? context.BatchBoundaries[batchIndex - 1] : 0;
                var batchCount = context.BatchBoundaries[batchIndex] - batchOffset;
                batchStarts[batchIndex] = GetUniformlyDistributedStart(workerIndex, batchCount, context.WorkerCount, batchOffset);
            }


            int syncStage = 0;
            // The claimed and unclaimed states swap after every usage of both pingponged claims buffers.
            int claimedState = 1;
            int unclaimedState = 0;
            var bounds = context.WorkerBoundsA;
            var boundsBackBuffer = context.WorkerBoundsB;
            // Note that every batch has a different start position. Each covers a different subset of constraints, so they require different starting locations.
            // The same concept applies to the prestep, which covers all constraints at once rather than batch by batch.
            var prestepStage = new PrestepStageFunction { Dt = context.Dt, InverseDt = 1f / context.Dt };
            Debug.Assert(activeSet.Batches.Count > 0, "Don't dispatch if there are no constraints.");
            // Technically this could mutate the prestep start, but we rebuild starts every frame at the moment, so it doesn't matter either way.
            ExecuteStage(ref prestepStage, ref context.ConstraintBlocks, ref bounds, ref boundsBackBuffer, workerIndex, 0, context.ConstraintBlocks.Blocks.Count,
                ref prestepStart, ref syncStage, claimedState, unclaimedState);

            GetSynchronizedBatchCount(out var synchronizedBatchCount, out var fallbackExists);
            claimedState ^= 1;
            unclaimedState ^= 1;
            var warmStartStage = new WarmStartStageFunction();
            for (int batchIndex = 0; batchIndex < synchronizedBatchCount; ++batchIndex)
            {
                var batchOffset = batchIndex > 0 ? context.BatchBoundaries[batchIndex - 1] : 0;
                // Don't let the warm start adjust the solve iterations' work distribution guess.
                var workerBatchStartCopy = batchStarts[batchIndex];
                ExecuteStage(ref warmStartStage, ref context.ConstraintBlocks, ref bounds, ref boundsBackBuffer, workerIndex, batchOffset, context.BatchBoundaries[batchIndex],
                    ref workerBatchStartCopy, ref syncStage, claimedState, unclaimedState);
            }
            var fallbackScatterStage = new FallbackScatterStageFunction();
            if (fallbackExists)
            {
                var warmStartFallbackStage = new WarmStartFallbackStageFunction();
                var batchStart = FallbackBatchThreshold > 0 ? context.BatchBoundaries[FallbackBatchThreshold - 1] : 0;
                // Don't let the warm start adjust the solve iterations' work distribution guess.
                var workerBatchStartCopy = batchStarts[FallbackBatchThreshold];
                ExecuteStage(ref warmStartFallbackStage, ref context.ConstraintBlocks, ref bounds, ref boundsBackBuffer, workerIndex, batchStart, context.BatchBoundaries[FallbackBatchThreshold],
                    ref workerBatchStartCopy, ref syncStage, claimedState, unclaimedState);
                ExecuteStage(ref fallbackScatterStage, ref context.FallbackBlocks, ref bounds, ref boundsBackBuffer,
                    workerIndex, 0, context.FallbackBlocks.Blocks.Count, ref fallbackStart, ref syncStage, unclaimedState, claimedState); // Note claim state swap: fallback scatter claims have no prestep, so they're off by one cycle.
            }
            claimedState ^= 1;
            unclaimedState ^= 1;

            var solveStage = new SolveStageFunction();
            var solveFallbackStage = new SolveFallbackStageFunction();
            for (int iterationIndex = 0; iterationIndex < iterationCount; ++iterationIndex)
            {
                for (int batchIndex = 0; batchIndex < synchronizedBatchCount; ++batchIndex)
                {
                    var batchOffset = batchIndex > 0 ? context.BatchBoundaries[batchIndex - 1] : 0;
                    ExecuteStage(ref solveStage, ref context.ConstraintBlocks, ref bounds, ref boundsBackBuffer, workerIndex, batchOffset, context.BatchBoundaries[batchIndex],
                        ref batchStarts[batchIndex], ref syncStage, claimedState, unclaimedState);
                }
                if (fallbackExists)
                {
                    var batchOffset = FallbackBatchThreshold > 0 ? context.BatchBoundaries[FallbackBatchThreshold - 1] : 0;
                    ExecuteStage(ref solveFallbackStage, ref context.ConstraintBlocks, ref bounds, ref boundsBackBuffer, workerIndex, batchOffset, context.BatchBoundaries[FallbackBatchThreshold],
                        ref batchStarts[FallbackBatchThreshold], ref syncStage, claimedState, unclaimedState);
                    ExecuteStage(ref fallbackScatterStage, ref context.FallbackBlocks, ref bounds, ref boundsBackBuffer,
                        workerIndex, 0, context.FallbackBlocks.Blocks.Count, ref fallbackStart, ref syncStage, unclaimedState, claimedState); // Note claim state swap: fallback scatter claims have no prestep, so they're off by one cycle.
                }
                claimedState ^= 1;
                unclaimedState ^= 1;
            }
        }

        [Conditional("DEBUG")]
        void ValidateWorkBlocks<TTypeBatchSolveFilter>(ref TTypeBatchSolveFilter filter) where TTypeBatchSolveFilter : ITypeBatchSolveFilter
        {
            // Debug-only sanity check: the generated work blocks must cover every bundle of every
            // filter-allowed type batch exactly once. Build a per-bundle visit counter mirroring the
            // active set's batch/type batch structure.
            ref var activeSet = ref ActiveSet;
            var visitCounts = new int[activeSet.Batches.Count][][];
            for (int batchIndex = 0; batchIndex < visitCounts.Length; ++batchIndex)
            {
                ref var batch = ref activeSet.Batches[batchIndex];
                var batchCounts = visitCounts[batchIndex] = new int[batch.TypeBatches.Count][];
                for (int typeBatchIndex = 0; typeBatchIndex < batchCounts.Length; ++typeBatchIndex)
                {
                    batchCounts[typeBatchIndex] = new int[batch.TypeBatches[typeBatchIndex].BundleCount];
                }
            }

            // First pass: count the blocks touching each bundle; no bundle may be covered twice.
            for (int blockIndex = 0; blockIndex < context.ConstraintBlocks.Blocks.Count; ++blockIndex)
            {
                ref var block = ref context.ConstraintBlocks.Blocks[blockIndex];
                var bundleCounts = visitCounts[block.BatchIndex][block.TypeBatchIndex];
                for (int bundleIndex = block.StartBundle; bundleIndex < block.End; ++bundleIndex)
                {
                    ++bundleCounts[bundleIndex];
                    Debug.Assert(bundleCounts[bundleIndex] == 1, "A bundle should be covered by no more than one work block.");
                }
            }

            // Second pass: every bundle belonging to a type batch the filter allows must have been covered.
            for (int batchIndex = 0; batchIndex < visitCounts.Length; ++batchIndex)
            {
                var batchCounts = visitCounts[batchIndex];
                for (int typeBatchIndex = 0; typeBatchIndex < batchCounts.Length; ++typeBatchIndex)
                {
                    ref var typeBatch = ref ActiveSet.Batches[batchIndex].TypeBatches[typeBatchIndex];
                    if (filter.AllowType(typeBatch.TypeId))
                    {
                        var bundleCounts = batchCounts[typeBatchIndex];
                        for (int bundleIndex = 0; bundleIndex < bundleCounts.Length; ++bundleIndex)
                        {
                            Debug.Assert(bundleCounts[bundleIndex] == 1, "Every bundle of an allowed type batch should be covered by exactly one work block.");
                        }
                    }
                }
            }

        }

        /// <summary>
        /// Builds the work blocks for the active constraint set, initializes the shared claim/bounds state,
        /// dispatches the worker delegate across all threads, and then releases the per-dispatch resources.
        /// </summary>
        void ExecuteMultithreaded<TTypeBatchSolveFilter>(float dt, IThreadDispatcher threadDispatcher, Action<int> workDelegate) where TTypeBatchSolveFilter : struct, ITypeBatchSolveFilter
        {
            var filter = default(TTypeBatchSolveFilter);
            var workerCount = context.WorkerCount = threadDispatcher.ThreadCount;
            context.WorkerCompletedCount = 0;
            context.Dt = dt;
            // First build a set of work blocks.
            // The block size should be relatively small to give work stealing something to do, but we don't want to go crazy with the number of blocks.
            // These values were found by empirical tuning; the optimal values likely vary by architecture.
            // The goal here is to have just enough blocks that, in case we end up with some underpowered threads (due to contention or hyperthreading),
            // there are enough blocks that work stealing will still usually allow the extra threads to be useful.
            const int targetBlocksPerBatchPerWorker = 16;
            const int minimumBlockSizeInBundles = 3;

            var targetBlocksPerBatch = workerCount * targetBlocksPerBatchPerWorker;
            BuildWorkBlocks(pool, minimumBlockSizeInBundles, targetBlocksPerBatch, ref filter);
            ValidateWorkBlocks(ref filter);

            // Note this window: the block claims must be initialized to zero so that the first work stage knows the data is available to claim.
            context.ConstraintBlocks.CreateClaims(pool);
            if (filter.AllowFallback && ActiveSet.Batches.Count > FallbackBatchThreshold)
            {
                Debug.Assert(context.FallbackBlocks.Blocks.Count > 0);
                FallbackBatch.AllocateResults(this, pool, ref ActiveSet.Batches[FallbackBatchThreshold], out context.FallbackResults);
                context.FallbackBlocks.CreateClaims(pool);
            }
            pool.Take(workerCount, out context.WorkerBoundsA);
            pool.Take(workerCount, out context.WorkerBoundsB);
            // The worker bounds front buffer should be initialized to avoid trash interval data messing with the work stealing.
            // Workers will clear the worker bounds back buffer before moving on to the next stage.
            for (int i = 0; i < workerCount; ++i)
            {
                context.WorkerBoundsA[i] = new WorkerBounds { Min = int.MaxValue, Max = int.MinValue };
            }

            // While we could be more aggressive about culling the dispatch in this case, it doesn't matter much. It must be done for correctness; workers rely on it.
            if (ActiveSet.Batches.Count > 0)
                threadDispatcher.DispatchWorkers(workDelegate);

            context.ConstraintBlocks.Dispose(pool);
            if (filter.AllowFallback && ActiveSet.Batches.Count > FallbackBatchThreshold)
            {
                FallbackBatch.DisposeResults(this, pool, ref ActiveSet.Batches[FallbackBatchThreshold], ref context.FallbackResults);
                context.FallbackBlocks.Dispose(pool);
            }
            pool.Return(ref context.BatchBoundaries);
            pool.Return(ref context.WorkerBoundsA);
            pool.Return(ref context.WorkerBoundsB);
        }


        /// <summary>
        /// Executes prestep, warm start, and the solve iterations for the active constraint set.
        /// </summary>
        /// <param name="dt">Duration of the time step being solved.</param>
        /// <param name="threadDispatcher">Dispatcher used for multithreaded execution; if null, the solve runs sequentially on the calling thread.</param>
        public void Solve(float dt, IThreadDispatcher threadDispatcher = null)
        {
            if (threadDispatcher != null)
            {
                // With a dispatcher available, use the work block based multithreaded path.
                ExecuteMultithreaded<MainSolveFilter>(dt, threadDispatcher, solveWorker);
                return;
            }
            // Sequential path: prestep everything, warm start everything, then run the solve iterations.
            var inverseDt = 1f / dt;
            ref var activeSet = ref ActiveSet;
            GetSynchronizedBatchCount(out var synchronizedBatchCount, out var fallbackExists);
            for (int batchIndex = 0; batchIndex < synchronizedBatchCount; ++batchIndex)
            {
                ref var batch = ref activeSet.Batches[batchIndex];
                for (int typeBatchIndex = 0; typeBatchIndex < batch.TypeBatches.Count; ++typeBatchIndex)
                {
                    ref var typeBatch = ref batch.TypeBatches[typeBatchIndex];
                    TypeProcessors[typeBatch.TypeId].Prestep(ref typeBatch, bodies, dt, inverseDt, 0, typeBatch.BundleCount);
                }
            }
            if (fallbackExists)
            {
                // The fallback batch uses jacobi-style execution, so it gets its own prestep flavor.
                ref var batch = ref activeSet.Batches[FallbackBatchThreshold];
                for (int typeBatchIndex = 0; typeBatchIndex < batch.TypeBatches.Count; ++typeBatchIndex)
                {
                    ref var typeBatch = ref batch.TypeBatches[typeBatchIndex];
                    TypeProcessors[typeBatch.TypeId].JacobiPrestep(ref typeBatch, bodies, ref activeSet.Fallback, dt, inverseDt, 0, typeBatch.BundleCount);
                }
            }
            // TODO: Could consider executing the warm start immediately after each prestep. The multithreaded path can't do that,
            // so it would introduce some bitwise differences. On the plus side, it would make better use of cached data.
            for (int batchIndex = 0; batchIndex < synchronizedBatchCount; ++batchIndex)
            {
                ref var batch = ref activeSet.Batches[batchIndex];
                for (int typeBatchIndex = 0; typeBatchIndex < batch.TypeBatches.Count; ++typeBatchIndex)
                {
                    ref var typeBatch = ref batch.TypeBatches[typeBatchIndex];
                    TypeProcessors[typeBatch.TypeId].WarmStart(ref typeBatch, ref bodies.ActiveSet.Velocities, 0, typeBatch.BundleCount);
                }
            }
            Buffer<FallbackTypeBatchResults> fallbackResults = default;
            if (fallbackExists)
            {
                ref var batch = ref activeSet.Batches[FallbackBatchThreshold];
                FallbackBatch.AllocateResults(this, pool, ref batch, out fallbackResults);
                for (int typeBatchIndex = 0; typeBatchIndex < batch.TypeBatches.Count; ++typeBatchIndex)
                {
                    ref var typeBatch = ref batch.TypeBatches[typeBatchIndex];
                    TypeProcessors[typeBatch.TypeId].JacobiWarmStart(ref typeBatch, ref bodies.ActiveSet.Velocities, ref fallbackResults[typeBatchIndex], 0, typeBatch.BundleCount);
                }
                activeSet.Fallback.ScatterVelocities(bodies, this, ref fallbackResults, 0, activeSet.Fallback.BodyCount);
            }
            for (int iterationIndex = 0; iterationIndex < iterationCount; ++iterationIndex)
            {
                for (int batchIndex = 0; batchIndex < synchronizedBatchCount; ++batchIndex)
                {
                    ref var batch = ref activeSet.Batches[batchIndex];
                    for (int typeBatchIndex = 0; typeBatchIndex < batch.TypeBatches.Count; ++typeBatchIndex)
                    {
                        ref var typeBatch = ref batch.TypeBatches[typeBatchIndex];
                        TypeProcessors[typeBatch.TypeId].SolveIteration(ref typeBatch, ref bodies.ActiveSet.Velocities, 0, typeBatch.BundleCount);
                    }
                }
                if (fallbackExists)
                {
                    // Jacobi results are accumulated per type batch, then scattered back to the body velocities after the batch completes.
                    ref var batch = ref activeSet.Batches[FallbackBatchThreshold];
                    for (int typeBatchIndex = 0; typeBatchIndex < batch.TypeBatches.Count; ++typeBatchIndex)
                    {
                        ref var typeBatch = ref batch.TypeBatches[typeBatchIndex];
                        TypeProcessors[typeBatch.TypeId].JacobiSolveIteration(ref typeBatch, ref bodies.ActiveSet.Velocities, ref fallbackResults[typeBatchIndex], 0, typeBatch.BundleCount);
                    }
                    activeSet.Fallback.ScatterVelocities(bodies, this, ref fallbackResults, 0, activeSet.Fallback.BodyCount);
                }
            }
            if (fallbackExists)
            {
                FallbackBatch.DisposeResults(this, pool, ref activeSet.Batches[FallbackBatchThreshold], ref fallbackResults);
            }
        }

    }
}
