using BepuUtilities;
using BepuUtilities.Collections;
using BepuUtilities.Memory;
using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Threading;

namespace BepuPhysics.CollisionDetection
{
    /// <summary>
    /// Identifies a type batch within the solver by its containing constraint batch index and its type batch index.
    /// </summary>
    /// <remarks>
    /// Layout is load-bearing: RemovalCache.IndexOf and the sort comparers reinterpret this struct as a single int
    /// via Unsafe.As, so it must remain exactly two 16-bit fields (4 bytes total) in this order.
    /// </remarks>
    struct TypeBatchIndex
    {
        public short TypeBatch;
        public short Batch;
    }

    /// <summary>
    /// Accumulates constraints to be removed from multiple threads, and removes them efficiently as a single batch.
    /// </summary>
    public class ConstraintRemover
    {
        internal Solver solver;
        internal Bodies bodies;
        BufferPool pool;

        /// <summary>
        /// Describes one body's reference to a constraint being removed. Captured eagerly at enqueue time so that
        /// body constraint lists and batch referenced handle sets can be updated later without touching the type batch again.
        /// </summary>
        internal struct PerBodyRemovalTarget
        {
            //Index of the body in the active set at the time the removal was enqueued.
            public int BodyIndex;
            //Handle of the constraint being removed from that body's constraint list.
            public ConstraintHandle ConstraintHandle;

            //Constraint batch the constraint lived in; used to remove the body handle from the batch's referenced handle set.
            public int BatchIndex;
            //Handle of the body, cached for the batch referenced handles removal.
            public BodyHandle BodyHandle;
        }


        //Removal data collected for a single type batch: the constraint handles to pull out of it,
        //plus the per-body targets generated from those constraints.
        struct RemovalsForTypeBatch
        {
            public QuickList<ConstraintHandle> ConstraintHandlesToRemove;
            public QuickList<PerBodyRemovalTarget> PerBodyRemovalTargets;
        }

        //Accumulates removal targets grouped by type batch. Used both per worker and for the combined flush set.
        struct RemovalCache
        {
            public int BatchCount;
            public Buffer<TypeBatchIndex> TypeBatches;
            public Buffer<RemovalsForTypeBatch> RemovalsForTypeBatches;

            //Storing these by constraint batch would be an option, but it would require more prefiltering, which isn't free.
            int minimumCapacityPerBatch;

            public RemovalCache(BufferPool pool, int batchCapacity, int minimumCapacityPerBatch)
            {
                this.minimumCapacityPerBatch = minimumCapacityPerBatch;

                pool.TakeAtLeast(batchCapacity, out TypeBatches);
                pool.TakeAtLeast(batchCapacity, out RemovalsForTypeBatches);
                BatchCount = 0;
            }

            /// <summary>
            /// Finds the linear index of a type batch within this cache, or -1 if it is not present.
            /// </summary>
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public int IndexOf(TypeBatchIndex typeBatchIndex)
            {
                //Working on the assumption that the number of batches/type batches is very low, a linear scan should beat a dictionary.
                //Note the reinterpretation of the two-short struct as a single int for a one-comparison equality check.
                ref var indexAsInt = ref Unsafe.As<TypeBatchIndex, int>(ref typeBatchIndex);
                for (int i = 0; i < BatchCount; ++i)
                {
                    if (indexAsInt == Unsafe.As<TypeBatchIndex, int>(ref TypeBatches[i]))
                        return i;
                }
                return -1;
            }
            /// <summary>
            /// Ensures room for the given number of constraint handles and per-body targets for a type batch,
            /// creating a new slot for the type batch if it has not been seen before.
            /// </summary>
            /// <returns>Linear index of the type batch's removal slot within this cache.</returns>
            public int AllocateSpaceForTargets(TypeBatchIndex typeBatchIndex, int constraintHandleCount, int perBodyRemovalCount, BufferPool pool)
            {
                var index = IndexOf(typeBatchIndex);
                if (index >= 0)
                {
                    //Already tracked; just grow the existing lists as needed.
                    ref var slot = ref RemovalsForTypeBatches[index];
                    Debug.Assert(slot.ConstraintHandlesToRemove.Span.Allocated && slot.PerBodyRemovalTargets.Span.Allocated);
                    slot.PerBodyRemovalTargets.EnsureCapacity(slot.PerBodyRemovalTargets.Count + perBodyRemovalCount, pool);
                    slot.ConstraintHandlesToRemove.EnsureCapacity(slot.ConstraintHandlesToRemove.Count + constraintHandleCount, pool);
                    return index;
                }
                //New type batch; append a slot, resizing the backing buffers if they're full.
                index = BatchCount;
                BatchCount = BatchCount + 1;
                if (TypeBatches.Length == index)
                    pool.ResizeToAtLeast(ref TypeBatches, BatchCount, index);
                if (RemovalsForTypeBatches.Length == index)
                    pool.ResizeToAtLeast(ref RemovalsForTypeBatches, BatchCount, index);
                TypeBatches[index] = typeBatchIndex;
                ref var newSlot = ref RemovalsForTypeBatches[index];
                newSlot.ConstraintHandlesToRemove = new QuickList<ConstraintHandle>(Math.Max(constraintHandleCount, minimumCapacityPerBatch), pool);
                newSlot.PerBodyRemovalTargets = new QuickList<PerBodyRemovalTarget>(Math.Max(perBodyRemovalCount, minimumCapacityPerBatch), pool);
                return index;
            }

            /// <summary>
            /// Returns all backing buffers to the given pool and zeroes this cache. The pool must be the one
            /// that created this cache's allocations.
            /// </summary>
            public void Dispose(BufferPool pool)
            {
                pool.Return(ref TypeBatches);
                for (int i = 0; i < BatchCount; ++i)
                {
                    ref var removal = ref RemovalsForTypeBatches[i];
                    removal.PerBodyRemovalTargets.Dispose(pool);
                    removal.ConstraintHandlesToRemove.Dispose(pool);
                }
                pool.Return(ref RemovalsForTypeBatches);
                this = default;
            }
        }


        //Per-thread accumulator; each worker enqueues into its own cache backed by its own pool, so no cross-thread synchronization is needed.
        struct WorkerCache
        {

            internal BufferPool pool;
            public RemovalCache Removals;

            public WorkerCache(BufferPool pool, int batchCapacity, int minimumCapacityPerBatch)
            {
                this.pool = pool;
                Debug.Assert(minimumCapacityPerBatch > 0);
                Removals = new RemovalCache(pool, batchCapacity, minimumCapacityPerBatch);
            }

            /// <summary>
            /// Records a constraint for later removal, caching its type batch location and the per-body removal targets.
            /// The constraint must be active (set index 0).
            /// </summary>
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public unsafe void EnqueueForRemoval(ConstraintHandle constraintHandle, Solver solver, Bodies bodies)
            {
                ref var constraint = ref solver.HandleToConstraint[constraintHandle.Value];
                Debug.Assert(constraint.SetIndex == 0, "The constraint remover requires that the target constraint is active.");
                TypeBatchIndex typeBatchIndex;
                //Parallel removes are guaranteed not to change the constraint indices until all removes complete, so we can precache the type batch index here.
                //This lets us collect the constraints to remove by type batch. Removes in different type batches can proceed in parallel.
                typeBatchIndex.Batch = (short)constraint.BatchIndex;
                ref var constraintBatch = ref solver.ActiveSet.Batches[constraint.BatchIndex];
                typeBatchIndex.TypeBatch = (short)constraintBatch.TypeIndexToTypeBatchIndex[constraint.TypeId];

                var typeProcessor = solver.TypeProcessors[constraint.TypeId];
                var bodiesPerConstraint = typeProcessor.BodiesPerConstraint;
                var linearTypeBatchIndex = Removals.AllocateSpaceForTargets(typeBatchIndex, 1, bodiesPerConstraint, pool);

                ref var typeBatchRemovals = ref Removals.RemovalsForTypeBatches[linearTypeBatchIndex];
                typeBatchRemovals.ConstraintHandlesToRemove.AllocateUnsafely() = constraintHandle;

                //Now extract and enqueue the body list removal targets and the constraint batch body handle removal targets.
                //We have to perform the enumeration here rather than in the later flush; removals from the type batch would make
                //enumerating connected body indices there a race condition.
                ref var typeBatch = ref constraintBatch.TypeBatches[typeBatchIndex.TypeBatch];
                var bodyIndices = stackalloc int[bodiesPerConstraint];
                var enumerator = new ReferenceCollector(bodyIndices);
                typeProcessor.EnumerateConnectedBodyIndices(ref typeBatch, constraint.IndexInTypeBatch, ref enumerator);

                for (int i = 0; i < bodiesPerConstraint; ++i)
                {
                    ref var target = ref typeBatchRemovals.PerBodyRemovalTargets.AllocateUnsafely();
                    target.BodyIndex = bodyIndices[i];
                    target.ConstraintHandle = constraintHandle;

                    target.BatchIndex = typeBatchIndex.Batch;
                    target.BodyHandle = bodies.ActiveSet.IndexToHandle[target.BodyIndex];
                }
            }

            public void Dispose()
            {
                Removals.Dispose(pool);
            }
        }

        //Capacities observed in previous frames, used to presize the next frame's caches and avoid midframe resizes.
        int previousCapacityPerBatch;
        int previousBatchCapacity;
        float previousCapacityMultiplier;
        int minimumConstraintCapacity;
        int minimumTypeCapacity;
        WorkerCache[] workerCaches; //The worker caches hold a managed reference to a pool, so this can't be a Buffer.
        int threadCount;

        /// <summary>
        /// Constructs a constraint remover.
        /// </summary>
        /// <param name="pool">Main thread buffer pool used for the combined flush allocations.</param>
        /// <param name="bodies">Bodies collection whose per-body constraint lists will be updated by removals.</param>
        /// <param name="solver">Solver that owns the constraints to be removed.</param>
        /// <param name="minimumTypeCapacity">Minimum number of type batch slots to preallocate per cache.</param>
        /// <param name="minimumRemovalCapacity">Minimum per-type-batch removal capacity to preallocate.</param>
        /// <param name="previousCapacityMultiplier">Multiplier applied to the previous frame's observed capacities when presizing.</param>
        public ConstraintRemover(BufferPool pool, Bodies bodies, Solver solver, int minimumTypeCapacity = 4, int minimumRemovalCapacity = 128, float previousCapacityMultiplier = 1.25f)
        {
            this.pool = pool;
            this.bodies = bodies;
            this.solver = solver;
            this.minimumConstraintCapacity = minimumRemovalCapacity;
            this.minimumTypeCapacity = minimumTypeCapacity;
            this.previousCapacityMultiplier = previousCapacityMultiplier;
        }


        /// <summary>
        /// Allocates the per-worker caches for a new round of removals. Must be called before enqueuing removals.
        /// </summary>
        /// <param name="dispatcher">Thread dispatcher whose per-thread pools back the worker caches; null for single threaded use.</param>
        public void Prepare(IThreadDispatcher dispatcher)
        {
            threadCount = dispatcher == null ? 1 : dispatcher.ThreadCount;
            //This array will never have many workers or resizes, so a managed reference is fine. It makes storing the buffer pools easier.
            if (workerCaches == null || workerCaches.Length < threadCount)
            {
                workerCaches = new WorkerCache[threadCount];
            }
            var batchCapacity = (int)Math.Max(minimumTypeCapacity, previousBatchCapacity * previousCapacityMultiplier);
            var capacityPerBatch = (int)Math.Max(minimumConstraintCapacity, previousCapacityPerBatch * previousCapacityMultiplier);
            if (dispatcher != null)
            {
                for (int i = 0; i < threadCount; ++i)
                {
                    //Note the use of per-thread pools. Workers can resize their collections without touching the main pool.
                    workerCaches[i] = new WorkerCache(dispatcher.GetThreadMemoryPool(i), batchCapacity, capacityPerBatch);
                }
            }
            else
            {
                workerCaches[0] = new WorkerCache(pool, batchCapacity, capacityPerBatch);
            }
            //The island sleeper's job order requires this allocation to be done in Prepare rather than CreateFlushJobs.
            if (solver.ActiveSet.Batches.Count > solver.FallbackBatchThreshold)
            {
                //Make sure the fallback's free list is large enough too. The fallback batch's main dictionary can return 3 buffers, and each
                //removed body constraint reference subset may return two more.
                allocationIdsToFree = new QuickList<int>(3 + solver.ActiveSet.Fallback.BodyCount * 2, pool);
            }
        }

        //Sorts removal targets into a globally unique, deterministic order.
        struct PerBodyRemovalTargetComparer : IComparerRef<PerBodyRemovalTarget>
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public int Compare(ref PerBodyRemovalTarget a, ref PerBodyRemovalTarget b)
            {
                //Constraint handle is the primary key, but we have to disambiguate by body handle to get a globally unique sort.
                //(There can be multiple PerBodyRemovalTargets per constraint, since one target is created for each body associated with the constraint.)
                var aLong = ((long)a.ConstraintHandle.Value << 32) | (long)a.BodyHandle.Value;
                var bLong = ((long)b.ConstraintHandle.Value << 32) | (long)b.BodyHandle.Value;
                return aLong.CompareTo(bLong);
            }
        }
        //Ascending order over the packed (Batch, TypeBatch) int representation; used for the deterministic flush path.
        struct TypeBatchIndexComparer : IComparerRef<TypeBatchIndex>
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public int Compare(ref TypeBatchIndex a, ref TypeBatchIndex b)
            {
                return Unsafe.As<TypeBatchIndex, int>(ref a).CompareTo(Unsafe.As<TypeBatchIndex, int>(ref b));
            }
        }

        //The general idea for multithreaded constraint removal is that there are different levels of parallelism across the process.
        //You can't call solver.RemoveConstraint from multiple threads without a lock, and we don't want to pay the price of constant synchronization.
        //So instead, we identify reasonably contiguous chunks of work and stick them into locally sequential jobs.
        //We then run those jobs alongside other, more parallel work. The hope is that the parallel work fills in the gaps, balancing work
        //across all threads and limiting the worst case.

        //It's important to note that this is severe overkill when the simulation is only handling <20 removals. It's likely slower than just doing regular removes
        //in sequence in that case- sequential removes would cost around 5us, so pretty much any multithreading overhead could overwhelm the work being done.
        //Doubling the best case's cost, wasting a few microseconds, is not concerning (we could special case it if we really wanted to).
        //Cutting the worst case's cost by a factor of ~threadcount when thousands of constraints are removed is worth this complexity. Frame spikes are evil!

        RemovalCache batches;
        /// <summary>
        /// Processes enqueued constraint removals and prepares removal jobs.
        /// </summary>
        /// <param name="deterministic">True if the constraint remover should maintain determinism at an added cost, false otherwise.</param>
        /// <returns>Number of removal jobs created. To complete the jobs, execute RemoveConstraintsFromTypeBatch for every index from 0 to the returned job count.</returns>
        public int CreateFlushJobs(bool deterministic)
        {
            //Accumulate the set of unique type batches across workers into one contiguous list so we can easily execute multithreaded jobs over them.
            //Note that we actually copy here rather than just creating references. This simplifies the deterministic/nondeterministic split at a pretty small cost.
            batches = new RemovalCache(pool, 32, 8);
            var removedConstraintCount = 0;
            for (int i = 0; i < threadCount; ++i)
            {
                ref var cache = ref workerCaches[i];
                for (int j = 0; j < cache.Removals.BatchCount; ++j)
                {
                    ref var typeBatchIndex = ref cache.Removals.TypeBatches[j];
                    ref var workerRemovals = ref cache.Removals.RemovalsForTypeBatches[j];
                    removedConstraintCount += workerRemovals.ConstraintHandlesToRemove.Count;
                    var batchIndex = batches.AllocateSpaceForTargets(typeBatchIndex, workerRemovals.ConstraintHandlesToRemove.Count, workerRemovals.PerBodyRemovalTargets.Count, pool);

                    ref var combinedRemovalsForBatch = ref batches.RemovalsForTypeBatches[batchIndex];
                    combinedRemovalsForBatch.ConstraintHandlesToRemove.AddRangeUnsafely(workerRemovals.ConstraintHandlesToRemove.Span, 0, workerRemovals.ConstraintHandlesToRemove.Count);
                    combinedRemovalsForBatch.PerBodyRemovalTargets.AddRangeUnsafely(workerRemovals.PerBodyRemovalTargets.Span, 0, workerRemovals.PerBodyRemovalTargets.Count);

                }
            }
            if (deterministic)
            {
                //To ensure determinism, sort within each type batch by constraint handle (and, for removal targets, also by body handle).
                //Handles are unique and deterministic, so processing the sorted lists in order produces deterministic results.
                //This (and perhaps the contiguous list gathering phase) could go to a multithreaded dispatch, but only do that if it actually looks beneficial.
                var constraintHandleComparer = new PrimitiveComparer<int>();
                var perBodyRemovalTargetComparer = new PerBodyRemovalTargetComparer();
                for (int i = 0; i < batches.BatchCount; ++i)
                {
                    ref var batchRemovals = ref batches.RemovalsForTypeBatches[i];
                    QuickSort.Sort(ref batchRemovals.ConstraintHandlesToRemove.Span[0].Value, 0, batchRemovals.ConstraintHandlesToRemove.Count - 1, ref constraintHandleComparer);
                    QuickSort.Sort(ref batchRemovals.PerBodyRemovalTargets[0], 0, batchRemovals.PerBodyRemovalTargets.Count - 1, ref perBodyRemovalTargetComparer);
                }
                //Also sort the batches according to their type batch indices. Note that batch/type batch indices are deterministic if the simulation is deterministic;
                //if it isn't, the solver would produce nondeterministic results anyway.
                var typeBatchIndexComparer = new TypeBatchIndexComparer();
                QuickSort.Sort(ref batches.TypeBatches[0], ref batches.RemovalsForTypeBatches[0], 0, batches.BatchCount - 1, ref typeBatchIndexComparer);
            }

            //Ensure the solver's id pool is large enough to hold all the constraint handles being removed.
            //(Note that we do this even if we end up using this for sleeping, where we don't actually return the handles.
            //There's no functional reason for that- it's just simpler not to have a conditional API, and there's no significant performance impact. Could change later.)
            solver.HandlePool.EnsureCapacity(solver.HandlePool.AvailableIdCount + removedConstraintCount, pool);

            //Ensure the removed list is large enough to hold every type batch in the worst case. This avoids the need to resize during execution.
            //That's valuable because every access to the main thread's buffer pool is a potential race condition while other tasks are also using it.
            //We don't want to have to lock every buffer pool use in other tasks just because we didn't preallocate a trivial amount here.
            int typeBatchCount = 0;
            ref var activeSet = ref solver.ActiveSet;
            for (int i = 0; i < activeSet.Batches.Count; ++i)
            {
                typeBatchCount += activeSet.Batches[i].TypeBatches.Count;
            }
            removedTypeBatches = new QuickList<TypeBatchIndex>(typeBatchCount, pool);
            return batches.BatchCount;
        }

        /// <summary>
        /// Returns the handles associated with all removed constraints to the solver's handle pool.
        /// </summary>
        public void ReturnConstraintHandles()
        {
            //Note that this does not zero out the slots associated with the handles. Type batch removal is assumed to proceed in parallel,
            //and it will try to look up handle->index mappings, so we can't corrupt them.
            for (int i = 0; i < batches.BatchCount; ++i)
            {
                ref var batchHandles = ref batches.RemovalsForTypeBatches[i].ConstraintHandlesToRemove;
                for (int j = 0; j < batchHandles.Count; ++j)
                {
                    solver.HandlePool.ReturnUnsafely(batchHandles[j].Value);
                }
            }
        }

        /// <summary>
        /// Removes every cached constraint reference from the constraint lists of the involved bodies. Sequential.
        /// </summary>
        public void RemoveConstraintsFromBodyLists()
        {
            //While body list removal could technically be internally multithreaded, it would be rather complicated- you would have to do one dispatch
            //per solver batch to guarantee that no two threads hit the same body's constraint list at the same time.
            //That's more complex and would almost certainly be slower than this locally sequential version.
            for (int i = 0; i < batches.BatchCount; ++i)
            {
                ref var removals = ref batches.RemovalsForTypeBatches[i].PerBodyRemovalTargets;
                for (int j = 0; j < removals.Count; ++j)
                {
                    ref var target = ref removals[j];
                    bodies.RemoveConstraintReference(target.BodyIndex, target.ConstraintHandle);
                }
            }
        }

        /// <summary>
        /// Removes the involved body handles from the referenced handle set of each affected constraint batch.
        /// Fallback batch entries are skipped since the fallback batch keeps no referenced handle set.
        /// </summary>
        public void RemoveConstraintsFromBatchReferencedHandles()
        {
            for (int i = 0; i < batches.BatchCount; ++i)
            {
                if (batches.TypeBatches[i].Batch == solver.FallbackBatchThreshold)
                {
                    //Batch referenced handles do not exist for the fallback batch.
                    continue;
                }
                ref var removals = ref batches.RemovalsForTypeBatches[i].PerBodyRemovalTargets;
                for (int j = 0; j < removals.Count; ++j)
                {
                    ref var target = ref removals[j];
                    solver.batchReferencedHandles[target.BatchIndex].Remove(target.BodyHandle.Value);
                }
            }
        }

        //Allocation ids handed back by fallback batch removals; returned to the main pool in Postflush.
        QuickList<int> allocationIdsToFree;
        /// <summary>
        /// Removes the cached removal targets that live in the fallback batch from the fallback batch's internal structures.
        /// Only valid to call when a fallback batch exists.
        /// </summary>
        public void RemoveConstraintsFromFallbackBatch()
        {
            Debug.Assert(solver.ActiveSet.Batches.Count > solver.FallbackBatchThreshold);
            for (int i = 0; i < batches.BatchCount; ++i)
            {
                ref var removals = ref batches.RemovalsForTypeBatches[i].PerBodyRemovalTargets;
                if (batches.TypeBatches[i].Batch == solver.FallbackBatchThreshold)
                {
                    for (int j = 0; j < removals.Count; ++j)
                    {
                        ref var target = ref removals[j];
                        solver.ActiveSet.Fallback.Remove(target.BodyIndex, target.ConstraintHandle, ref allocationIdsToFree);
                    }
                }
            }
        }
        /// <summary>
        /// Attempts to remove all fallback batch constraint references associated with the given body index,
        /// accumulating any freed allocation ids for later return in Postflush.
        /// </summary>
        public void TryRemoveAllConstraintsForBodyFromFallbackBatch(int bodyIndex)
        {
            solver.ActiveSet.Fallback.TryRemove(bodyIndex, ref allocationIdsToFree);
        }

        QuickList<TypeBatchIndex> removedTypeBatches;
        SpinLock removedTypeBatchLocker = new SpinLock();
        /// <summary>
        /// Removes all accumulated constraints from the type batch associated with the given job index.
        /// Jobs with different indices target different type batches and may run in parallel.
        /// </summary>
        /// <param name="index">Job index in [0, CreateFlushJobs' returned count).</param>
        public void RemoveConstraintsFromTypeBatch(int index)
        {
            var batch = batches.TypeBatches[index];
            ref var constraintBatch = ref solver.ActiveSet.Batches[batch.Batch];
            ref var typeBatch = ref constraintBatch.TypeBatches[batch.TypeBatch];
            var typeProcessor = solver.TypeProcessors[typeBatch.TypeId];
            ref var removals = ref batches.RemovalsForTypeBatches[index];
            bool lockTaken = false;
            for (int i = 0; i < removals.ConstraintHandlesToRemove.Count; ++i)
            {
                var handle = removals.ConstraintHandlesToRemove[i];
                //Note that we dynamically look up the index in the type batch, even though we could have cached it alongside the batch and type batch indices.
                //That's because removals can change the indices, so caching would require sorting each type batch's indices before removing.
                //That's very doable, but not doing it is simpler, and the performance difference is likely trivial.
                //TODO: Might be worth testing.
                typeProcessor.Remove(ref typeBatch, solver.HandleToConstraint[handle.Value].IndexInTypeBatch, ref solver.HandleToConstraint);
                if (typeBatch.ConstraintCount == 0)
                {
                    //This batch-type batch pair needs removal.
                    //Note that we just use a spinlock here, nothing fancy- the number of batch/type batch removals should be really low (averaging zero),
                    //so it's not worth doing a bunch of per-worker accumulators or anything.
                    //NOTE(review): lockTaken is reused across iterations without being reset to false. This presumably relies on the emptied condition
                    //triggering at most once per job (on the final removal); SpinLock.Enter throws if lockTaken is already true- confirm that invariant holds.
                    removedTypeBatchLocker.Enter(ref lockTaken);
                    removedTypeBatches.AddUnsafely(batch);
                    removedTypeBatchLocker.Exit();
                }
            }
        }

        //Descending order over the packed (Batch, TypeBatch) int representation; note the reversed operands relative to TypeBatchIndexComparer.
        struct TypeBatchComparer : IComparerRef<TypeBatchIndex>
        {
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public int Compare(ref TypeBatchIndex a, ref TypeBatchIndex b)
            {
                return Unsafe.As<TypeBatchIndex, int>(ref b).CompareTo(Unsafe.As<TypeBatchIndex, int>(ref a));
            }
        }

        /// <summary>
        /// When the ConstraintRemover is used to fully remove constraints from the simulation (rather than simply moving them elsewhere),
        /// the handle->constraint mapping must be updated. This must wait until the multithreaded operations have actually completed;
        /// doing it earlier would corrupt the parallel operations that still look up those mappings.
        /// </summary>
        public void MarkAffectedConstraintsAsRemovedFromSolver()
        {
            for (int i = 0; i < batches.BatchCount; ++i)
            {
                ref var batchHandles = ref batches.RemovalsForTypeBatches[i].ConstraintHandlesToRemove;
                for (int j = 0; j < batchHandles.Count; ++j)
                {
                    //A negative set index is the flag for nonexistence.
                    solver.HandleToConstraint[batchHandles[j].Value].SetIndex = -1;
                }
            }
        }

        /// <summary>
        /// Cleans up type batches (and constraint batches) emptied by the removals, frees all flush-phase allocations,
        /// and records the observed capacities for presizing next frame's caches.
        /// </summary>
        public void Postflush()
        {
            if (removedTypeBatches.Count > 0)
            {
                //Get rid of any type batches (and constraint batches) that became empty due to removals.
                //Sort the removed batches from highest to lowest index so that higher index type batches and constraint batches are removed first.
                //This allows removal-by-pulling-the-last-index without corrupting the other cached indices.
                var comparer = new TypeBatchComparer();
                QuickSort.Sort(ref removedTypeBatches[0], 0, removedTypeBatches.Count - 1, ref comparer);
                ref var activeSet = ref solver.ActiveSet;
                for (int i = 0; i < removedTypeBatches.Count; ++i)
                {
                    var batchIndices = removedTypeBatches[i];
                    ref var batch = ref activeSet.Batches[batchIndices.Batch];
                    ref var typeBatch = ref batch.TypeBatches[batchIndices.TypeBatch];
                    batch.RemoveTypeBatchIfEmpty(ref typeBatch, batchIndices.TypeBatch, solver.pool);
                    solver.RemoveBatchIfEmpty(ref batch, batchIndices.Batch);
                }
            }
            removedTypeBatches.Dispose(pool);

            if (allocationIdsToFree.Span.Allocated)
            {
                //Only allocated in Prepare when a fallback batch existed.
                for (int i = 0; i < allocationIdsToFree.Count; ++i)
                {
                    pool.ReturnUnsafely(allocationIdsToFree[i]);
                }
                allocationIdsToFree.Dispose(pool);
            }
            batches.Dispose(pool);

            //Drop the worker cache allocations, and store the capacities for next frame's initialization.
            previousCapacityPerBatch = 0;
            for (int i = 0; i < threadCount; ++i)
            {
                ref var workerCache = ref workerCaches[i];
                for (int j = 0; j < workerCache.Removals.BatchCount; ++j)
                {
                    if (previousCapacityPerBatch < workerCache.Removals.RemovalsForTypeBatches[j].ConstraintHandlesToRemove.Count)
                        previousCapacityPerBatch = workerCache.Removals.RemovalsForTypeBatches[j].ConstraintHandlesToRemove.Count;
                }
                if (previousBatchCapacity < workerCache.Removals.BatchCount)
                    previousBatchCapacity = workerCache.Removals.BatchCount;
                workerCache.Dispose();
            }
        }

        /// <summary>
        /// Enqueues a constraint for removal using the worker cache associated with the given worker index.
        /// </summary>
        /// <param name="workerIndex">Index of the calling worker; each worker must use a distinct index.</param>
        /// <param name="constraintHandle">Handle of the active constraint to remove.</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void EnqueueRemoval(int workerIndex, ConstraintHandle constraintHandle)
        {
            workerCaches[workerIndex].EnqueueForRemoval(constraintHandle, solver, bodies);
        }
    }
}
