using BepuPhysics.Collidables;
using BepuUtilities;
using BepuUtilities.Collections;
using BepuUtilities.Memory;
using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using static BepuPhysics.CollisionDetection.WorkerPairCache;

namespace BepuPhysics.CollisionDetection
{
    using OverlapMapping = QuickDictionary<CollidablePair, CollidablePairPointers, CollidablePairComparer>;

    [StructLayout(LayoutKind.Explicit, Size = 8)]
    public struct CollidablePair
    {
        /// <summary>
        /// First collidable in the pair.
        /// </summary>
        [FieldOffset(0)]
        public CollidableReference A;
        /// <summary>
        /// Second collidable in the pair.
        /// </summary>
        [FieldOffset(4)]
        public CollidableReference B;

        /// <summary>
        /// Creates a pair from two collidable references.
        /// </summary>
        /// <param name="a">First collidable of the pair.</param>
        /// <param name="b">Second collidable of the pair.</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public CollidablePair(CollidableReference a, CollidableReference b)
        {
            A = a;
            B = b;
        }

        /// <summary>
        /// Creates a human readable representation of the pair.
        /// </summary>
        public override string ToString() => $"<{A}, {B}>";
    }

    public struct CollidablePairComparer : IEqualityComparerRef<CollidablePair>
    {
        // Note that pairs are sorted by handle, so we can assume order matters.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public bool Equals(ref CollidablePair a, ref CollidablePair b)
        {
            // The pair occupies exactly 8 bytes (explicit layout), so both pairs can be compared as single 64 bit values.
            var packedA = Unsafe.As<CollidablePair, ulong>(ref a);
            var packedB = Unsafe.As<CollidablePair, ulong>(ref b);
            return packedA == packedB;
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public int Hash(ref CollidablePair item)
        {
            const ulong prime1 = 961748927UL;
            const ulong prime2 = 899809343UL;
            // Mix both packed references with large primes, then fold the upper half into the lower 32 bits.
            var combined = (ulong)item.A.Packed * (prime1 * prime2) + (ulong)item.B.Packed * prime2;
            return (int)(combined ^ (combined >> 32));
        }
    }

    public struct CollidablePairPointers
    {
        /// <summary>
        /// Narrow phase specific type and index into the pair cache's constraint data set. Pairs with no associated constraint,
        /// either because no contacts were generated or because the constraint was filtered, will have a nonexistent ConstraintCache.
        /// </summary>
        public PairCacheIndex ConstraintCache;
        /// <summary>
        /// Narrow phase specific type and index into a batch of custom data for the pair. Many types do not use any supplementary data,
        /// but some make use of temporal coherence to accelerate contact generation.
        /// </summary>
        public PairCacheIndex CollisionDetectionCache;
    }

    /// <summary>
    /// Minimalist managed-array list. Used where a non-blittable element type prevents the use of pooled buffers.
    /// </summary>
    internal struct ArrayList<T>
    {
        public T[] Values;
        public int Count;
        /// <summary>
        /// Gets a reference to the element at the given index. No bounds checking beyond the array's own.
        /// </summary>
        public ref T this[int index] => ref Values[index];
        /// <summary>
        /// Gets whether a backing array exists.
        /// </summary>
        public bool Allocated => Values != null;
        public ArrayList(int initialCapacity)
        {
            Count = 0;
            Values = new T[initialCapacity];
        }

        /// <summary>
        /// Returns a reference to the next unused slot and increments the count. Assumes sufficient capacity; asserts in debug only.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal ref T AllocateUnsafely()
        {
            Debug.Assert(Count < Values.Length);
            return ref Values[Count++];
        }
    }

    public partial class PairCache
    {
        public OverlapMapping Mapping;

        /// <summary>
        /// Per-pair 'freshness' flags set when a pair is added or updated by the narrow phase execution. Only initialized during narrow phase execution.
        /// </summary>
        /// <remarks>
        /// This stores one byte per pair. It could be compressed to 1 bit per pair, but that would require manually guaranteeing thread safety.
        /// By using bytes, we rely on the atomic set behavior of data types no larger than the native pointer size. Further, a smaller size would
        /// actually pay a higher price in false sharing. The data type choice is a balancing act between memory bandwidth and false sharing
        /// frequency, settled by after-the-fact profiling.
        /// </remarks>
        internal RawBuffer PairFreshness;
        BufferPool pool;
        int minimumPendingSize;
        int minimumPerTypeCapacity;
        int previousPendingSize;


        // While the current worker caches are read from, the next caches are written to.
        // The worker pair caches contain a reference to a buffer pool, which is a reference type. That makes WorkerPairCache non-blittable, so to avoid
        // an enormous amount of copying complexity, we do not store them in untyped pooled buffers.
        // Since the arrays here are tiny and almost never change size, this is not a big concern.
        ArrayList<WorkerPairCache> workerCaches;
        internal ArrayList<WorkerPairCache> NextWorkerCaches;


        /// <summary>
        /// Constructs a pair cache, creating the overlap mapping and the initial sleeping set capacity.
        /// </summary>
        /// <param name="pool">Pool to allocate main thread resources from.</param>
        /// <param name="initialSetCapacity">Initial capacity of the sleeping sets.</param>
        /// <param name="minimumMappingSize">Initial capacity of the pair-to-pointers mapping.</param>
        /// <param name="minimumPendingSize">Minimum capacity of the per-worker pending add/remove lists.</param>
        /// <param name="minimumPerTypeCapacity">Minimum capacity allocated for each cache type.</param>
        public PairCache(BufferPool pool, int initialSetCapacity, int minimumMappingSize, int minimumPendingSize, int minimumPerTypeCapacity)
        {
            this.minimumPendingSize = minimumPendingSize;
            this.minimumPerTypeCapacity = minimumPerTypeCapacity;
            this.pool = pool;
            Mapping = new OverlapMapping(minimumMappingSize, pool);
            ResizeSetsCapacity(initialSetCapacity, 0);
        }

        /// <summary>
        /// Prepares the 'next' worker caches for a narrow phase execution, sized according to the previous execution's usage,
        /// and creates the pair freshness array covering the current overlaps.
        /// </summary>
        /// <param name="threadDispatcher">Dispatcher supplying per-thread memory pools; null for single threaded execution.</param>
        public void Prepare(IThreadDispatcher threadDispatcher = null)
        {
            int maximumConstraintTypeCount = 0, maximumCollisionTypeCount = 0;
            for (int i = 0; i < workerCaches.Count; ++i)
            {
                workerCaches[i].GetMaximumCacheTypeCounts(out var collision, out var constraint);
                if (collision > maximumCollisionTypeCount)
                    maximumCollisionTypeCount = collision;
                if (constraint > maximumConstraintTypeCount)
                    maximumConstraintTypeCount = constraint;
            }
            var minimumSizesPerConstraintType = new QuickList<PreallocationSizes>(maximumConstraintTypeCount, pool);
            var minimumSizesPerCollisionType = new QuickList<PreallocationSizes>(maximumCollisionTypeCount, pool);
            // Since the minimum size accumulation builds the minimum sizes incrementally, stale data in the array could corrupt the result- it must be cleared.
            minimumSizesPerConstraintType.Span.Clear(0, minimumSizesPerConstraintType.Span.Length);
            minimumSizesPerCollisionType.Span.Clear(0, minimumSizesPerCollisionType.Span.Length);
            for (int i = 0; i < workerCaches.Count; ++i)
            {
                workerCaches[i].AccumulateMinimumSizes(ref minimumSizesPerConstraintType, ref minimumSizesPerCollisionType);
            }

            var threadCount = threadDispatcher != null ? threadDispatcher.ThreadCount : 1;
            // Ensure that the new worker pair caches can hold all workers.
            if (!NextWorkerCaches.Allocated || NextWorkerCaches.Values.Length < threadCount)
            {
                // The next worker caches should never need disposal here; the flush should have already handled it.
#if DEBUG
                for (int i = 0; i < NextWorkerCaches.Count; ++i)
                    Debug.Assert(NextWorkerCaches[i].Equals(default(WorkerPairCache)));
#endif
                Array.Resize(ref NextWorkerCaches.Values, threadCount);
                NextWorkerCaches.Count = threadCount;
            }
            // Note that we have not initialized workerCaches from a previous frame here. In the case where this is the first frame and there are no
            // previous worker caches, there will be no pointers into the caches, and removal analysis loops over the count which defaults to zero- so it's safe.
            NextWorkerCaches.Count = threadCount;

            var pendingSize = Math.Max(minimumPendingSize, previousPendingSize);
            if (threadDispatcher != null)
            {
                for (int i = 0; i < threadCount; ++i)
                {
                    NextWorkerCaches[i] = new WorkerPairCache(i, threadDispatcher.GetThreadMemoryPool(i), ref minimumSizesPerConstraintType, ref minimumSizesPerCollisionType,
                        pendingSize, minimumPerTypeCapacity);
                }
            }
            else
            {
                NextWorkerCaches[0] = new WorkerPairCache(0, pool, ref minimumSizesPerConstraintType, ref minimumSizesPerCollisionType, pendingSize, minimumPerTypeCapacity);
            }
            minimumSizesPerConstraintType.Dispose(pool);
            minimumSizesPerCollisionType.Dispose(pool);

            // Create the pair freshness array covering the existing overlaps.
            pool.TakeAtLeast(Mapping.Count, out PairFreshness);
            // This clears 1 byte per pair. At 32768 pairs with ~10GBps of single core bandwidth, that's around 3 microseconds.
            // Multithreading this would have some small value in larger simulations, but it's very, very marginal.
            PairFreshness.Clear(0, Mapping.Count);

        }


        /// <summary>
        /// Ensures the constraint-handle-to-pair mapping can hold the given capacity (and at least all possibly claimed solver handles).
        /// </summary>
        internal void EnsureConstraintToPairMappingCapacity(Solver solver, int targetCapacity)
        {
            targetCapacity = Math.Max(solver.HandlePool.HighestPossiblyClaimedId + 1, targetCapacity);
            if (ConstraintHandleToPair.Length < targetCapacity)
            {
                pool.ResizeToAtLeast(ref ConstraintHandleToPair, targetCapacity, ConstraintHandleToPair.Length);
            }
        }

        /// <summary>
        /// Resizes the constraint-handle-to-pair mapping toward the given capacity, shrinking or growing as needed
        /// while always covering all possibly claimed solver handles.
        /// </summary>
        internal void ResizeConstraintToPairMappingCapacity(Solver solver, int targetCapacity)
        {
            targetCapacity = BufferPool.GetCapacityForCount<CollisionPairLocation>(Math.Max(solver.HandlePool.HighestPossiblyClaimedId + 1, targetCapacity));
            if (ConstraintHandleToPair.Length != targetCapacity)
            {
                pool.ResizeToAtLeast(ref ConstraintHandleToPair, targetCapacity, Math.Min(targetCapacity, ConstraintHandleToPair.Length));
            }
        }



        /// <summary>
        /// Flushes deferred changes from the previous narrow phase execution: disposes old worker caches, returns the freshness buffer,
        /// presizes the overlap mapping for pending changes, and enqueues the pair cache flush job.
        /// </summary>
        public void PrepareFlushJobs(ref QuickList<NarrowPhaseFlushJob> jobs)
        {
            // Get rid of the now-unused worker caches.
            for (int i = 0; i < workerCaches.Count; ++i)
            {
                workerCaches[i].Dispose();
            }

            // The freshness cache should have already been used to generate the constraint removal requests and the PendingRemoves that we handle now; dispose it.
            pool.Return(ref PairFreshness);

            // Ensure the overlap mapping is large enough up front. This requires scanning all the pending sizes.
            int largestIntermediateSize = Mapping.Count;
            var newMappingSize = Mapping.Count;
            for (int i = 0; i < NextWorkerCaches.Count; ++i)
            {
                ref var cache = ref NextWorkerCaches[i];
                // Removes occur first, so this cache can only produce a larger mapping if there are more adds than removes.
                newMappingSize += cache.PendingAdds.Count - cache.PendingRemoves.Count;
                if (newMappingSize > largestIntermediateSize)
                    largestIntermediateSize = newMappingSize;
            }
            Mapping.EnsureCapacity(largestIntermediateSize, pool);

            jobs.Add(new NarrowPhaseFlushJob { Type = NarrowPhaseFlushJobType.FlushPairCacheChanges }, pool);
        }

        /// <summary>
        /// Applies all pending pair removes and adds from the per-worker caches to the overlap mapping.
        /// </summary>
        public unsafe void FlushMappingChanges()
        {
            // Flush all pending adds from the new set.
            // Note that this phase accesses no shared memory- it's all pair cache local, and no pool accesses are made.
            // That means we can run it as a job alongside solver constraint removal. That's good, because adding and removing from the hash table isn't terribly fast.
            // (On the order of 10-100ns per operation, so in pathological cases it can start showing up in profiles.)
            for (int i = 0; i < NextWorkerCaches.Count; ++i)
            {
                ref var cache = ref NextWorkerCaches[i];

                // Walk backwards on the off chance that a swap can be avoided.
                for (int j = cache.PendingRemoves.Count - 1; j >= 0; --j)
                {
                    var removed = Mapping.FastRemoveRef(ref cache.PendingRemoves[j]);
                    Debug.Assert(removed);
                }
                for (int j = 0; j < cache.PendingAdds.Count; ++j)
                {
                    ref var pending = ref cache.PendingAdds[j];
                    var added = Mapping.AddUnsafelyRef(ref pending.Pair, pending.Pointers);
                    Debug.Assert(added);
                }
            }
        }
        /// <summary>
        /// Disposes the pending change lists, records the largest pending size for next frame's preallocation, and
        /// swaps the 'next' worker caches into the current slot.
        /// </summary>
        public void Postflush()
        {
            // This bookkeeping and disposal phase is trivially cheap compared to the cost of updating the mapping table, so we do it sequentially.
            // The fact that we access the per-worker pools here would prevent easy multithreading anyway; other threads may be using them.
            int largestPendingSize = 0;
            for (int i = 0; i < NextWorkerCaches.Count; ++i)
            {
                ref var cache = ref NextWorkerCaches[i];
                if (cache.PendingAdds.Count > largestPendingSize)
                {
                    largestPendingSize = cache.PendingAdds.Count;
                }
                if (cache.PendingRemoves.Count > largestPendingSize)
                {
                    largestPendingSize = cache.PendingRemoves.Count;
                }
                cache.PendingAdds.Dispose(cache.pool);
                cache.PendingRemoves.Dispose(cache.pool);
            }
            previousPendingSize = largestPendingSize;

            // Swap references.
            var temp = workerCaches;
            workerCaches = NextWorkerCaches;
            NextWorkerCaches = temp;


        }

        /// <summary>
        /// Disposes all worker caches and sleeping sets, returning the pair cache to a cleared state.
        /// </summary>
        internal void Clear()
        {
            for (int i = 0; i < workerCaches.Count; ++i)
            {
                workerCaches[i].Dispose();
            }
            workerCaches.Count = 0;
            // Index 0 is reserved (the active set); sleeping sets start at 1.
            for (int i = 1; i < SleepingSets.Length; ++i)
            {
                if (SleepingSets[i].Allocated)
                {
                    SleepingSets[i].Dispose(pool);
                }
            }
#if DEBUG
            if (NextWorkerCaches.Allocated)
            {
                for (int i = 0; i < NextWorkerCaches.Count; ++i)
                {
                    Debug.Assert(NextWorkerCaches[i].Equals(default(WorkerPairCache)), "Outside of the execution of the narrow phase, the 'next' caches should not be allocated.");
                }
            }
#endif
        }

        /// <summary>
        /// Returns all pooled resources held by the pair cache.
        /// </summary>
        public void Dispose()
        {
            for (int i = 0; i < workerCaches.Count; ++i)
            {
                workerCaches[i].Dispose();
            }
            // Note that we do not need to dispose the worker cache arrays themselves- they're just managed arrays outside the pool's control.
#if DEBUG
            if (NextWorkerCaches.Allocated)
            {
                for (int i = 0; i < NextWorkerCaches.Count; ++i)
                {
                    Debug.Assert(NextWorkerCaches[i].Equals(default(WorkerPairCache)), "Outside of the execution of the narrow phase, the 'next' caches should not be allocated.");
                }
            }
#endif
            Mapping.Dispose(pool);
            for (int i = 1; i < SleepingSets.Length; ++i)
            {
                ref var set = ref SleepingSets[i];
                if (set.Allocated)
                    set.Dispose(pool);
            }
            pool.Return(ref SleepingSets);
            // The constraint handle to pair mapping is partially slaved to the constraint handle capacity.
            // It gets ensured every frame, but the gap between construction and the first frame could leave it uninitialized.
            if (ConstraintHandleToPair.Allocated)
                pool.Return(ref ConstraintHandleToPair);
        }


        /// <summary>
        /// Looks up the index of a pair within the overlap mapping, if present.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public int IndexOf(ref CollidablePair pair)
        {
            return Mapping.IndexOfRef(ref pair);
        }

        /// <summary>
        /// Gets a reference to the cache pointers stored for the pair at the given mapping index.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public ref CollidablePairPointers GetPointers(int index)
        {
            return ref Mapping.Values[index];
        }

        /// <summary>
        /// Adds a new pair's collision and constraint caches to the given worker's 'next' cache.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal unsafe PairCacheIndex Add<TConstraintCache, TCollisionCache>(int workerIndex, ref CollidablePair pair,
            ref TCollisionCache collisionCache, ref TConstraintCache constraintCache)
            where TConstraintCache : IPairCacheEntry
            where TCollisionCache : IPairCacheEntry
        {
            // Note that we do not have to set any freshness bytes here; using this path means there exists no previous overlap to remove anyway.
            return NextWorkerCaches[workerIndex].Add(ref pair, ref collisionCache, ref constraintCache);
        }

        /// <summary>
        /// Updates an existing pair's caches in the given worker's 'next' cache and marks the pair fresh so it isn't removed.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal unsafe void Update<TConstraintCache, TCollisionCache>(int workerIndex, int pairIndex, ref CollidablePairPointers pointers,
            ref TCollisionCache collisionCache, ref TConstraintCache constraintCache)
            where TConstraintCache : IPairCacheEntry
            where TCollisionCache : IPairCacheEntry
        {
            // We're updating an existing pair, so we should defend it from being removed.
            PairFreshness[pairIndex] = 0xFF;
            NextWorkerCaches[workerIndex].Update(ref pointers, ref collisionCache, ref constraintCache);
        }

        // NOTE(review): original breakdown comment was garbled by translation; presumably these counts cover the convex/nonconvex
        // contact manifold constraint variants and collision cache types registered by the narrow phase - verify against the type registrations.
        public const int CollisionConstraintTypeCount = 22;
        public const int CollisionTypeCount = 16;


        /// <summary>
        /// Gets a raw pointer to the constraint cache of a pair within the *previous* frame's worker caches.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal unsafe void* GetOldConstraintCachePointer(int pairIndex)
        {
            ref var constraintCacheIndex = ref Mapping.Values[pairIndex].ConstraintCache;
            return workerCaches[constraintCacheIndex.Cache].GetConstraintCachePointer(constraintCacheIndex);
        }

        /// <summary>
        /// Gets the constraint handle stored for a pair within the *previous* frame's worker caches.
        /// Assumes the handle occupies the first 4 bytes of the constraint cache.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal unsafe ConstraintHandle GetOldConstraintHandle(int pairIndex)
        {
            ref var constraintCacheIndex = ref Mapping.Values[pairIndex].ConstraintCache;
            return *(ConstraintHandle*)workerCaches[constraintCacheIndex.Cache].GetConstraintCachePointer(constraintCacheIndex);
        }

        /// <summary>
        /// Completes the addition of a constraint by filling the narrow phase's pointer to the constraint and distributing accumulated impulses.
        /// </summary>
        /// <typeparam name="TContactImpulses">Count-specialized type containing cached accumulated impulses.</typeparam>
        /// <param name="narrowPhase">Narrow phase that triggered the constraint add.</param>
        /// <param name="solver">Solver containing the constraint to set the impulses of.</param>
        /// <param name="impulses">Warm starting impulses to apply to the contact constraint.</param>
        /// <param name="constraintCacheIndex">Index of the constraint cache to update.</param>
        /// <param name="constraintHandle">Constraint handle associated with the constraint cache being updated.</param>
        /// <param name="pair">Collidable pair associated with the new constraint.</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal unsafe void CompleteConstraintAdd<TContactImpulses>(NarrowPhase narrowPhase, Solver solver, ref TContactImpulses impulses, PairCacheIndex constraintCacheIndex,
            ConstraintHandle constraintHandle, ref CollidablePair pair)
        {
            // Note that this update is directed at the *NEXT* worker caches. We have not yet performed the flush that swaps references over.
            // Note that this assumes the constraint handle is stored in the first 4 bytes of the constraint cache.
            *(ConstraintHandle*)NextWorkerCaches[constraintCacheIndex.Cache].GetConstraintCachePointer(constraintCacheIndex) = constraintHandle;
            solver.GetConstraintReference(constraintHandle, out var reference);
            Debug.Assert(reference.IndexInTypeBatch >= 0 && reference.IndexInTypeBatch < reference.TypeBatch.ConstraintCount);
            narrowPhase.contactConstraintAccessors[reference.TypeBatch.TypeId].ScatterNewImpulses(ref reference, ref impulses);
            // This mapping entry had to be deferred until now because the constraint handle wasn't known until now. Now that we have it,
            // we can fill in the pointers back to the overlap mapping.
            ConstraintHandleToPair[constraintHandle.Value].Pair = pair;
        }

        /// <summary>
        /// Gets a typed reference to a constraint cache entry from the *previous* frame's worker caches.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public unsafe ref TConstraintCache GetConstraintCache<TConstraintCache>(PairCacheIndex constraintCacheIndex)
        {
            // Note that these refer to the previous workerCaches, not the nextWorkerCaches. We read from these caches during the narrow phase to redistribute impulses.
            return ref Unsafe.AsRef<TConstraintCache>(workerCaches[constraintCacheIndex.Cache].GetConstraintCachePointer(constraintCacheIndex));
        }

        /// <summary>
        /// Gets a typed reference to a collision cache entry from the *previous* frame's worker caches.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public unsafe ref TCollisionData GetCollisionData<TCollisionData>(PairCacheIndex index) where TCollisionData : struct, IPairCacheEntry
        {
            return ref Unsafe.AsRef<TCollisionData>(workerCaches[index.Cache].GetCollisionCachePointer(index));
        }

    }
}
