using BepuPhysics.Constraints;
using BepuUtilities;
using BepuUtilities.Collections;
using BepuUtilities.Memory;
using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace BepuPhysics
{
    /// <summary>
    /// 包含一组类型批处理,这些类型批处理的约束不共享正文引用。
    /// </summary>
    /// <summary>
    /// Contains a set of type batches whose constraints share no body references.
    /// </summary>
    public struct ConstraintBatch
    {
        // Note that active and inactive constraint batches share the same data layout.
        // That means we have a type id->index map in inactive islands too.
        // Reasoning:
        // The type id->index map is required because the solver's handle->constraint indirection stores a type id. If it stored a batch-specific type *index* instead,
        // then every mapping associated with a type batch's constraints would have to be updated whenever that type batch changed slots due to a removal.
        // Given that this could require hundreds or thousands of such changes, we instead rely on this last-second remapping.

        // However, inactive islands will never have removals under normal circumstances; they can only occur by direct user request.
        // In other words, the risk of type batches moving around within an inactive island is pretty much irrelevant. You could actually store a direct handle->type*index* map
        // quite safely, even if it meant either not moving a type batch when it becomes empty, or just brute-force updating the mapping associated with every constraint in the type batch.

        // The cost of storing this extra data is not trivial. Assuming an average of 128 bytes per type id->index map, consider what happens
        // when you have 65536 inactive islands: roughly 10 megabytes of wasted map data.

        // For now, we make no attempt to split the storage layout and simply eat the cost, because:
        // 1) While it does require some memory, 10 megabytes is fairly trivial for any platform that intends to simulate 65536 inactive islands.
        // 2) From a bandwidth perspective, memory used by inactive islands is not really a concern, since by its nature it is not accessed every frame (nor all at once).
        // 3) Avoiding the waste would require splitting the storage representation and/or complicating the handle->constraint mapping.
        // TODO: So maybe one day we'll consider changing this, but it's just not worth it yet.

        // That said, we do store each active constraint batch's BatchReferencedHandles separately from the ConstraintBatch type.
        // Two reasons for the different choice:
        // 1) The handle memory usually ends up an order of magnitude larger. We're not talking about 10MB for 65536 inactive islands here, but 100-400MB or higher.
        // 2) Storing the referenced handles separately in the solver doesn't actually change anything. We just pass them around as parameters; there's no noticeable complication.

        /// <summary>
        /// Maps a constraint type id to the index of the associated type batch in <see cref="TypeBatches"/>, or -1 if no batch of that type exists in this batch.
        /// </summary>
        public Buffer<int> TypeIndexToTypeBatchIndex;
        /// <summary>
        /// Type batches contained in this constraint batch, one per constraint type present.
        /// </summary>
        public QuickList<TypeBatch> TypeBatches;

        public ConstraintBatch(BufferPool pool, int initialTypeCountEstimate = 32)
            : this()
        {
            ResizeTypeMap(pool, initialTypeCountEstimate);
            TypeBatches = new QuickList<TypeBatch>(initialTypeCountEstimate, pool);
        }

        // Grows the type id->type batch index map to at least newSize, initializing new slots to -1 (no batch).
        void ResizeTypeMap(BufferPool pool, int newSize)
        {
            var oldLength = TypeIndexToTypeBatchIndex.Length;
            Debug.Assert(oldLength != BufferPool.GetCapacityForCount<int>(newSize), "Shouldn't resize if nothing changes.");
            pool.ResizeToAtLeast(ref TypeIndexToTypeBatchIndex, newSize, oldLength);
            for (int i = oldLength; i < TypeIndexToTypeBatchIndex.Length; ++i)
            {
                TypeIndexToTypeBatchIndex[i] = -1;
            }
        }
        internal void EnsureTypeMapSize(BufferPool pool, int targetSize)
        {
            if (targetSize > TypeIndexToTypeBatchIndex.Length)
                ResizeTypeMap(pool, targetSize);
        }

        // Debug-only sanity check: the type id->index map and the type batch list must agree in both directions.
        [Conditional("DEBUG")]
        void ValidateTypeBatchMappings()
        {
            for (int i = 0; i < TypeIndexToTypeBatchIndex.Length; ++i)
            {
                var index = TypeIndexToTypeBatchIndex[i];
                if (index >= 0)
                {
                    Debug.Assert(index < TypeBatches.Count);
                    Debug.Assert(TypeBatches[index].TypeId == i);
                }
            }
            for (int i = 0; i < TypeBatches.Count; ++i)
            {
                Debug.Assert(TypeIndexToTypeBatchIndex[TypeBatches[i].TypeId] == i);
            }
        }

        /// <summary>
        /// Gets the TypeBatch in this batch that matches the given type id.
        /// Requires that at least one constraint exist in the type batch.
        /// </summary>
        /// <param name="typeId">Id of the TypeBatch's type to retrieve.</param>
        /// <returns>TypeBatch instance associated with the given type.</returns>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public unsafe ref TypeBatch GetTypeBatch(int typeId)
        {
            return ref *GetTypeBatchPointer(typeId);
        }


        /// <summary>
        /// Gets a pointer to the TypeBatch in this batch that matches the given type id.
        /// Requires that at least one constraint exist in the type batch.
        /// </summary>
        /// <param name="typeId">Id of the TypeBatch's type to retrieve.</param>
        /// <returns>Pointer to the TypeBatch instance associated with the given type.</returns>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public unsafe TypeBatch* GetTypeBatchPointer(int typeId)
        {
            ValidateTypeBatchMappings();
            var typeBatchIndex = TypeIndexToTypeBatchIndex[typeId];
            return TypeBatches.GetPointer(typeBatchIndex);
        }

        // Creates a new type batch for the given type id, registers it in the type map, and returns a pointer to it.
        internal unsafe TypeBatch* CreateNewTypeBatch(int typeId, TypeProcessor typeProcessor, int initialCapacity, BufferPool pool)
        {
            Debug.Assert(typeProcessor != null, "Can't create a type batch for a nonexistent type processor. Did you forget to call Solver.Register<T> for the constraint type?");
            var newIndex = TypeBatches.Count;
            TypeBatches.EnsureCapacity(TypeBatches.Count + 1, pool);
            TypeIndexToTypeBatchIndex[typeId] = newIndex;
            ref var typeBatch = ref TypeBatches.AllocateUnsafely();
            typeProcessor.Initialize(ref typeBatch, initialCapacity, pool);
            return (TypeBatch*)Unsafe.AsPointer(ref typeBatch);
        }


        // Returns the existing type batch for the given type id, creating one (and growing the type map if needed) if none exists.
        internal unsafe TypeBatch* GetOrCreateTypeBatch(int typeId, TypeProcessor typeProcessor, int initialCapacity, BufferPool pool)
        {
            if (typeId >= TypeIndexToTypeBatchIndex.Length)
            {
                // While we only request a capacity one slot larger, buffer pools always return power of 2 sizes, so this isn't going to cause tons of unnecessary resizing.
                ResizeTypeMap(pool, typeId + 1);
                return CreateNewTypeBatch(typeId, typeProcessor, initialCapacity, pool);
            }
            else
            {
                var typeBatchIndex = TypeIndexToTypeBatchIndex[typeId];
                if (typeBatchIndex == -1)
                {
                    return CreateNewTypeBatch(typeId, typeProcessor, initialCapacity, pool);
                }
                else
                {
                    return TypeBatches.GetPointer(typeBatchIndex);
                }
            }
        }

        /// <summary>
        /// Allocates a slot for a new constraint of the given type in this batch, creating the type batch if necessary.
        /// </summary>
        /// <param name="handle">Handle of the constraint being allocated.</param>
        /// <param name="constraintBodyHandles">Handles of the bodies connected by the constraint. All must be active.</param>
        /// <param name="bodies">Bodies collection used to translate handles into memory indices.</param>
        /// <param name="typeId">Type id of the constraint.</param>
        /// <param name="typeProcessor">Type processor associated with the constraint type.</param>
        /// <param name="initialCapacity">Initial capacity to use if a new type batch must be created.</param>
        /// <param name="pool">Pool to allocate from.</param>
        /// <param name="reference">Reference to the allocated constraint slot.</param>
        public unsafe void Allocate(ConstraintHandle handle, Span<BodyHandle> constraintBodyHandles, Bodies bodies,
            int typeId, TypeProcessor typeProcessor, int initialCapacity, BufferPool pool, out ConstraintReference reference)
        {
            // Add all the constraint's body handles to the batch we found (or created) to block future references to the same bodies.
            // Also, convert the handles into memory indices. Constraints store direct memory references for performance reasons.
            var bodyIndices = stackalloc int[constraintBodyHandles.Length];
            for (int j = 0; j < constraintBodyHandles.Length; ++j)
            {
                var bodyHandle = constraintBodyHandles[j];
                ref var location = ref bodies.HandleToLocation[bodyHandle.Value];
                Debug.Assert(location.SetIndex == 0, "Creating a new constraint should have forced the connected bodies awake.");
                bodyIndices[j] = location.Index;
            }
            var typeBatch = GetOrCreateTypeBatch(typeId, typeProcessor, initialCapacity, pool);
            reference = new ConstraintReference(typeBatch, typeProcessor.Allocate(ref *typeBatch, handle, bodyIndices, pool));
            // TODO: We could adjust the TypeBatchAllocation capacities in response to the allocated index.
            // If it exceeds the current capacity, we could ensure the new size is still included.
            // The idea here would be to avoid resizes later by ensuring that the historically encountered size is always used to initialize.
            // This isn't necessarily beneficial, though - often, higher indexed batches will contain smaller numbers of constraints, so allocating a huge number
            // of constraints for them is of very low value. You may want to be a little more clever about the heuristic. Either way, only bother with this once there is
            // evidence that TypeBatch resizes are ever a concern. This will require frame spike analysis, not merely average timings.
            // (While resizes will definitely occur, remember that it only really matters for *new* type batches -
            // and it is rare that a new type batch that actually needs to be enormous is created.)
        }


        // Enumerator that removes the handles of enumerated active body indices from a referenced-handles IndexSet.
        unsafe struct ActiveBodyHandleRemover : IForEach<int>
        {
            public Bodies Bodies;
            public IndexSet* Handles;

            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public ActiveBodyHandleRemover(Bodies bodies, IndexSet* handles)
            {
                Bodies = bodies;
                Handles = handles;
            }

            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            public void LoopBody(int bodyIndex)
            {
                Handles->Remove(Bodies.ActiveSet.IndexToHandle[bodyIndex].Value);
            }
        }

        // Note that the constraint batch removal phases are split for the sake of reuse by the multithreaded constraint remover.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void RemoveTypeBatchIfEmpty(ref TypeBatch typeBatch, int typeBatchIndexToRemove, BufferPool pool)
        {
            if (typeBatch.ConstraintCount == 0)
            {
                var constraintTypeId = typeBatch.TypeId;
                TypeIndexToTypeBatchIndex[constraintTypeId] = -1;
                typeBatch.Dispose(pool); // Note that the dispose must occur before the removal, or else we'd end up disposing whatever type batch moved to occupy the newly empty slot.
                TypeBatches.FastRemoveAt(typeBatchIndexToRemove);
                if (typeBatchIndexToRemove < TypeBatches.Count)
                {
                    // If anything was swapped into the removed slot, we should update the type index to type batch mapping.
                    TypeIndexToTypeBatchIndex[TypeBatches[typeBatchIndexToRemove].TypeId] = typeBatchIndexToRemove;
                }
            }
            ValidateTypeBatchMappings();
        }

        /// <summary>
        /// Removes a constraint from this batch, also removing the handles of its connected bodies from the given referenced-handles set.
        /// </summary>
        public unsafe void RemoveWithHandles(int constraintTypeId, int indexInTypeBatch, IndexSet* handles, Solver solver)
        {
            Debug.Assert(TypeIndexToTypeBatchIndex[constraintTypeId] >= 0, "Type index must actually exist within this batch.");

            var typeBatchIndex = TypeIndexToTypeBatchIndex[constraintTypeId];
            var handleRemover = new ActiveBodyHandleRemover(solver.bodies, handles);
            ref var typeBatch = ref TypeBatches[typeBatchIndex];
            Debug.Assert(typeBatch.ConstraintCount > indexInTypeBatch);
            solver.TypeProcessors[constraintTypeId].EnumerateConnectedBodyIndices(ref typeBatch, indexInTypeBatch, ref handleRemover);
            Remove(ref typeBatch, typeBatchIndex, indexInTypeBatch, solver.TypeProcessors[constraintTypeId], ref solver.HandleToConstraint, solver.pool);

        }

        /// <summary>
        /// Removes a constraint from this batch without touching any referenced-handles set.
        /// </summary>
        public unsafe void Remove(int constraintTypeId, int indexInTypeBatch, Solver solver)
        {
            Debug.Assert(TypeIndexToTypeBatchIndex[constraintTypeId] >= 0, "Type index must actually exist within this batch.");

            var typeBatchIndex = TypeIndexToTypeBatchIndex[constraintTypeId];
            Remove(ref TypeBatches[typeBatchIndex], typeBatchIndex, indexInTypeBatch, solver.TypeProcessors[constraintTypeId], ref solver.HandleToConstraint, solver.pool);
        }

        // Shared removal core: removes the constraint slot, then removes the type batch itself if it became empty.
        unsafe void Remove(ref TypeBatch typeBatch, int typeBatchIndex, int indexInTypeBatch, TypeProcessor typeProcessor, ref Buffer<ConstraintLocation> handleToConstraint, BufferPool pool)
        {
            typeProcessor.Remove(ref typeBatch, indexInTypeBatch, ref handleToConstraint);
            RemoveTypeBatchIfEmpty(ref typeBatch, typeBatchIndex, pool);
        }

        /// <summary>
        /// Removes all type batches from the batch, returning their resources to the pool while keeping the batch itself usable.
        /// </summary>
        public void Clear(BufferPool pool)
        {
            for (int typeBatchIndex = 0; typeBatchIndex < TypeBatches.Count; ++typeBatchIndex)
            {
                TypeBatches[typeBatchIndex].Dispose(pool);
            }
            // Since there are no more type batches, the mapping must be cleared out.
            for (int typeId = 0; typeId < TypeIndexToTypeBatchIndex.Length; ++typeId)
            {
                TypeIndexToTypeBatchIndex[typeId] = -1;
            }
            TypeBatches.Clear();
        }

        // Target capacity for a type batch: whichever is larger of its current constraint count and the solver-defined minimum for its type.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        static int GetTargetCapacity(ref TypeBatch typeBatch, Solver solver)
        {
            return Math.Max(typeBatch.ConstraintCount, solver.GetMinimumCapacityForType(typeBatch.TypeId));
        }

        /// <summary>
        /// Ensures that all type batches within this constraint batch meet or exceed the size requirements of the per-type capacities defined by the solver.
        /// </summary>
        /// <param name="solver">Solver to pull minimum capacities from.</param>
        public void EnsureTypeBatchCapacities(Solver solver)
        {
            for (int i = 0; i < TypeBatches.Count; ++i)
            {
                ref var typeBatch = ref TypeBatches[i];
                var targetCapacity = GetTargetCapacity(ref typeBatch, solver);
                if (targetCapacity > typeBatch.IndexToHandle.Length)
                    solver.TypeProcessors[TypeBatches[i].TypeId].Resize(ref typeBatch, targetCapacity, solver.pool);
            }
        }

        /// <summary>
        /// Applies the solver-defined minimum capacities to existing type batches.
        /// </summary>
        /// <param name="solver">Solver to pull minimum capacities from.</param>
        public void ResizeTypeBatchCapacities(Solver solver)
        {
            for (int i = 0; i < TypeBatches.Count; ++i)
            {
                ref var typeBatch = ref TypeBatches[i];
                solver.TypeProcessors[TypeBatches[i].TypeId].Resize(ref typeBatch, GetTargetCapacity(ref typeBatch, solver), solver.pool);
            }
        }
        /// <summary>
        /// Releases all memory used by the batch.
        /// </summary>
        public void Dispose(BufferPool pool)
        {
            for (int i = 0; i < TypeBatches.Count; ++i)
            {
                TypeBatches[i].Dispose(pool);
            }
            pool.Return(ref TypeIndexToTypeBatchIndex);
            TypeIndexToTypeBatchIndex = new Buffer<int>();
            TypeBatches.Dispose(pool);
            TypeBatches = default;
        }
    }
}
