#include "VulkanBarriers.h"
#include "VulkanCommandBuffer.h"

namespace Alice
{
	// Returns the pipeline stages that may touch an image while it sits in the
	// given layout. Unhandled layouts yield 0 (callers are expected to pass one
	// of the layouts listed here).
	static VkPipelineStageFlags GetVkStageFlagsForLayout(VkImageLayout Layout)
	{
		switch (Layout)
		{
			// Copy-engine source and destination both live in the transfer stage.
			case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
			case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
				return VK_PIPELINE_STAGE_TRANSFER_BIT;

			case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
				return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;

			// Writable depth/stencil targets are accessed by both depth-test stages.
			case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
			case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
			case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
				return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;

			case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
				return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;

			// Read-only depth/stencil may be sampled in the fragment shader and
			// read by the depth/stencil tests.
			case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
			case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
			case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
			case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
			case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
				return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;

			case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
				return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;

			case VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT:
				return VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT;

			case VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR:
				return VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR;

			case VK_IMAGE_LAYOUT_GENERAL:
			case VK_IMAGE_LAYOUT_UNDEFINED:
				return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;

			// todo-jn: sync2 currently only used by depth/stencil targets
			case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL:
				return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;

			// todo-jn: sync2 currently only used by depth/stencil targets
			case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL:
				return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;

			default:
				return 0;
		}
	}
	//
	// The following two functions are used when the RHI needs to do image layout transitions internally.
	// They are not used for the transitions requested through the public API (RHICreate/Begin/EndTransition)
	// unless the initial state is ERHIAccess::Unknown, in which case the tracking code kicks in.
	//
	// Returns the access mask (which memory accesses are possible) for an image
	// sitting in the given layout. Unhandled layouts yield 0.
	static VkAccessFlags GetVkAccessMaskForLayout(const VkImageLayout Layout)
	{
		switch (Layout)
		{
			case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
				return VK_ACCESS_TRANSFER_READ_BIT;

			case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
				return VK_ACCESS_TRANSFER_WRITE_BIT;

			case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
				return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

			// Writable depth/stencil attachment: both test reads and writes.
			case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
			case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
			case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
				return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

			// One aspect read-only (samplable), the other still writable.
			case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
			case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
				return VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

			case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
				return VK_ACCESS_SHADER_READ_BIT;

			// Fully read-only depth/stencil: shader sampling plus test reads.
			case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
			case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
			case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
				return VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;

			// Presentation needs no access flags; availability is handled by the
			// semaphore/fence used with the swapchain.
			case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
				return 0;

			case VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT:
				return VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT;

			case VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR:
				return VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR;

			case VK_IMAGE_LAYOUT_GENERAL:
				// todo-jn: could be used for R64 in read layout
			case VK_IMAGE_LAYOUT_UNDEFINED:
				return 0;

			// todo-jn: sync2 currently only used by depth/stencil targets
			case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL:
				return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

			// todo-jn: sync2 currently only used by depth/stencil targets
			case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL:
				return VK_ACCESS_SHADER_READ_BIT;

			default:
				return 0;
		}
	}
	// Converts a sync2 barrier (VkImageMemoryBarrier2) into the legacy
	// VkImageMemoryBarrier form. The per-barrier srcStageMask/dstStageMask have
	// no equivalent field on the legacy struct; callers must merge them into the
	// vkCmdPipelineBarrier stage arguments separately (see DowngradeBarrierArray).
	static void DowngradeBarrier(VkImageMemoryBarrier& OutBarrier, const VkImageMemoryBarrier2& InBarrier)
	{
		OutBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
		// NOTE(review): pNext is copied verbatim; if a caller ever chains
		// sync2-only structures here, they would be invalid on the legacy
		// barrier — confirm callers keep pNext null or legacy-compatible.
		OutBarrier.pNext = InBarrier.pNext;
		OutBarrier.srcAccessMask = InBarrier.srcAccessMask;
		OutBarrier.dstAccessMask = InBarrier.dstAccessMask;
		OutBarrier.oldLayout = InBarrier.oldLayout;
		OutBarrier.newLayout = InBarrier.newLayout;
		OutBarrier.srcQueueFamilyIndex = InBarrier.srcQueueFamilyIndex;
		OutBarrier.dstQueueFamilyIndex = InBarrier.dstQueueFamilyIndex;
		OutBarrier.image = InBarrier.image;
		OutBarrier.subresourceRange = InBarrier.subresourceRange;
	}
	// Appends the legacy (non-sync2) equivalents of SrcArray's barriers to
	// TargetArray, and ORs every source barrier's stage masks into the merged
	// src/dst masks used for the single vkCmdPipelineBarrier call.
	//
	// Bug fix: the previous version wrote the converted barriers at indices
	// [0, SrcArray.size()), clobbering any entries already present in
	// TargetArray instead of appending after them. Harmless only when
	// TargetArray happened to be empty.
	template <typename DstArrayType, typename SrcArrayType>
	static void DowngradeBarrierArray(DstArrayType& TargetArray, const SrcArrayType& SrcArray, VkPipelineStageFlags& MergedSrcStageMask, VkPipelineStageFlags& MergedDstStageMask)
	{
		const size_t FirstNewIndex = TargetArray.size();
		TargetArray.resize(FirstNewIndex + SrcArray.size());
		for (size_t i = 0; i < SrcArray.size(); ++i)
		{
			// Convert in place at the slot reserved past the original contents.
			DowngradeBarrier(TargetArray[FirstNewIndex + i], SrcArray[i]);
			MergedSrcStageMask |= SrcArray[i].srcStageMask;
			MergedDstStageMask |= SrcArray[i].dstStageMask;
		}
	}
	template <typename BarrierArrayType>
	static void MergeDepthStencilLayouts(BarrierArrayType& TargetArray)
	{
		for (auto& Barrier : TargetArray)
		{
			if (VKHasAnyFlags(Barrier.subresourceRange.aspectMask, (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)))
			{
				if (Barrier.newLayout == VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL)
				{
					Barrier.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
				}
				else if (Barrier.newLayout == VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL)
				{
					Barrier.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
				}

				if (Barrier.oldLayout == VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL)
				{
					Barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
				}
				else if (Barrier.oldLayout == VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL)
				{
					Barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
				}
			}
		}
	}
	//
	// Helpers for filling in the fields of a VkImageMemoryBarrier structure.
	//
	// Fills in every field of a sync2 image memory barrier (VkImageMemoryBarrier2).
	// Queue family indices are set to VK_QUEUE_FAMILY_IGNORED, so barriers built
	// here never perform a queue-family ownership transfer.
	static void SetupImageBarrier(VkImageMemoryBarrier2& ImgBarrier, VkImage Image, VkPipelineStageFlags SrcStageFlags, VkPipelineStageFlags DstStageFlags, 
		VkAccessFlags SrcAccessFlags, VkAccessFlags DstAccessFlags, VkImageLayout SrcLayout, VkImageLayout DstLayout, const VkImageSubresourceRange& SubresRange)
	{
		ImgBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2;
		ImgBarrier.pNext = nullptr;
		// Execution dependency: stages that must finish before / wait after.
		ImgBarrier.srcStageMask = SrcStageFlags;
		ImgBarrier.dstStageMask = DstStageFlags;
		// Memory dependency: writes to make available / accesses to make visible.
		ImgBarrier.srcAccessMask = SrcAccessFlags;
		ImgBarrier.dstAccessMask = DstAccessFlags;
		ImgBarrier.oldLayout = SrcLayout;
		ImgBarrier.newLayout = DstLayout;
		// No queue-family ownership transfer.
		ImgBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
		ImgBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
		ImgBarrier.image = Image;
		ImgBarrier.subresourceRange = SubresRange;
	}
	// Queues a layout transition for the given image/subresource range, deriving
	// the stage and access masks from the source and destination layouts. The
	// barrier is only recorded into a command buffer when Execute() is called.
	void CGIVulkanPipelineBarrier::AddImageLayoutTransition(VkImage Image, VkImageLayout SrcLayout, VkImageLayout DstLayout, const VkImageSubresourceRange& SubresourceRange)
	{
		VkImageMemoryBarrier2 NewBarrier = {};
		SetupImageBarrier(NewBarrier, Image,
			GetVkStageFlagsForLayout(SrcLayout), GetVkStageFlagsForLayout(DstLayout),
			GetVkAccessMaskForLayout(SrcLayout), GetVkAccessMaskForLayout(DstLayout),
			SrcLayout, DstLayout, SubresourceRange);
		ImageBarriers.push_back(NewBarrier);
	}
    // Convenience helper that builds a VkImageSubresourceRange covering the
    // given aspect(s), mip range and array-layer range.
    VkImageSubresourceRange CGIVulkanPipelineBarrier::MakeSubresourceRange(VkImageAspectFlags AspectMask, uint32 FirstMip, uint32 NumMips, uint32 FirstLayer, uint32 NumLayers)
    {
        // Aggregate order matches the Vulkan struct definition:
        // aspectMask, baseMipLevel, levelCount, baseArrayLayer, layerCount.
        return VkImageSubresourceRange{ AspectMask, FirstMip, NumMips, FirstLayer, NumLayers };
    }
	// Records all queued barriers into CmdBuffer via the legacy (non-sync2)
	// vkCmdPipelineBarrier entry point. The sync2-style barriers accumulated in
	// ImageBarriers are downgraded to VkImageMemoryBarrier, their per-barrier
	// stage masks merged into a single src/dst pair, and sync2-only layouts
	// remapped to legacy-compatible depth/stencil layouts.
	//
	// NOTE(review): only ImageBarriers are flushed on this path; memory and
	// buffer barriers (handled in the removed commented-out code) are not
	// processed here — confirm they are flushed elsewhere if this class queues them.
	void CGIVulkanPipelineBarrier::Execute(VkCommandBuffer CmdBuffer)
	{
		// Bug fix: previously vkCmdPipelineBarrier was issued unconditionally.
		// With nothing queued, both merged stage masks stay 0, and a stage mask
		// of 0 is not valid for the legacy entry point (only sync2 allows
		// VK_PIPELINE_STAGE_NONE). Skip the call entirely when there is no work.
		if (ImageBarriers.empty())
		{
			return;
		}

		VkPipelineStageFlags SrcStageMask = 0;
		VkPipelineStageFlags DstStageMask = 0;

		// Downgrade sync2 barriers and merge their stage masks.
		std::vector<VkImageMemoryBarrier> TempImageBarriers;
		DowngradeBarrierArray(TempImageBarriers, ImageBarriers, SrcStageMask, DstStageMask);
		MergeDepthStencilLayouts(TempImageBarriers);

		vkCmdPipelineBarrier(CmdBuffer, SrcStageMask, DstStageMask, 0,
			0, nullptr,
			0, nullptr,
			static_cast<uint32_t>(TempImageBarriers.size()), TempImageBarriers.data());
	}

	// Command-buffer-object overload. The commented-out code below is the
	// intended sync2 path (vkCmdPipelineBarrier2KHR, with a fallback); until it
	// is enabled, this always routes through the legacy-path overload above.
	void CGIVulkanPipelineBarrier::Execute(VulkanCommandBuffer* CmdBuffer)
	{
		/*if (MemoryBarriers.Num() != 0 || BufferBarriers.Num() != 0 || ImageBarriers.Num() != 0)
		{
			if (CmdBuffer->GetDevice()->SupportsParallelRendering())
			{
				VkDependencyInfo DependencyInfo;
				DependencyInfo.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO;
				DependencyInfo.pNext = nullptr;
				DependencyInfo.dependencyFlags = 0;
				DependencyInfo.memoryBarrierCount = MemoryBarriers.Num();
				DependencyInfo.pMemoryBarriers = MemoryBarriers.GetData();
				DependencyInfo.bufferMemoryBarrierCount = BufferBarriers.Num();
				DependencyInfo.pBufferMemoryBarriers = BufferBarriers.GetData();
				DependencyInfo.imageMemoryBarrierCount = ImageBarriers.Num();
				DependencyInfo.pImageMemoryBarriers = ImageBarriers.GetData();
				vkCmdPipelineBarrier2KHR(CmdBuffer->GetHandle(), &DependencyInfo);
			}
			else
			{
				// Call the original execute with older types
				Execute(CmdBuffer->GetHandle());
			}
		}*/
		// For now, always use the legacy vkCmdPipelineBarrier path.
		Execute(CmdBuffer->GetHandle());
	}
    //
	// Used when we need to change the layout of a single image. Some plug-ins call this function from outside the RHI (Steam VR, at the time of writing this).
	//
	// Transitions a single image (or subresource range of it) from OldLayout to
	// NewLayout and records the barrier into CmdBuffer immediately.
	void VulkanSetImageLayout(VulkanCommandBuffer* CmdBuffer, VkImage Image, VkImageLayout OldLayout, VkImageLayout NewLayout, const VkImageSubresourceRange& SubresourceRange)
	{
		CGIVulkanPipelineBarrier LayoutBarrier;
		LayoutBarrier.AddImageLayoutTransition(Image, OldLayout, NewLayout, SubresourceRange);
		LayoutBarrier.Execute(CmdBuffer);
	}
	// Copy constructor.
	// Improvement: members are copy-constructed via the initializer list instead
	// of being default-constructed and then assigned in the body — this matters
	// for mSubresLayouts, whose container would otherwise be built empty and
	// then copy-assigned.
	// NOTE(review): this is a plain member-wise copy; if the class gains new
	// members, prefer `= default` over extending this list.
	VulkanImageLayout::VulkanImageLayout(const VulkanImageLayout& inOther)
		: mNumMips(inOther.mNumMips)
		, mNumLayers(inOther.mNumLayers)
		, mNumPlanes(inOther.mNumPlanes)
		// The layout when all the subresources are in the same state.
		, mMainLayout(inOther.mMainLayout)
		// Explicit subresource layouts. When populated, holds
		// NumPlanes*NumLayers*NumMips elements (see Set()).
		, mSubresLayouts(inOther.mSubresLayouts)
	{
	}

	// Returns true when every subresource selected by SubresourceRange is
	// currently tracked as being in the given layout.
	bool VulkanImageLayout::AreSubresourcesSameLayout(VkImageLayout Layout, const VkImageSubresourceRange& SubresourceRange) const
	{
		// Fast path: no per-subresource tracking means the whole image shares mMainLayout.
		if (mSubresLayouts.size() == 0)
		{
			return mMainLayout == Layout;
		}

		// A stencil-only range addresses the last plane; a range that includes
		// the stencil aspect extends through it.
		const uint32 FirstPlane = (SubresourceRange.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ? mNumPlanes - 1 : 0;
		const uint32 LastPlane = (SubresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) ? mNumPlanes : 1;

		const uint32 FirstLayer = SubresourceRange.baseArrayLayer;
		const uint32 LastLayer = FirstLayer + GetSubresRangeLayerCount(SubresourceRange);

		const uint32 FirstMip = SubresourceRange.baseMipLevel;
		const uint32 LastMip = FirstMip + GetSubresRangeMipCount(SubresourceRange);

		// Layouts are stored flat: plane-major, then layer, then mip.
		for (uint32 Plane = FirstPlane; Plane < LastPlane; ++Plane)
		{
			const uint32 PlaneBase = Plane * mNumLayers * mNumMips;
			for (uint32 Layer = FirstLayer; Layer < LastLayer; ++Layer)
			{
				const uint32 LayerBase = PlaneBase + (Layer * mNumMips);
				for (uint32 Mip = FirstMip; Mip < LastMip; ++Mip)
				{
					if (mSubresLayouts[LayerBase + Mip] != Layout)
					{
						return false;
					}
				}
			}
		}

		return true;
	}

	void VulkanImageLayout::CollapseSubresLayoutsIfSame()
	{
		if (mSubresLayouts.size() == 0)
		{
			return;
		}

		const VkImageLayout Layout = mSubresLayouts[0];
		for (uint32 i = 1; i < mNumPlanes * mNumLayers * mNumMips; ++i)
		{
			if (mSubresLayouts[i] != Layout)
			{
				return;
			}
		}

		mMainLayout = Layout;
		mSubresLayouts.clear();
	}

	// Records that the subresources selected by SubresourceRange are now in
	// Layout, switching to (or collapsing away from) per-subresource tracking
	// as needed.
	void VulkanImageLayout::Set(VkImageLayout Layout, const VkImageSubresourceRange& SubresourceRange)
	{
		// A stencil-only range addresses the last plane; a range that includes
		// the stencil aspect extends through it.
		const uint32 FirstPlane = (SubresourceRange.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ? mNumPlanes - 1 : 0;
		const uint32 LastPlane = (SubresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) ? mNumPlanes : 1;

		const uint32 FirstLayer = SubresourceRange.baseArrayLayer;
		const uint32 LayerCount = GetSubresRangeLayerCount(SubresourceRange);

		const uint32 FirstMip = SubresourceRange.baseMipLevel;
		const uint32 MipCount = GetSubresRangeMipCount(SubresourceRange);

		const bool bCoversWholeResource =
			(FirstPlane == 0) && (LastPlane == mNumPlanes) &&
			(FirstLayer == 0) && (LayerCount == mNumLayers) &&
			(FirstMip == 0) && (MipCount == mNumMips);
		if (bCoversWholeResource)
		{
			// The entire image moves to one layout: drop per-subresource tracking.
			mMainLayout = Layout;
			mSubresLayouts.clear();
			return;
		}

		// Partial update: switch to per-subresource tracking if needed, seeding
		// every entry with the current main layout.
		if (mSubresLayouts.size() == 0)
		{
			mSubresLayouts.assign(mNumPlanes * mNumLayers * mNumMips, mMainLayout);
		}

		// Layouts are stored flat: plane-major, then layer, then mip.
		for (uint32 Plane = FirstPlane; Plane < LastPlane; ++Plane)
		{
			const uint32 PlaneBase = Plane * (mNumLayers * mNumMips);
			for (uint32 Layer = FirstLayer; Layer < FirstLayer + LayerCount; ++Layer)
			{
				const uint32 LayerBase = PlaneBase + (Layer * mNumMips);
				for (uint32 Mip = FirstMip; Mip < FirstMip + MipCount; ++Mip)
				{
					mSubresLayouts[LayerBase + Mip] = Layout;
				}
			}
		}

		// It's possible we've just set all the subresources to the same layout.
		// If that's the case, get rid of the subresource info and set the main
		// layout appropriately.
		CollapseSubresLayoutsIfSame();
	}
}
