#pragma once
#include "VulkanResource.h"
#include "CGIVulkan.h"
#include "VulkanCommandBuffer.h"
#include "VulkanContext.h"
#include "VulkanBarriers.h"
#include "VulkanRenderPass.h"
#include "Runtime/CGI/BindingState.h"
#include "VulkanMemory.h"
#include "Runtime/CGI/CGICommandList.h"

namespace Alice{
	// Records a staging-buffer allocation made during Lock() so that the matching
	// Unlock() can retrieve it, issue the GPU copy (for writes), and release the
	// staging buffer.  Entries live in GPendingLocks, keyed by the locked buffer.
	struct FVulkanPendingBufferLock
	{
		FStagingBuffer* StagingBuffer = nullptr;	// CPU-mapped scratch buffer handed out by Lock()
		uint32 Offset = 0;							// byte offset into the destination buffer
		uint32 Size = 0;							// number of bytes covered by the lock
		EResourceLockMode LockMode = RLM_Num;		// RLM_Num doubles as "invalid / no pending lock"
	};

	// File-local registry of in-flight buffer locks (Lock() inserts, Unlock() removes).
	// NOTE(review): unsynchronized — the FScopeLock guards below are commented out,
	// so this is only safe if Lock/Unlock are confined to a single thread; confirm.
	static std::unordered_map<VulkanResourceMultiBuffer*, FVulkanPendingBufferLock> GPendingLocks;
	// Registers a pending lock for Buffer so the matching Unlock() can find the
	// staging buffer later.  A buffer must not be locked twice without an
	// intervening Unlock (no nested locks), which the check below asserts.
	static FORCEINLINE void AddPendingBufferLock(VulkanResourceMultiBuffer* Buffer, FVulkanPendingBufferLock& PendingLock)
	{
		//FScopeLock ScopeLock(&GPendingLockMutex);
		// std::unordered_map has no Contains() member (that is the UE TMap API);
		// use find() to assert the buffer is not already locked.
		check(GPendingLocks.find(Buffer) == GPendingLocks.end());
		GPendingLocks.emplace(Buffer, PendingLock);
	}
	// Fetches and removes the pending lock previously recorded for Buffer.
	// If no entry exists, a default-constructed FVulkanPendingBufferLock is
	// returned (StagingBuffer == nullptr, LockMode == RLM_Num).
	static FORCEINLINE FVulkanPendingBufferLock GetPendingBufferLock(VulkanResourceMultiBuffer* Buffer)
	{
		FVulkanPendingBufferLock Result;

		// Found only if it was created for Write
		//FScopeLock ScopeLock(&GPendingLockMutex);
		const auto It = GPendingLocks.find(Buffer);
		if (It != GPendingLocks.end())
		{
			Result = It->second;
			GPendingLocks.erase(It);
		}
		//const bool bFound = GPendingLocks.RemoveAndCopyValue(Buffer, PendingLock);

		//checkf(bFound, TEXT("Mismatched Buffer Lock/Unlock!"));
		return Result;
	}
	// Maps a UE pixel format onto the Vulkan image-aspect bits it exposes.
	// For the combined depth/stencil format the caller chooses which aspects
	// to include via bIncludeDepth / bIncludeStencil; any format that is not a
	// depth/stencil variant is treated as color.
	VkImageAspectFlags GetAspectMaskFromUEFormat(EPixelFormat Format, bool bIncludeStencil, bool bIncludeDepth)
	{
		if (Format == EPixelFormat::PF_X24_G8)
		{
			// Stencil-only view of a packed depth/stencil surface.
			return VK_IMAGE_ASPECT_STENCIL_BIT;
		}

		if (Format == EPixelFormat::PF_DepthStencil)
		{
			VkImageAspectFlags Mask = 0;
			if (bIncludeDepth)
			{
				Mask |= VK_IMAGE_ASPECT_DEPTH_BIT;
			}
			if (bIncludeStencil)
			{
				Mask |= VK_IMAGE_ASPECT_STENCIL_BIT;
			}
			return Mask;
		}

		if (Format == EPixelFormat::PF_ShadowDepth || Format == EPixelFormat::PF_D24)
		{
			return VK_IMAGE_ASPECT_DEPTH_BIT;
		}

		return VK_IMAGE_ASPECT_COLOR_BIT;
	}
	// Derives VkImageUsageFlags for an image from its UE create flags.
	// Every image starts with TRANSFER_SRC/DST and SAMPLED.  The branches below
	// form an else-if chain, so precedence matters: Presentable wins over
	// RenderTargetable/DepthStencilTargetable, which win over the resolve-target
	// flags.  UAV is orthogonal and adds STORAGE at the end.
	VkImageUsageFlags GetUsageFlagsFromCreateFlags(VulkanDevice& InDevice, const ETextureCreateFlags& UEFlags)
	{
		VkImageUsageFlags UsageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;

		if (EnumHasAnyFlags(UEFlags, ETextureCreateFlags::Presentable))
		{
			// NOTE(review): the presentable path only adds STORAGE here, not
			// COLOR_ATTACHMENT — confirm the swapchain creation path supplies
			// the attachment usage separately.
			UsageFlags |= VK_IMAGE_USAGE_STORAGE_BIT;
		}
		else if (EnumHasAnyFlags(UEFlags, ETextureCreateFlags::RenderTargetable | ETextureCreateFlags::DepthStencilTargetable))
		{
			if (EnumHasAllFlags(UEFlags, ETextureCreateFlags::InputAttachmentRead))
			{
				UsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
			}
			// RenderTargetable takes priority if both attachment flags are set.
			UsageFlags |= (EnumHasAnyFlags(UEFlags, ETextureCreateFlags::RenderTargetable) ? VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT : VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
			if (EnumHasAllFlags(UEFlags, ETextureCreateFlags::Memoryless))
			{
				UsageFlags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
				// Remove the transfer and sampled bits, as they are incompatible with the transient bit.
				UsageFlags &= ~(VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
			}
		}
		else if (EnumHasAnyFlags(UEFlags, ETextureCreateFlags::DepthStencilResolveTarget))
		{
			UsageFlags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
		}
		else if (EnumHasAnyFlags(UEFlags, ETextureCreateFlags::ResolveTargetable))
		{
			UsageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
		}

		// UAV textures are written from shaders regardless of attachment usage.
		if (EnumHasAnyFlags(UEFlags, ETextureCreateFlags::UAV))
		{
			UsageFlags |= VK_IMAGE_USAGE_STORAGE_BIT;
		}

		return UsageFlags;
	}
	// Chooses the initial VkImageLayout for a resource based on its RHI access
	// state.  The checks are ordered by priority: render-target / depth access
	// first, then SRV, then UAV, and finally exact matches for copy/initial
	// states.  Anything unrecognized falls back to UNDEFINED.
	static VkImageLayout GetInitialLayoutFromRHIAccess(ECGIAccess RHIAccess, bool bIsDepthStencilTarget, bool bSupportReadOnlyOptimal)
	{
		// Presentable images are treated as color attachments at creation time.
		if (EnumHasAnyFlags(RHIAccess, ECGIAccess::RTV) || RHIAccess == ECGIAccess::Present)
		{
			return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
		}

		if (EnumHasAnyFlags(RHIAccess, ECGIAccess::DSVWrite))
		{
			return VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL;
		}

		if (EnumHasAnyFlags(RHIAccess, ECGIAccess::DSVRead))
		{
			return VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL;
		}

		if (EnumHasAnyFlags(RHIAccess, ECGIAccess::SRVMask))
		{
			// Depth/stencil sampled as SRV needs the aspect-agnostic read-only layout.
			if (bIsDepthStencilTarget)
			{
				return VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL;
			}

			// Some formats cannot use SHADER_READ_ONLY_OPTIMAL; fall back to GENERAL.
			return bSupportReadOnlyOptimal ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : VK_IMAGE_LAYOUT_GENERAL;
		}

		// UAV (storage image) access always requires GENERAL.
		if (EnumHasAnyFlags(RHIAccess, ECGIAccess::UAVMask))
		{
			return VK_IMAGE_LAYOUT_GENERAL;
		}

		switch (RHIAccess)
		{
		case ECGIAccess::Unknown:	return VK_IMAGE_LAYOUT_UNDEFINED;
		case ECGIAccess::Discard:	return VK_IMAGE_LAYOUT_UNDEFINED;
		case ECGIAccess::CopySrc:	return VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
		case ECGIAccess::CopyDest:	return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
		}

		return VK_IMAGE_LAYOUT_UNDEFINED;
	}
	// Resolves a UE pixel format to its VkFormat: the sRGB variant comes from
	// the GVulkanSRGBFormat table, the linear variant from the platform format
	// stored in the global GPixelFormats table.
	VkFormat UEToVkTextureFormat(EPixelFormat UEFormat, const bool bIsSRGB)
	{
		const uint32 FormatIndex = (uint32)UEFormat;
		return bIsSRGB
			? GVulkanSRGBFormat[FormatIndex]
			: (VkFormat)GPixelFormats[FormatIndex].PlatformFormat;
	}
	// Converts an RHI texture dimension into the matching Vulkan image-view
	// type.  Unsupported dimensions map to VK_IMAGE_VIEW_TYPE_MAX_ENUM so the
	// caller can detect a missing translation.
	VkImageViewType UETextureDimensionToVkImageViewType(ETextureDimension Dimension)
	{
		if (Dimension == ETextureDimension::Texture2D)        return VK_IMAGE_VIEW_TYPE_2D;
		if (Dimension == ETextureDimension::Texture2DArray)   return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
		if (Dimension == ETextureDimension::Texture3D)        return VK_IMAGE_VIEW_TYPE_3D;
		if (Dimension == ETextureDimension::TextureCube)      return VK_IMAGE_VIEW_TYPE_CUBE;
		if (Dimension == ETextureDimension::TextureCubeArray) return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
		return VK_IMAGE_VIEW_TYPE_MAX_ENUM;
	}
	// Translates an RHI render-target load action into the Vulkan attachment
	// load op.  An unhandled value yields VK_ATTACHMENT_LOAD_OP_MAX_ENUM so a
	// missing translation is detectable downstream.
	static inline VkAttachmentLoadOp RenderTargetLoadActionToVulkan(ERenderTargetLoadAction InLoadAction)
	{
		switch (InLoadAction)
		{
		case ERenderTargetLoadAction::ELoad:		return VK_ATTACHMENT_LOAD_OP_LOAD;
		case ERenderTargetLoadAction::EClear:		return VK_ATTACHMENT_LOAD_OP_CLEAR;
		case ERenderTargetLoadAction::ENoAction:	return VK_ATTACHMENT_LOAD_OP_DONT_CARE;
		default:									return VK_ATTACHMENT_LOAD_OP_MAX_ENUM;
		}
	}

	// Translates an RHI render-target store action into the Vulkan attachment
	// store op.  EMultisampleResolve stores via the resolve attachment, so the
	// MSAA attachment itself is DONT_CARE.  Unhandled values yield MAX_ENUM so
	// a missing translation is detectable downstream.
	static inline VkAttachmentStoreOp RenderTargetStoreActionToVulkan(ERenderTargetStoreAction InStoreAction)
	{
		switch (InStoreAction)
		{
		case ERenderTargetStoreAction::EStore:
			return VK_ATTACHMENT_STORE_OP_STORE;
		case ERenderTargetStoreAction::ENoAction:
		case ERenderTargetStoreAction::EMultisampleResolve:
			return VK_ATTACHMENT_STORE_OP_DONT_CARE;
		default:
			return VK_ATTACHMENT_STORE_OP_MAX_ENUM;
		}
	}
	// Builds a render-target layout description from a PSO initializer: one
	// color attachment per enabled, non-PF_Unknown render-target format.
	// All attachments are single-sampled and use COLOR_ATTACHMENT_OPTIMAL as
	// both initial and final layout.  Depth/stencil is not handled here.
	VulkanRenderTargetLayout::VulkanRenderTargetLayout(const FGraphicsPipelineStateInitializer& Initializer)
		: NumAttachmentDescriptions(0)
		, NumColorAttachments(0)
		, bHasDepthStencil(false)
		, NumUsedClearValues(0)
	{
		bool bSetExtent = false;
		bool bFoundClearOp = false;
		for (int32 Index = 0; Index < Initializer.RenderTargetsEnabled; ++Index)
		{
			EPixelFormat UEFormat = (EPixelFormat)Initializer.RenderTargetFormats[Index];
			if(UEFormat!=EPixelFormat::PF_Unknown)
			{
				// With a CustomResolveSubpass last color attachment is a resolve target
				bool bCustomResolveAttachment = (Index == (Initializer.RenderTargetsEnabled - 1)) && Initializer.SubpassHint == ESubpassHint::CustomResolveSubpass;
				
				VkAttachmentDescription& CurrDesc = Desc[NumAttachmentDescriptions];
				// Zero-initialize the description: matches the CGIRenderPassInfo
				// constructor below and guarantees VkAttachmentDescription::flags
				// (and any other untouched member) is not left indeterminate.
				CurrDesc = {};
				CurrDesc.samples = VK_SAMPLE_COUNT_1_BIT;//bCustomResolveAttachment ? VK_SAMPLE_COUNT_1_BIT : static_cast<VkSampleCountFlagBits>(NumSamples);
				CurrDesc.format = UEToVkTextureFormat(UEFormat, EnumHasAllFlags(Initializer.RenderTargetFlags[Index],  ETextureCreateFlags::SRGB));
				CurrDesc.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
				CurrDesc.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
				CurrDesc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
				CurrDesc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;

				// If the initial != final we need to change the FullHashInfo and use FinalLayout
				CurrDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
				CurrDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

				ColorReferences[NumColorAttachments] = {};
				ColorReferences[NumColorAttachments].attachment = NumAttachmentDescriptions;
				ColorReferences[NumColorAttachments].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

				// NOTE(review): currently unreachable — samples is hard-coded to
				// VK_SAMPLE_COUNT_1_BIT above.  Kept for when MSAA support is
				// re-enabled (see the commented static_cast<...>(NumSamples)).
				if (CurrDesc.samples > VK_SAMPLE_COUNT_1_BIT)
				{
					Desc[NumAttachmentDescriptions + 1] = Desc[NumAttachmentDescriptions];
					Desc[NumAttachmentDescriptions + 1].samples = VK_SAMPLE_COUNT_1_BIT;
					Desc[NumAttachmentDescriptions + 1].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
					Desc[NumAttachmentDescriptions + 1].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
					//ResolveReferences[NumColorAttachments].attachment = NumAttachmentDescriptions + 1;
					//ResolveReferences[NumColorAttachments].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
					//CompatibleHashInfo.AttachmentsToResolve |= (uint16)(1 << NumColorAttachments);
					++NumAttachmentDescriptions;
					//bHasResolveAttachments = true;
				}

				//CompatibleHashInfo.Formats[NumColorAttachments] = CurrDesc.format;
				//FullHashInfo.LoadOps[NumColorAttachments] = CurrDesc.loadOp;
				//FullHashInfo.StoreOps[NumColorAttachments] = CurrDesc.storeOp;
				//FullHashInfo.InitialLayout[NumColorAttachments] = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
				//++CompatibleHashInfo.NumAttachments;

				++NumAttachmentDescriptions;
				++NumColorAttachments;
			}
		}
	}
	// Builds a render-target layout from a render-pass begin description.
	// Active code handles exactly one color attachment (NumColorRenderTargets is
	// hard-coded to 1 and RPInfo->mColorRenderTargets is a single entry): it
	// derives the render-area extent from the target's mip level, fills the
	// VkAttachmentDescription from the entry's load/store actions and records
	// the color reference.  Depth/stencil, MSAA resolve and render-pass hashing
	// are not ported yet (see the large commented block below), so the
	// CurrentDepthLayout / CurrentStencilLayout parameters are currently unused.
	VulkanRenderTargetLayout::VulkanRenderTargetLayout(VulkanDevice& InDevice, const CGIRenderPassInfo* RPInfo, VkImageLayout CurrentDepthLayout, VkImageLayout CurrentStencilLayout)
		: NumAttachmentDescriptions(0)
		, NumColorAttachments(0)
		, bHasDepthStencil(false)
		, NumUsedClearValues(0)
	{
		bool bSetExtent = false;
		bool bFoundClearOp = false;
		// NOTE(review): multiple color targets not supported yet — loop runs once.
		int32 NumColorRenderTargets = 1;
		for (int32 Index = 0; Index < NumColorRenderTargets; ++Index)
		{
			const CGIRenderPassInfo::FColorEntry& ColorEntry = RPInfo->mColorRenderTargets;
			CGIVulkanTexture* Texture = reinterpret_cast<CGIVulkanTexture*>(ColorEntry.RenderTarget);
			const CGITextureDesc& TextureDesc = Texture->GetDesc();
			if (!bSetExtent)
			{
				// Render area follows the first target's mip-adjusted size (min 1 texel).
				bSetExtent = true;
				Extent.Extent3D.width = MathUtils::Max(1, TextureDesc.Extent.X >> ColorEntry.MipIndex);
				Extent.Extent3D.height = MathUtils::Max(1, TextureDesc.Extent.Y >> ColorEntry.MipIndex);
				Extent.Extent3D.depth = TextureDesc.Depth;
			}
			VkAttachmentDescription& CurrDesc = Desc[NumAttachmentDescriptions];
			CurrDesc={};
			CurrDesc.samples = VK_SAMPLE_COUNT_1_BIT;
			CurrDesc.format = UEToVkTextureFormat(ColorEntry.RenderTarget->GetDesc().Format, EnumHasAllFlags(Texture->GetDesc().Flags, ETextureCreateFlags::SRGB));
			CurrDesc.loadOp = RenderTargetLoadActionToVulkan(GetLoadAction(ColorEntry.Action));
			// Track whether any attachment clears, so clear values must be supplied at begin.
			bFoundClearOp = bFoundClearOp || (CurrDesc.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR);
			CurrDesc.storeOp = RenderTargetStoreActionToVulkan(GetStoreAction(ColorEntry.Action));
			CurrDesc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
			CurrDesc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
			// Initial == final layout: no implicit transition at pass boundaries.
			CurrDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
			CurrDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
			ColorReferences[NumColorAttachments]={};
			ColorReferences[NumColorAttachments].attachment = NumAttachmentDescriptions;
			ColorReferences[NumColorAttachments].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
			
			++NumAttachmentDescriptions;
			++NumColorAttachments;
		}
		// NOTE(review): bFoundClearOp is computed but NumUsedClearValues is never
		// updated from it (the hashing/clear-value code below is still disabled).
		//ResetAttachments();
		/*FRenderPassCompatibleHashableStruct CompatibleHashInfo;
		FRenderPassFullHashableStruct FullHashInfo;

		bool bSetExtent = false;
		bool bFoundClearOp = false;
		bool bMultiviewRenderTargets = false;

		int32 NumColorRenderTargets = RPInfo->GetNumColorRenderTargets();
		for (int32 Index = 0; Index < NumColorRenderTargets; ++Index)
		{
			const CGIRenderPassInfo::FColorEntry& ColorEntry = RPInfo.ColorRenderTargets[Index];
			CGIVulkanTexture* Texture = ResourceCast(ColorEntry.RenderTarget);
			const FRHITextureDesc& TextureDesc = Texture->GetDesc();

			if (bSetExtent)
			{
				ensure(Extent.Extent3D.width == FMath::Max(1, TextureDesc.Extent.X >> ColorEntry.MipIndex));
				ensure(Extent.Extent3D.height == FMath::Max(1, TextureDesc.Extent.Y >> ColorEntry.MipIndex));
				ensure(Extent.Extent3D.depth == TextureDesc.Depth);
			}
			else
			{
				bSetExtent = true;
				Extent.Extent3D.width = FMath::Max(1, TextureDesc.Extent.X >> ColorEntry.MipIndex);
				Extent.Extent3D.height = FMath::Max(1, TextureDesc.Extent.Y >> ColorEntry.MipIndex);
				Extent.Extent3D.depth = TextureDesc.Depth;
			}

			// CustomResolveSubpass can have targets with a different NumSamples
			//ensure(!NumSamples || NumSamples == ColorEntry.RenderTarget->GetNumSamples() || RPInfo.SubpassHint == ESubpassHint::CustomResolveSubpass);
			//NumSamples = ColorEntry.RenderTarget->GetNumSamples();

			//ensure(!GetIsMultiView() || !bMultiviewRenderTargets || Texture->GetNumberOfArrayLevels() > 1);
			//bMultiviewRenderTargets = Texture->GetNumberOfArrayLevels() > 1;
			// With a CustomResolveSubpass last color attachment is a resolve target
			bool bCustomResolveAttachment = (Index == (NumColorRenderTargets - 1)) && RPInfo.SubpassHint == ESubpassHint::CustomResolveSubpass;

			VkAttachmentDescription& CurrDesc = Desc[NumAttachmentDescriptions];
			CurrDesc.samples = bCustomResolveAttachment ? VK_SAMPLE_COUNT_1_BIT : static_cast<VkSampleCountFlagBits>(NumSamples);
			CurrDesc.format = UEToVkTextureFormat(ColorEntry.RenderTarget->GetFormat(), EnumHasAllFlags(Texture->GetDesc().Flags, TexCreate_SRGB));
			CurrDesc.loadOp = RenderTargetLoadActionToVulkan(GetLoadAction(ColorEntry.Action));
			bFoundClearOp = bFoundClearOp || (CurrDesc.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR);
			CurrDesc.storeOp = RenderTargetStoreActionToVulkan(GetStoreAction(ColorEntry.Action));
			CurrDesc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
			CurrDesc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;

			if (EnumHasAnyFlags(Texture->GetDesc().Flags,  ETextureCreateFlags::Memoryless))
			{
				//ensure(CurrDesc.storeOp == VK_ATTACHMENT_STORE_OP_DONT_CARE);
			}

			// If the initial != final we need to change the FullHashInfo and use FinalLayout
			CurrDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
			CurrDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

			ColorReferences[NumColorAttachments].attachment = NumAttachmentDescriptions;
			ColorReferences[NumColorAttachments].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

			if (CurrDesc.samples > VK_SAMPLE_COUNT_1_BIT && ColorEntry.ResolveTarget)
			{
				Desc[NumAttachmentDescriptions + 1] = Desc[NumAttachmentDescriptions];
				Desc[NumAttachmentDescriptions + 1].samples = VK_SAMPLE_COUNT_1_BIT;
				Desc[NumAttachmentDescriptions + 1].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
				Desc[NumAttachmentDescriptions + 1].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
				//ResolveReferences[NumColorAttachments].attachment = NumAttachmentDescriptions + 1;
				//ResolveReferences[NumColorAttachments].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
				CompatibleHashInfo.AttachmentsToResolve |= (uint16)(1 << NumColorAttachments);
				++NumAttachmentDescriptions;
				//bHasResolveAttachments = true;
			}

			CompatibleHashInfo.Formats[NumColorAttachments] = CurrDesc.format;
			FullHashInfo.LoadOps[NumColorAttachments] = CurrDesc.loadOp;
			FullHashInfo.InitialLayout[NumColorAttachments] = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
			FullHashInfo.StoreOps[NumColorAttachments] = CurrDesc.storeOp;
			++CompatibleHashInfo.NumAttachments;

			++NumAttachmentDescriptions;
			++NumColorAttachments;
		}
		bool bMultiViewDepthStencil = false;
		if (RPInfo.DepthStencilRenderTarget.DepthStencilTarget)
		{
			VkAttachmentDescription& CurrDesc = Desc[NumAttachmentDescriptions];
			FMemory::Memzero(CurrDesc);
			FVulkanTexture* Texture = ResourceCast(RPInfo.DepthStencilRenderTarget.DepthStencilTarget);
			check(Texture);
			const FRHITextureDesc& TextureDesc = Texture->GetDesc();
			bMultiViewDepthStencil = (Texture->GetNumberOfArrayLevels() > 1) && !Texture->GetDesc().IsTextureCube();
			CurrDesc.samples = static_cast<VkSampleCountFlagBits>(RPInfo.DepthStencilRenderTarget.DepthStencilTarget->GetNumSamples());
			// CustomResolveSubpass can have targets with a different NumSamples
			ensure(!NumSamples || CurrDesc.samples == NumSamples || RPInfo.SubpassHint == ESubpassHint::CustomResolveSubpass);
			NumSamples = CurrDesc.samples;
			CurrDesc.format = UEToVkTextureFormat(RPInfo.DepthStencilRenderTarget.DepthStencilTarget->GetFormat(), false);
			CurrDesc.loadOp = RenderTargetLoadActionToVulkan(GetLoadAction(GetDepthActions(RPInfo.DepthStencilRenderTarget.Action)));
			CurrDesc.stencilLoadOp = RenderTargetLoadActionToVulkan(GetLoadAction(GetStencilActions(RPInfo.DepthStencilRenderTarget.Action)));
			bFoundClearOp = bFoundClearOp || (CurrDesc.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR || CurrDesc.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_CLEAR);

			CurrDesc.storeOp = RenderTargetStoreActionToVulkan(GetStoreAction(GetDepthActions(RPInfo.DepthStencilRenderTarget.Action)));
			CurrDesc.stencilStoreOp = RenderTargetStoreActionToVulkan(GetStoreAction(GetStencilActions(RPInfo.DepthStencilRenderTarget.Action)));

			if (EnumHasAnyFlags(TextureDesc.Flags, TexCreate_Memoryless))
			{
				ensure(CurrDesc.storeOp == VK_ATTACHMENT_STORE_OP_DONT_CARE);
				ensure(CurrDesc.stencilStoreOp == VK_ATTACHMENT_STORE_OP_DONT_CARE);
			}

			FExclusiveDepthStencil ExclusiveDepthStencil = RPInfo.DepthStencilRenderTarget.ExclusiveDepthStencil;
			if (FVulkanPlatform::RequiresDepthWriteOnStencilClear() &&
				RPInfo.DepthStencilRenderTarget.Action == EDepthStencilTargetActions::LoadDepthClearStencil_StoreDepthStencil)
			{
				ExclusiveDepthStencil = FExclusiveDepthStencil::DepthWrite_StencilWrite;
				CurrentDepthLayout = VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL;
				CurrentStencilLayout = VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL;
			}

			// If the initial != final we need to change the FullHashInfo and use FinalLayout
			CurrDesc.initialLayout = CurrentDepthLayout;
			CurrDesc.finalLayout = CurrentDepthLayout;
			StencilDesc.stencilInitialLayout = CurrentStencilLayout;
			StencilDesc.stencilFinalLayout = CurrentStencilLayout;

			// We can't have the final layout be UNDEFINED, but it's possible that we get here from a transient texture
			// where the stencil was never used yet.  We can set the layout to whatever we want, the next transition will
			// happen from UNDEFINED anyhow.
			if (CurrentDepthLayout == VK_IMAGE_LAYOUT_UNDEFINED)
			{
				// Unused image aspects with a LoadOp but undefined layout should just remain untouched
				if (!RPInfo.DepthStencilRenderTarget.ExclusiveDepthStencil.IsUsingDepth() &&
					InDevice.GetOptionalExtensions().HasEXTLoadStoreOpNone &&
					(CurrDesc.loadOp == VK_ATTACHMENT_LOAD_OP_LOAD))
				{
					CurrDesc.loadOp = VK_ATTACHMENT_LOAD_OP_NONE_KHR;
				}

				check(CurrDesc.storeOp == VK_ATTACHMENT_STORE_OP_DONT_CARE);
				CurrDesc.finalLayout = VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL;
			}
			if (CurrentStencilLayout == VK_IMAGE_LAYOUT_UNDEFINED)
			{
				// Unused image aspects with a LoadOp but undefined layout should just remain untouched
				if (!RPInfo.DepthStencilRenderTarget.ExclusiveDepthStencil.IsUsingStencil() &&
					InDevice.GetOptionalExtensions().HasEXTLoadStoreOpNone &&
					(CurrDesc.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD))
				{
					CurrDesc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_NONE_KHR;
				}

				check(CurrDesc.stencilStoreOp == VK_ATTACHMENT_STORE_OP_DONT_CARE);
				StencilDesc.stencilFinalLayout = VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL;
			}

			DepthReference.attachment = NumAttachmentDescriptions;
			DepthReference.layout = CurrentDepthLayout;
			StencilReference.stencilLayout = CurrentStencilLayout;

			if (GRHISupportsDepthStencilResolve && CurrDesc.samples > VK_SAMPLE_COUNT_1_BIT && RPInfo.DepthStencilRenderTarget.ResolveTarget)
			{
				Desc[NumAttachmentDescriptions + 1] = Desc[NumAttachmentDescriptions];
				Desc[NumAttachmentDescriptions + 1].samples = VK_SAMPLE_COUNT_1_BIT;
				Desc[NumAttachmentDescriptions + 1].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
				Desc[NumAttachmentDescriptions + 1].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
				Desc[NumAttachmentDescriptions + 1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
				Desc[NumAttachmentDescriptions + 1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
				DepthStencilResolveReference.attachment = NumAttachmentDescriptions + 1;
				DepthStencilResolveReference.layout = CurrentDepthLayout;
				// NumColorAttachments was incremented after the last color attachment
				ensureMsgf(NumColorAttachments < 16, TEXT("Must have room for depth resolve bit"));
				CompatibleHashInfo.AttachmentsToResolve |= (uint16)(1 << NumColorAttachments);
				++NumAttachmentDescriptions;
				bHasDepthStencilResolve = true;
			}

			FullHashInfo.LoadOps[MaxSimultaneousRenderTargets] = CurrDesc.loadOp;
			FullHashInfo.LoadOps[MaxSimultaneousRenderTargets + 1] = CurrDesc.stencilLoadOp;
			FullHashInfo.StoreOps[MaxSimultaneousRenderTargets] = CurrDesc.storeOp;
			FullHashInfo.StoreOps[MaxSimultaneousRenderTargets + 1] = CurrDesc.stencilStoreOp;
			FullHashInfo.InitialLayout[MaxSimultaneousRenderTargets] = CurrentDepthLayout;
			FullHashInfo.InitialLayout[MaxSimultaneousRenderTargets + 1] = CurrentStencilLayout;
			CompatibleHashInfo.Formats[MaxSimultaneousRenderTargets] = CurrDesc.format;

			++NumAttachmentDescriptions;

			bHasDepthStencil = true;

			if (bSetExtent)
			{
				// Depth can be greater or equal to color. Clamp to the smaller size.
				Extent.Extent3D.width = FMath::Min<uint32>(Extent.Extent3D.width, TextureDesc.Extent.X);
				Extent.Extent3D.height = FMath::Min<uint32>(Extent.Extent3D.height, TextureDesc.Extent.Y);
			}
			else
			{
				bSetExtent = true;
				Extent.Extent3D.width = TextureDesc.Extent.X;
				Extent.Extent3D.height = TextureDesc.Extent.Y;
				Extent.Extent3D.depth = TextureDesc.Depth;
			}
		}
		else if (NumColorRenderTargets == 0)
		{
			// No Depth and no color, it's a raster-only pass so make sure the renderArea will be set up properly
			//checkf(RPInfo.ResolveRect.IsValid(), TEXT("For raster-only passes without render targets, ResolveRect has to contain the render area"));
			bSetExtent = true;
			Offset.Offset3D.x = RPInfo.ResolveRect.X1;
			Offset.Offset3D.y = RPInfo.ResolveRect.Y1;
			Offset.Offset3D.z = 0;
			Extent.Extent3D.width = RPInfo.ResolveRect.X2 - RPInfo.ResolveRect.X1;
			Extent.Extent3D.height = RPInfo.ResolveRect.Y2 - RPInfo.ResolveRect.Y1;
			Extent.Extent3D.depth = 1;
		}
		
		SubpassHint = RPInfo.SubpassHint;
		CompatibleHashInfo.SubpassHint = (uint8)RPInfo.SubpassHint;

		CompatibleHashInfo.NumSamples = NumSamples;
		CompatibleHashInfo.MultiViewCount = MultiViewCount;
		
		RenderPassCompatibleHash = FCrc::MemCrc32(&CompatibleHashInfo, sizeof(CompatibleHashInfo));
		RenderPassFullHash = FCrc::MemCrc32(&FullHashInfo, sizeof(FullHashInfo), RenderPassCompatibleHash);
		NumUsedClearValues = bFoundClearOp ? NumAttachmentDescriptions : 0;
		bCalculatedHash = true;*/
	}
	// Equality between two render-target layouts.
	// NOTE(review): only the first color reference's attachment index is
	// compared; attachment counts, formats, load/store ops, layouts and
	// depth/stencil state are all ignored.  If this operator feeds a render-pass
	// or framebuffer cache, genuinely different layouts can compare equal —
	// confirm this is a deliberate temporary simplification before relying on it.
	bool VulkanRenderTargetLayout::operator==(const VulkanRenderTargetLayout&inRenderTargetLayout)const
	{
		bool result=false;
		if(ColorReferences[0].attachment==inRenderTargetLayout.ColorReferences[0].attachment)
		{
			result=true;
		}
		return result;
	}
	// Creates the buffer: derives Vulkan usage flags from the UE usage, allocates
	// backing memory, and optionally uploads initial data supplied through
	// CreateInfo.ResourceArray via a Lock/Unlock pair.
	VulkanResourceMultiBuffer::VulkanResourceMultiBuffer(VulkanDevice* InDevice, CGIBufferDesc const& InBufferDesc, CGIResourceCreateInfo& CreateInfo, CGICommandListBase* InRHICmdList)
		:CGIBuffer(InBufferDesc)
		,mDevice(InDevice)
	{
		// Zero-sized buffers still get a (dummy) allocation with reduced usage bits.
		const bool bZeroSize = (InBufferDesc.Size == 0);
		BufferUsageFlags = UEToVKBufferUsageFlags(InDevice, InBufferDesc.Usage, bZeroSize);
		AllocateMemory(mAlloc);
		if (CreateInfo.ResourceArray)
		{
			// NOTE(review): bring-up code — the upload size is hard-coded to 36
			// bytes and ResourceArray is reinterpreted as raw Vector3f data with
			// no size validation.  Restore the commented Min(Size, data size)
			// expression once ResourceArray exposes GetResourceDataSize().
			const uint32 CopyDataSize = 36;//MathUtils::Min(InBufferDesc.Size, CreateInfo.ResourceArray->GetResourceDataSize());
			Vector3f*data=(Vector3f*)CreateInfo.ResourceArray;
			void* Data = Lock(*InRHICmdList, RLM_WriteOnly, CopyDataSize, 0);
			memcpy(Data, data, CopyDataSize);
			Unlock(*InRHICmdList);
		}
	}
	// Maps LockSize bytes of the buffer at Offset for CPU access and returns the
	// pointer.  Design (see comments in the body): dynamic buffers should swap
	// in a fresh host-visible allocation, static buffers stage through a
	// CPU-visible staging buffer that Unlock() copies to device memory.
	// NOTE(review): large portions are still commented out — only the
	// static/WriteOnly staging path currently produces a pointer; the read-only
	// and dynamic paths leave Data null and will trip check(Data) below.
	void* VulkanResourceMultiBuffer::Lock(CGICommandListBase& RHICmdList, EResourceLockMode LockMode, uint32 LockSize, uint32 Offset)
	{
		void* Data = nullptr;
		uint32 DataOffset = 0;

		check(LockStatus == ELockStatus::Unlocked);

		//LockStatus = ELockStatus::Locked;
		// NOTE(review): LockCounter accounting is disabled, so every lock is
		// treated as the first lock on this buffer.
		const bool bIsFirstLock = true;//(0 == LockCounter++);

		// Dynamic:    Allocate a new Host_Visible buffer, swap this new buffer in on RHI thread and update views.  
		//             GPU reads directly from host memory, but no copy is required so it can be used in render passes.
		// Static:     A single Device_Local buffer is allocated at creation.  For Lock/Unlock, use a staging buffer for the upload:
		//             host writes to staging buffer on lock, a copy on GPU is issued on unlock to update the device_local memory.

		const bool bUnifiedMem = false;//mDevice->HasUnifiedMemory();
		const bool bDynamic = EnumHasAnyFlags(GetUsage(),  EBufferUsageFlags::Dynamic) || EnumHasAnyFlags(GetUsage(), EBufferUsageFlags::Volatile);
		const bool bStatic = EnumHasAnyFlags(GetUsage(), EBufferUsageFlags::Static) || !bDynamic;
		const bool bUAV = EnumHasAnyFlags(GetUsage(), EBufferUsageFlags::UnorderedAccess);
		const bool bSR = EnumHasAnyFlags(GetUsage(), EBufferUsageFlags::ShaderResource);

		check(bStatic || bDynamic || bUAV || bSR);

		if (LockMode == RLM_ReadOnly)
		{
			// NOTE(review): the whole read-back path is disabled; a read-only
			// lock currently falls through with Data == nullptr and asserts below.
			/*check(IsInRenderingThread());

			if (bUnifiedMem)
			{
				Data = CurrentBufferAlloc.HostPtr;
				DataOffset = Offset;
				LockStatus = ELockStatus::PersistentMapping;
			}
			else 
			{
				Device->PrepareForCPURead();
			
				FVulkanCommandListContextImmediate& Context = Device->GetImmediateContext();
				FVulkanCommandBufferManager* CommandBufferManager = Context.GetCommandBufferManager();
				FVulkanCmdBuffer* CmdBuffer = CommandBufferManager->GetUploadCmdBuffer();
					
				// Make sure any previous tasks have finished on the source buffer.
				VkMemoryBarrier BarrierBefore = { VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr, VK_ACCESS_MEMORY_WRITE_BIT, VK_ACCESS_MEMORY_READ_BIT };
				VulkanRHI::vkCmdPipelineBarrier(CmdBuffer->GetHandle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1, &BarrierBefore, 0, nullptr, 0, nullptr);

				// Create a staging buffer we can use to copy data from device to cpu.
				VulkanRHI::FStagingBuffer* StagingBuffer = Device->GetStagingManager().AcquireBuffer(LockSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_MEMORY_PROPERTY_HOST_CACHED_BIT);

				// Fill the staging buffer with the data on the device.
				VkBufferCopy Regions;
				Regions.size = LockSize;
				Regions.srcOffset = Offset + CurrentBufferAlloc.Alloc.Offset;
				Regions.dstOffset = 0;
					
				VulkanRHI::vkCmdCopyBuffer(CmdBuffer->GetHandle(), CurrentBufferAlloc.Alloc.GetBufferHandle(), StagingBuffer->GetHandle(), 1, &Regions);

				// Setup barrier.
				VkMemoryBarrier BarrierAfter = { VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr, VK_ACCESS_MEMORY_WRITE_BIT, VK_ACCESS_HOST_READ_BIT };
				VulkanRHI::vkCmdPipelineBarrier(CmdBuffer->GetHandle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 1, &BarrierAfter, 0, nullptr, 0, nullptr);
					
				// Force upload.
				CommandBufferManager->SubmitUploadCmdBuffer();
				Device->WaitUntilIdle();

				// Flush.
				StagingBuffer->FlushMappedMemory();

				// Get mapped pointer. 
				Data = StagingBuffer->GetMappedPointer();

				// Release temp staging buffer during unlock.
				FVulkanPendingBufferLock PendingLock;
				PendingLock.Offset = 0;
				PendingLock.Size = LockSize;
				PendingLock.LockMode = LockMode;
				PendingLock.StagingBuffer = StagingBuffer;
				AddPendingBufferLock(this, PendingLock);

				CommandBufferManager->PrepareForNewActiveCommandBuffer();
			}*/
		}
		else
		{
			check(LockMode == RLM_WriteOnly);

			// If this is the first lock on host visible memory, then the memory is still untouched so use it directly
			if ((bUnifiedMem || bDynamic) && bIsFirstLock)
			{
				// NOTE(review): persistent-mapping path disabled — dynamic
				// buffers hit this branch (bIsFirstLock is always true) and
				// fall out with Data == nullptr, firing check(Data) below.
				/*check(CurrentBufferAlloc.HostPtr);
				Data = CurrentBufferAlloc.HostPtr;
				DataOffset = Offset;
				LockStatus = ELockStatus::PersistentMapping;*/
			}
			else if (bStatic)// || GVulkanForceStagingBufferOnLock)
			{
				// Working path: acquire a host-visible staging buffer, hand its
				// mapped pointer to the caller, and remember the lock so
				// Unlock() can issue the GPU copy into device-local memory.
				FVulkanPendingBufferLock PendingLock;
				PendingLock.Offset = Offset;
				PendingLock.Size = LockSize;
				PendingLock.LockMode = LockMode;

				FStagingBuffer* StagingBuffer = mDevice->GetStagingManager().AcquireBuffer(LockSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
				PendingLock.StagingBuffer = StagingBuffer;
				Data = StagingBuffer->GetMappedPointer();

				AddPendingBufferLock(this, PendingLock);
			}
			else
			{
				// NOTE(review): dynamic re-allocation path disabled.
				/*FBufferAlloc NewAlloc;
				AllocateMemory(NewAlloc);
				NewAlloc.Alloc.Disown();

				RHICmdList.EnqueueLambda(TEXT("FVulkanBuffer::Lock"), [Buffer = this, NewAlloc](FRHICommandListBase& CmdList)
				{
					Buffer->CurrentBufferAlloc.Alloc.Free(*Buffer->GetParent());
					Buffer->CurrentBufferAlloc = NewAlloc;
					Buffer->CurrentBufferAlloc.Alloc.Own();
					Buffer->UpdateLinkedViews();
				});

				Data = NewAlloc.HostPtr;
				DataOffset = Offset;
				LockStatus = ELockStatus::PersistentMapping;*/
			}
		}

		check(Data);
		return (uint8*)Data + DataOffset;
	}
	// Completes a Lock(): flushes the staging buffer and, for write locks,
	// records a GPU copy from the staging buffer into the device-local
	// allocation on the upload command buffer, followed by a memory barrier so
	// subsequent commands observe the new contents.  The staging buffer is then
	// returned to the staging manager and the upload command buffer submitted.
	void VulkanResourceMultiBuffer::Unlock(CGICommandListBase& RHICmdList)
	{
		const bool bUnifiedMem = false;//mDevice->HasUnifiedMemory();
		const bool bDynamic = EnumHasAnyFlags(GetUsage(), EBufferUsageFlags::Dynamic) || EnumHasAnyFlags(GetUsage(), EBufferUsageFlags::Volatile);
		const bool bStatic = EnumHasAnyFlags(GetUsage(), EBufferUsageFlags::Static) || !bDynamic;
		const bool bSR = EnumHasAnyFlags(GetUsage(), EBufferUsageFlags::ShaderResource);

		// NOTE(review): LockStatus is never set by Lock() (the assignment is
		// commented out there), so this check's behavior depends on the member's
		// initial value — confirm against the class definition.
		check(LockStatus != ELockStatus::Unlocked);

		//if (LockStatus == ELockStatus::PersistentMapping)
		{
			// Do nothing
		}
		//else
		{
			check(bStatic || bDynamic || bSR);

			// Retrieve (and remove) the staging buffer recorded by Lock().
			FVulkanPendingBufferLock PendingLock = GetPendingBufferLock(this);
			VulkanResourceMultiBuffer*Buffer=this;
			//RHICmdList.EnqueueLambda(TEXT("FVulkanBuffer::Unlock"), [Buffer=this, PendingLock](FRHICommandListBase& CmdList)
			{
				FStagingBuffer* StagingBuffer = PendingLock.StagingBuffer;
				check(StagingBuffer);
				// Make CPU writes visible to the device before the copy.
				StagingBuffer->FlushMappedMemory();

				if (PendingLock.LockMode == RLM_ReadOnly)
				{
					// Just remove the staging buffer here.
					Buffer->mDevice->GetStagingManager().ReleaseBuffer(nullptr, StagingBuffer);
				}
				else if (PendingLock.LockMode == RLM_WriteOnly)
				{
					VulkanCommandListContext& Context = VulkanCommandListContext::GetVulkanContext(RHICmdList.GetContext());

					// We need to do this on the active command buffer instead of using an upload command buffer. The high level code sometimes reuses the same
					// buffer in sequences of upload / dispatch, upload / dispatch, so we need to order the copy commands correctly with respect to the dispatches.
					// NOTE(review): the comment above asks for the ACTIVE command
					// buffer, but the code uses the upload command buffer — the
					// ordering concern it describes is not addressed yet.
					VulkanCommandBuffer* Cmd = Context.GetCommandBufferManager()->GetUploadCmdBuffer();//->mActiveCmdBuffer;//GetActiveCmdBuffer();
					check(Cmd && Cmd->IsOutsideRenderPass());
					VkCommandBuffer CmdBuffer = Cmd->GetHandle();

					//VulkanRHI::DebugHeavyWeightBarrier(CmdBuffer, 16);

					// Copy the staged bytes into the device-local allocation at
					// the locked offset (srcOffset stays 0 via the memset).
					VkBufferCopy Region;
					memset(&Region,0,sizeof(Region));
					Region.size = PendingLock.Size;
					//Region.srcOffset = 0;
					Region.dstOffset = PendingLock.Offset + Buffer->mAlloc.Alloc.Offset;
					vkCmdCopyBuffer(CmdBuffer, StagingBuffer->GetHandle(), Buffer->mAlloc.Alloc.GetBufferHandle(), 1, &Region);

					// High level code expects the data in Buffer to be ready to read
					VkMemoryBarrier BarrierAfter = { VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT };
					vkCmdPipelineBarrier(CmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 1, &BarrierAfter, 0, nullptr, 0, nullptr);

					// Staging buffer is released against the command buffer so it
					// is recycled only after the copy completes.
					Buffer->mDevice->GetStagingManager().ReleaseBuffer(Cmd, StagingBuffer);
					Context.GetCommandBufferManager()->SubmitUploadCmdBuffer();
					//Buffer->UpdateLinkedViews(); // :todo-jn:  not needed?  Same buffer when we use staging?
				}
			}
			//);
		}

		//LockStatus = ELockStatus::Unlocked;
	}
	// Translates UE buffer usage flags into VkBufferUsageFlags.
	// TRANSFER_SRC is always included: hardware vendors confirmed it carries no
	// performance cost and it is needed for some debug functionality.
	// Zero-sized (dummy) buffers only receive the base usages.
	VkBufferUsageFlags VulkanResourceMultiBuffer::UEToVKBufferUsageFlags(VulkanDevice* InDevice, EBufferUsageFlags InUEUsage, bool bZeroSize)
	{
		VkBufferUsageFlags Result = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

		// Adds IfSet when UEFlag is present in InUEUsage, otherwise IfClear (default: nothing).
		const auto MapFlag = [&Result, InUEUsage](EBufferUsageFlags UEFlag, VkBufferUsageFlags IfSet, VkBufferUsageFlags IfClear = 0)
		{
			Result |= EnumHasAnyFlags(InUEUsage, UEFlag) ? IfSet : IfClear;
		};

		MapFlag(EBufferUsageFlags::VertexBuffer, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
		MapFlag(EBufferUsageFlags::IndexBuffer, VK_BUFFER_USAGE_INDEX_BUFFER_BIT);
		MapFlag(EBufferUsageFlags::StructuredBuffer, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
		MapFlag(EBufferUsageFlags::UniformBuffer, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
		MapFlag(EBufferUsageFlags::AccelerationStructure, VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR);

		if (!bZeroSize)
		{
			MapFlag(EBufferUsageFlags::UnorderedAccess, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT);
			MapFlag(EBufferUsageFlags::DrawIndirect, VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT);
			MapFlag(EBufferUsageFlags::KeepCPUAccessible, (VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT));
			MapFlag(EBufferUsageFlags::ShaderResource, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT);

			// Non-volatile buffers can be targets of upload copies.
			MapFlag(EBufferUsageFlags::Volatile, 0, VK_BUFFER_USAGE_TRANSFER_DST_BIT);

			//if (InDevice->GetOptionalExtensions().HasRaytracingExtensions())
			//{
			//	Result |= VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
			//
			//	MapFlag(BUF_AccelerationStructure, 0, VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR);
			//}

			// For descriptors buffers
			//if (InDevice->GetOptionalExtensions().HasBufferDeviceAddress)
			//{
			//	Result |= VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
			//}
		}

		return Result;
	}
	void VulkanResourceMultiBuffer::AllocateMemory(FBufferAlloc& inOutVulkanAllocation)
	{
		// Memory-domain selection. Unified-memory and dynamic paths are not wired
		// up yet (see the commented expressions), so device-local always wins.
		const bool bUnifiedMem = false; // Device->HasUnifiedMemory();
		const bool bDynamic = false;    // EnumHasAnyFlags(GetUsage(), BUF_Dynamic) || EnumHasAnyFlags(GetUsage(), BUF_Volatile);

		VkMemoryPropertyFlags MemFlags;
		if (bUnifiedMem)
		{
			MemFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
		}
		else
		{
			MemFlags = bDynamic
				? (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
				: VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
		}

		const uint32 AllocSize = GetSize();
		// A zero-sized buffer still needs a valid alignment (flag passed through).
		const uint32 AllocAlignment = FMemoryManager::CalculateBufferAlignment(*mDevice, GetUsage(), (AllocSize == 0));

		// Sub-allocate from the pooled buffer allocator; a failure here is treated
		// as out-of-memory and routed through the manager's OOM handler.
		if (!mDevice->GetMemoryManager().AllocateBufferPooled(inOutVulkanAllocation.Alloc, nullptr, AllocSize, AllocAlignment, BufferUsageFlags, MemFlags, EVulkanAllocationMetaMultiBuffer, __FILE__, __LINE__))
		{
			mDevice->GetMemoryManager().HandleOOM();
		}

		// Host-pointer / device-address caching and stat tracking are not ported yet:
		//inOutVulkanAllocation.HostPtr = (bUnifiedMem || bDynamic) ? inOutVulkanAllocation.Alloc.GetMappedPointer(mDevice) : nullptr;
		//inOutVulkanAllocation.DeviceAddress = GetBufferDeviceAddress(mDevice, inOutVulkanAllocation.Alloc.GetBufferHandle()) + inOutVulkanAllocation.Alloc.Offset;
		//UpdateVulkanBufferStats(GetDesc(), AllocSize, true);
	}
    // Default constructor: tags the resource as a texture without creating any
    // Vulkan objects; members keep their in-class defaults.
    CGIVulkanTexture::CGIVulkanTexture()
        :CGITexture(ECGIResourceType::RRT_Texture)
    {

    }
    // Wraps an already-created VkImage. Derives storage/view formats and aspect
    // masks from the create desc, then records (and, for render targets, clears)
    // the image's initial layout on the device's immediate context.
    // NOTE(review): InExternalImageDeleteCallbackInfo is currently unused —
    // confirm external-image deletion is handled elsewhere.
    CGIVulkanTexture::CGIVulkanTexture(VulkanDevice& InDevice, const CGITextureCreateDesc& InCreateDesc, VkImage InImage, const CGIVulkanRHIExternalImageDeleteCallbackInfo& InExternalImageDeleteCallbackInfo)
	:CGITexture(InCreateDesc)
	, mImage(InImage)
	, mStorageFormat(VK_FORMAT_UNDEFINED)
	, mViewFormat(VK_FORMAT_UNDEFINED)
	, mTiling(VK_IMAGE_TILING_MAX_ENUM)
    {
		// View format honors the SRGB create flag; the storage format never does.
		mStorageFormat = UEToVkTextureFormat(InCreateDesc.Format, false);
		mViewFormat = UEToVkTextureFormat(InCreateDesc.Format, EnumHasAllFlags(InCreateDesc.Flags,  ETextureCreateFlags::SRGB));
		// Full mask includes depth AND stencil; partial mask drops stencil.
		mFullAspectMask = GetAspectMaskFromUEFormat(InCreateDesc.Format, true, true);
		mPartialAspectMask = GetAspectMaskFromUEFormat(InCreateDesc.Format, false, true);
		// Presentable (swapchain-bound) images default to optimal tiling if none was set.
		if (EnumHasAllFlags(InCreateDesc.Flags, ETextureCreateFlags::Presentable) && mTiling == VK_IMAGE_TILING_MAX_ENUM)
		{
			mTiling = VK_IMAGE_TILING_OPTIMAL;
		}
		if (mImage != VK_NULL_HANDLE)
		{
			mImageUsageFlags = GetUsageFlagsFromCreateFlags(InDevice, InCreateDesc.Flags);

			// Render targets get an initial clear; everything else would only need
			// layout bookkeeping (that path is not ported yet, see below).
			const bool bRenderTarget = EnumHasAnyFlags(InCreateDesc.Flags, ETextureCreateFlags::RenderTargetable | ETextureCreateFlags::DepthStencilTargetable);
			const VkImageLayout InitialLayout = GetInitialLayoutFromRHIAccess(InCreateDesc.InitialState, bRenderTarget && IsDepthOrStencilAspect(), SupportsSampling());
			const bool bDoInitialClear = bRenderTarget;
			const bool bOnlyAddToLayoutManager = !bRenderTarget;

			mDefaultLayout = InitialLayout;

			// RHI-thread / command-list bypass handling is not ported: the
			// 'if (true)' / 'if (false)' scaffolding always takes the
			// immediate-context route and skips the layout-manager-only path.
			if (true)//!IsInRenderingThread() || (RHICmdList.Bypass() || !IsRunningRHIInSeparateThread()))
			{
				VulkanCommandListContext& Context = InDevice.GetImmediateContext();
				if (false)//bOnlyAddToLayoutManager)
				{
					//CGIVulkanCmdBuffer* CmdBuffer = Context.GetCommandBufferManager()->GetActiveCmdBuffer();
					//CmdBuffer->GetLayoutManager().SetFullLayout(*this, InitialLayout, true);
				}
				else if (InitialLayout != VK_IMAGE_LAYOUT_UNDEFINED || bDoInitialClear)
				{
					SetInitialImageState(Context, InitialLayout, bDoInitialClear, InCreateDesc.ClearValue, false);
				}
			}
			else
			{
				//ALLOC_COMMAND_CL(RHICmdList, FRHICommandSetInitialImageState)(this, InitialLayout, bOnlyAddToLayoutManager, bDoInitialClear, InCreateDesc.ClearValue, false);
			}
		}
    }
	// Puts a freshly created image into its requested initial layout and
	// (optionally) clears it, then records that layout in the layout manager.
	// @param Context          command-list context supplying the upload cmd buffer
	// @param InitialLayout    layout the image must be in when this returns
	// @param bClear           clear the image (color or depth/stencil) first
	// @param ClearValueBinding clear color / depth-stencil values
	// @param bIsTransientResource transient memory may alias earlier resources,
	//        so clears are skipped for it (see NOTE below on cmd buffer choice)
	void CGIVulkanTexture::SetInitialImageState(VulkanCommandListContext& Context, VkImageLayout InitialLayout, bool bClear, const FClearValueBinding& ClearValueBinding, bool bIsTransientResource)
	{
		// Can't use TransferQueue as Vulkan requires that queue to also have Gfx or Compute capabilities...
		//#todo-rco: This function is only used during loading currently, if used for regular RHIClear then use the ActiveCmdBuffer
		// NOTE: Transient resources' memory might have belonged to another resource earlier in the ActiveCmdBuffer, so we can't use UploadCmdBuffer
		VulkanCommandBuffer* CmdBuffer = Context.GetCommandBufferManager()->GetUploadCmdBuffer();//bIsTransientResource ? Context.GetCommandBufferManager()->GetActiveCmdBuffer() : Context.GetCommandBufferManager()->GetUploadCmdBuffer();

		VkImageSubresourceRange SubresourceRange = CGIVulkanPipelineBarrier::MakeSubresourceRange(mFullAspectMask);

		VkImageLayout CurrentLayout = VK_IMAGE_LAYOUT_UNDEFINED;
		if (bClear && !bIsTransientResource)
		{
			// Move the image to TRANSFER_DST so the clear commands are valid.
			{
				CGIVulkanPipelineBarrier Barrier;
				Barrier.AddImageLayoutTransition(mImage, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, SubresourceRange);
				Barrier.Execute(CmdBuffer);
			}

			if (mFullAspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
			{
				// NOTE(review): only the float32 clear members are filled in;
				// integer-format images would need int32/uint32 — confirm callers
				// never route integer textures through this clear.
				VkClearColorValue Color={};
				Color.float32[0] = ClearValueBinding.Value.Color[0];
				Color.float32[1] = ClearValueBinding.Value.Color[1];
				Color.float32[2] = ClearValueBinding.Value.Color[2];
				Color.float32[3] = ClearValueBinding.Value.Color[3];

				vkCmdClearColorImage(CmdBuffer->GetHandle(), mImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &Color, 1, &SubresourceRange);
			}
			else
			{
				VkClearDepthStencilValue Value={};
				Value.depth = ClearValueBinding.Value.DSValue.Depth;
				Value.stencil = ClearValueBinding.Value.DSValue.Stencil;

				vkCmdClearDepthStencilImage(CmdBuffer->GetHandle(), mImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &Value, 1, &SubresourceRange);
			}

			CurrentLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
		}

		// BUGFIX: this transition was previously commented out, so after a clear
		// the image stayed in TRANSFER_DST_OPTIMAL while SetFullLayout() below
		// recorded InitialLayout — tracked and actual layouts disagreed. Use the
		// same barrier helper as the clear path to reach the requested layout.
		if ((InitialLayout != CurrentLayout) && (InitialLayout != VK_IMAGE_LAYOUT_UNDEFINED))
		{
			CGIVulkanPipelineBarrier Barrier;
			Barrier.AddImageLayoutTransition(mImage, CurrentLayout, InitialLayout, SubresourceRange);
			Barrier.Execute(CmdBuffer);
		}

		// Record the layout the image is (now actually) in for later transitions.
		CmdBuffer->GetLayoutManager().SetFullLayout(*this, InitialLayout);
	}
    // Creates an empty view wrapper bound to a device. Bindless descriptor
    // reservation is not ported yet, so InDescriptorType is currently unused
    // (kept for the future ReserveDescriptor call below).
    CGIVulkanView::CGIVulkanView(VulkanDevice& InDevice, VkDescriptorType InDescriptorType)
    : mDevice(InDevice)
    {
        //BindlessHandle = Device.GetBindlessDescriptorManager()->ReserveDescriptor(InDescriptorType);
    }
    // Fills a VkImageViewCreateInfo from the given parameters and creates the
    // VkImageView into mTextureView. Returns 'this' so calls can be chained off
    // a freshly constructed view. Bindless-descriptor update paths are not
    // ported yet (see commented code at the bottom).
    CGIVulkanView* CGIVulkanView::InitAsTextureView(
		  VkImage InImage
		, VkImageViewType ViewType
		, VkImageAspectFlags AspectFlags
		, EPixelFormat UEFormat
		, VkFormat Format
		, uint32 FirstMip
		, uint32 NumMips
		, uint32 ArraySliceIndex
		, uint32 NumArraySlices
		, bool bUseIdentitySwizzle
		, VkImageUsageFlags ImageUsageFlags
		, VkSamplerYcbcrConversion SamplerYcbcrConversion)
	{
		// We will need a deferred update if the descriptor was already in use
		//const bool bImmediateUpdate = !IsInitialized();

		//check(GetViewType() == EType::Null);
		////Storage.Emplace<FTextureView>();
		//FTextureView& TV = Storage.Get<FTextureView>();

		//LLM_SCOPE_VULKAN(ELLMTagVulkan::VulkanTextures);

		VkImageViewCreateInfo ViewInfo={};
		ViewInfo.sType=VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
		ViewInfo.image = InImage;
		ViewInfo.viewType = ViewType;
		ViewInfo.format = Format;

		if (bUseIdentitySwizzle)
		{
			ViewInfo.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
			ViewInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
			ViewInfo.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
			ViewInfo.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
		}
		else
		{
			// Per-format component mapping not ported; the zero-initialized
			// components from 'ViewInfo={}' are what the driver sees here
			// (VK_COMPONENT_SWIZZLE_IDENTITY is value 0 in the Vulkan headers —
			// verify against the target SDK).
			//ViewInfo.components = mDevice.GetFormatComponentMapping(UEFormat);
		}

		ViewInfo.subresourceRange.aspectMask = AspectFlags;
		ViewInfo.subresourceRange.baseMipLevel = FirstMip;
		ViewInfo.subresourceRange.levelCount = NumMips;
		ViewInfo.subresourceRange.baseArrayLayer = ArraySliceIndex;
		ViewInfo.subresourceRange.layerCount = NumArraySlices;

		//HACK.  DX11 on PC currently uses a D24S8 depthbuffer and so needs an X24_G8 SRV to visualize stencil.
		//So take that as our cue to visualize stencil.  In the future, the platform independent code will have a real format
		//instead of PF_DepthStencil, so the cross-platform code could figure out the proper format to pass in for this.
		if (UEFormat == EPixelFormat::PF_X24_G8)
		{
			ViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
		}

		// Inform the driver the view will only be used with a subset of usage flags (to help performance and/or compatibility)
		// The struct is chained in front of any existing pNext entries so nothing
		// already on the chain is lost.
		VkImageViewUsageCreateInfo ImageViewUsageCreateInfo;
		if (ImageUsageFlags != 0)
		{
			ImageViewUsageCreateInfo={};
			ImageViewUsageCreateInfo.sType=VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO;
			ImageViewUsageCreateInfo.usage = ImageUsageFlags;

			ImageViewUsageCreateInfo.pNext = (void*)ViewInfo.pNext;
			ViewInfo.pNext = &ImageViewUsageCreateInfo;
		}

		//INC_DWORD_STAT(STAT_VulkanNumImageViews);
		// NOTE(review): the VkResult is ignored — on failure mTextureView.View is
		// left unchanged/invalid; confirm callers can tolerate that.
		vkCreateImageView(mDevice.GetDevice(), &ViewInfo, NULL, &mTextureView.View);

		mTextureView.Image = InImage;

		/*if (UseVulkanDescriptorCache())
		{
			TV.ViewId = ++GVulkanImageViewHandleIdCounter;
		}*/

		//const bool bDepthOrStencilAspect = (AspectFlags & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != 0;
		//mDevice.GetBindlessDescriptorManager()->UpdateImage(BindlessHandle, TV.View, bDepthOrStencilAspect, bImmediateUpdate);

		return this;
	}
	// Thin RAII-style wrapper: the layout fully describes the pass, so the
	// VkRenderPass handle is built immediately and kept for this object's lifetime.
	VulkanRenderPass::VulkanRenderPass(VulkanDevice& inDevice, const VulkanRenderTargetLayout& inVulkanRenderTargetLayout)
		: mDevice(inDevice)
		, mLayout(inVulkanRenderTargetLayout)
	{
		mRenderPass = CreateVulkanRenderPass(inDevice, inVulkanRenderTargetLayout);
	}
	// Builds a VkFramebuffer for the given render targets against an existing
	// render pass: creates per-attachment image views (2D/2D-array, cube-face,
	// or 3D-as-array), collects their handles, and records the render area from
	// the layout's extents. Resolve and depth/stencil attachment handling is not
	// ported yet (large commented blocks below).
	VulkanFramebuffer::VulkanFramebuffer(VulkanDevice& Device, const CGISetRenderTargetsInfo& InRTInfo, const VulkanRenderTargetLayout& RTLayout, const VulkanRenderPass& RenderPass)
	: Framebuffer(VK_NULL_HANDLE)
	, NumColorRenderTargets(InRTInfo.NumColorRenderTargets)
	, NumColorAttachments(0)
	, DepthStencilRenderTargetImage(VK_NULL_HANDLE)
	, FragmentDensityImage(VK_NULL_HANDLE)
	{
		mRenderPass=&RenderPass;
		mColorAttachment0ImageView=NULL;
		memset(ColorRenderTargetImages,0,sizeof(ColorRenderTargetImages));
		memset(ColorResolveTargetImages,0,sizeof(ColorResolveTargetImages));

		AttachmentTextureViews.reserve(RTLayout.GetNumAttachmentDescriptions());

		// Allocates a view this framebuffer is responsible for.
		// NOTE(review): the view is heap-allocated and its pointer is stored in
		// AttachmentTextureViews, but OwnedTextureViews receives a COPY (*View) —
		// confirm who deletes the new'd object; this looks like a potential leak
		// or double bookkeeping.
		auto CreateOwnedView = [&]()
		{
			const VkDescriptorType DescriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
			CGIVulkanView* View = new CGIVulkanView(Device, DescriptorType);
			AttachmentTextureViews.push_back(View);
			OwnedTextureViews.push_back(*View);
			return View;
		};

		// Records a view owned elsewhere. Currently only referenced by the
		// commented-out depth/stencil path below.
		auto AddExternalView = [&](CGIVulkanView const* View)
		{
			AttachmentTextureViews.push_back(View);
		};

		uint32 MipIndex = 0;

		const VkExtent3D& RTExtents = RTLayout.GetExtent3D();
		// Adreno does not like zero size RTs
		uint32 NumLayers = RTExtents.depth;

		for (int32 Index = 0; Index < InRTInfo.NumColorRenderTargets; ++Index)
		{
			CGITexture* RHITexture = InRTInfo.ColorRenderTarget[Index].Texture;
			if (!RHITexture)
			{
				continue;
			}
			CGIVulkanTexture* Texture = reinterpret_cast<CGIVulkanTexture*>(RHITexture);
			// NOTE(review): assigned on EVERY loop iteration, so despite the name
			// this ends up holding the LAST bound color target's default view —
			// confirm intent.
			mColorAttachment0ImageView=Texture->mDefaultView->mTextureView.View;
			const CGITextureDesc& Desc = Texture->GetDesc();

			// this could fire in case one of the textures is FVulkanBackBuffer and it has not acquired an image
			// with EDelayAcquireImageType::LazyAcquire acquire happens when texture transition to Writeable state
			// make sure you call TransitionResource(Writable, Tex) before using this texture as a render-target

			ColorRenderTargetImages[Index] = Texture->mImage;
			MipIndex = InRTInfo.ColorRenderTarget[Index].MipIndex;

			if (Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_2D || Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_2D_ARRAY)
			{
				// ArraySliceIndex == -1 means "bind all slices" of an array target.
				uint32 ArraySliceIndex = 0;
				uint32 NumArraySlices = 1;
				if (InRTInfo.ColorRenderTarget[Index].ArraySliceIndex == -1)
				{
					ArraySliceIndex = 0;
					NumArraySlices = Texture->GetNumberOfArrayLevels();
				}
				else
				{
					ArraySliceIndex = InRTInfo.ColorRenderTarget[Index].ArraySliceIndex;
					NumArraySlices = 1;
				}

				// About !RTLayout.GetIsMultiView(), from https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkFramebufferCreateInfo.html: 
				// If the render pass uses multiview, then layers must be one
				if (Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_2D_ARRAY )//&& !RTLayout.GetIsMultiView())
				{
					NumLayers = NumArraySlices;
				}

				CreateOwnedView()->InitAsTextureView(
					  Texture->mImage
					, Texture->GetViewType()
					, Texture->GetFullAspectMask()
					, Desc.Format
					, Texture->mViewFormat
					, MipIndex
					, 1
					, ArraySliceIndex
					, NumArraySlices
					, true
					, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | (Texture->mImageUsageFlags & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)
				);
			}
			else if (Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_CUBE || Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
			{
				// Cube always renders one face at a time
				CreateOwnedView()->InitAsTextureView(
					  Texture->mImage
					, VK_IMAGE_VIEW_TYPE_2D
					, Texture->GetFullAspectMask()
					, Desc.Format
					, Texture->mViewFormat
					, MipIndex
					, 1
					, InRTInfo.ColorRenderTarget[Index].ArraySliceIndex
					, 1
					, true
					, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | (Texture->mImageUsageFlags & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)
				);
			}
			else if (Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_3D)
			{
				// 3D targets are bound as a 2D array covering every depth slice.
				CreateOwnedView()->InitAsTextureView(
					  Texture->mImage
					, VK_IMAGE_VIEW_TYPE_2D_ARRAY
					, Texture->GetFullAspectMask()
					, Desc.Format
					, Texture->mViewFormat
					, MipIndex
					, 1
					, 0
					, Desc.Depth
					, true
					, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | (Texture->mImageUsageFlags & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)
				);
			}
			++NumColorAttachments;

			// Check the RTLayout as well to make sure the resolve attachment is needed (Vulkan and Feature level specific)
			// See: FVulkanRenderTargetLayout constructor with FRHIRenderPassInfo
			/*if (InRTInfo.bHasResolveAttachments && RTLayout.GetHasResolveAttachments() && RTLayout.GetResolveAttachmentReferences()[Index].layout != VK_IMAGE_LAYOUT_UNDEFINED)
			{
				FRHITexture* ResolveRHITexture = InRTInfo.ColorResolveRenderTarget[Index].Texture;
				FVulkanTexture* ResolveTexture = ResourceCast(ResolveRHITexture);
				ColorResolveTargetImages[Index] = ResolveTexture->Image;

				//resolve attachments only supported for 2d/2d array textures
				if (ResolveTexture->GetViewType() == VK_IMAGE_VIEW_TYPE_2D || ResolveTexture->GetViewType() == VK_IMAGE_VIEW_TYPE_2D_ARRAY)
				{
					CreateOwnedView()->InitAsTextureView(
						  ResolveTexture->Image
						, ResolveTexture->GetViewType()
						, ResolveTexture->GetFullAspectMask()
						, ResolveTexture->GetDesc().Format
						, ResolveTexture->ViewFormat
						, MipIndex
						, 1
						, MathUtils::Max(0, (int32)InRTInfo.ColorRenderTarget[Index].ArraySliceIndex)
						, ResolveTexture->GetNumberOfArrayLevels()
						, true
					);
				}
			}*/
		}

		/*if (RTLayout.GetHasDepthStencil())
		{
			FVulkanTexture* Texture = ResourceCast(InRTInfo.DepthStencilRenderTarget.Texture);
			const FRHITextureDesc& Desc = Texture->GetDesc();
			DepthStencilRenderTargetImage = Texture->Image;
			bool bHasStencil = (Texture->GetDesc().Format == PF_DepthStencil || Texture->GetDesc().Format == PF_X24_G8);

			check(Texture->PartialView);
			PartialDepthTextureView = Texture->PartialView;

			ensure(Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_2D || Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_2D_ARRAY || Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_CUBE);
			if (NumColorAttachments == 0 && Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_CUBE)
			{
				CreateOwnedView()->InitAsTextureView(
					  Texture->Image
					, VK_IMAGE_VIEW_TYPE_2D_ARRAY
					, Texture->GetFullAspectMask()
					, Texture->GetDesc().Format
					, Texture->ViewFormat
					, MipIndex
					, 1
					, 0
					, 6
					, true
				);

				NumLayers = 6;
			}
			else if (Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_2D  || Texture->GetViewType() == VK_IMAGE_VIEW_TYPE_2D_ARRAY)
			{
				// depth attachments need a separate view to have no swizzle components, for validation correctness
				CreateOwnedView()->InitAsTextureView(
					  Texture->Image
					, Texture->GetViewType()
					, Texture->GetFullAspectMask()
					, Texture->GetDesc().Format
					, Texture->ViewFormat
					, MipIndex
					, 1
					, 0
					, Texture->GetNumberOfArrayLevels()
					, true
				);
			}
			else
			{
				AddExternalView(Texture->DefaultView);
			}

			if (RTLayout.GetHasDepthStencilResolve() && RTLayout.GetDepthStencilResolveAttachmentReference()->layout != VK_IMAGE_LAYOUT_UNDEFINED)
			{
				FRHITexture* ResolveRHITexture = InRTInfo.DepthStencilResolveRenderTarget.Texture;
				FVulkanTexture* ResolveTexture = ResourceCast(ResolveRHITexture);
				DepthStencilResolveRenderTargetImage = ResolveTexture->Image;

				// Resolve attachments only supported for 2d/2d array textures
				if (ResolveTexture->GetViewType() == VK_IMAGE_VIEW_TYPE_2D || ResolveTexture->GetViewType() == VK_IMAGE_VIEW_TYPE_2D_ARRAY)
				{
					CreateOwnedView()->InitAsTextureView(
						ResolveTexture->Image
						, ResolveTexture->GetViewType()
						, ResolveTexture->GetFullAspectMask()
						, ResolveTexture->GetDesc().Format
						, ResolveTexture->ViewFormat
						, MipIndex
						, 1
						, 0
						, ResolveTexture->GetNumberOfArrayLevels()
						, true
					);
				}
			}
		}*/

		// Gather the raw VkImageView handles in attachment order for the create info.
		std::vector<VkImageView> AttachmentViews;
		AttachmentViews.reserve(AttachmentTextureViews.size());
		for (CGIVulkanView const* View : AttachmentTextureViews)
		{
			AttachmentViews.push_back(View->GetTextureView().View);
		}

		VkFramebufferCreateInfo CreateInfo={};
		CreateInfo.sType=VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
		CreateInfo.renderPass = RenderPass.GetHandle();
		// NOTE(review): implicit size_t -> uint32 narrowing; fine for realistic
		// attachment counts but may warn on strict builds.
		CreateInfo.attachmentCount = AttachmentViews.size();
		CreateInfo.pAttachments = AttachmentViews.data();
		CreateInfo.width  = RTExtents.width;
		CreateInfo.height = RTExtents.height;
		CreateInfo.layers = NumLayers;

		// NOTE(review): VkResult is ignored — Framebuffer stays VK_NULL_HANDLE on
		// failure; confirm callers can cope.
		vkCreateFramebuffer(Device.GetDevice(), &CreateInfo, NULL, &Framebuffer);

		// Default render area covers the full attachment extents.
		RenderArea.offset.x = 0;
		RenderArea.offset.y = 0;
		RenderArea.extent.width = RTExtents.width;
		RenderArea.extent.height = RTExtents.height;
	}
}
