diff --git a/renderdoc/driver/vulkan/vk_initstate.cpp b/renderdoc/driver/vulkan/vk_initstate.cpp
index 8475be1ea..2aa2de2c6 100644
--- a/renderdoc/driver/vulkan/vk_initstate.cpp
+++ b/renderdoc/driver/vulkan/vk_initstate.cpp
@@ -900,6 +900,18 @@ bool WrappedVulkan::Serialise_InitialState(SerialiserType &ser, ResourceId id, W
     vkr = ObjDisp(d)->MapMemory(Unwrap(d), Unwrap(mappedMem.mem), initContents.mem.offs,
                                 initContents.mem.size, 0, (void **)&Contents);
     RDCASSERTEQUAL(vkr, VK_SUCCESS);
+
+    // invalidate the cpu cache for this memory range to avoid reading stale data
+    VkMappedMemoryRange range = {
+        VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
+        NULL,
+        Unwrap(mappedMem.mem),
+        mappedMem.offs,
+        mappedMem.size,
+    };
+
+    vkr = ObjDisp(d)->InvalidateMappedMemoryRanges(Unwrap(d), 1, &range);
+    RDCASSERTEQUAL(vkr, VK_SUCCESS);
   }
   else if(IsReplayingAndReading() && !ser.IsErrored())
   {
@@ -933,7 +945,24 @@ bool WrappedVulkan::Serialise_InitialState(SerialiserType &ser, ResourceId id, W
 
   // unmap the resource we mapped before - we need to do this on read and on write.
   if(!IsStructuredExporting(m_State) && mappedMem.mem != VK_NULL_HANDLE)
+  {
+    if(IsReplayingAndReading())
+    {
+      // first ensure we flush the writes from the cpu to gpu memory
+      VkMappedMemoryRange range = {
+          VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
+          NULL,
+          Unwrap(mappedMem.mem),
+          mappedMem.offs,
+          mappedMem.size,
+      };
+
+      vkr = ObjDisp(d)->FlushMappedMemoryRanges(Unwrap(d), 1, &range);
+      RDCASSERTEQUAL(vkr, VK_SUCCESS);
+    }
+
     ObjDisp(d)->UnmapMemory(Unwrap(d), Unwrap(mappedMem.mem));
+  }
 
   SERIALISE_CHECK_READ_ERRORS();
 
diff --git a/renderdoc/driver/vulkan/vk_sparse_initstate.cpp b/renderdoc/driver/vulkan/vk_sparse_initstate.cpp
index 49e1c8229..a5f4bbea3 100644
--- a/renderdoc/driver/vulkan/vk_sparse_initstate.cpp
+++ b/renderdoc/driver/vulkan/vk_sparse_initstate.cpp
@@ -441,6 +441,18 @@ bool WrappedVulkan::Serialise_SparseBufferInitialState(SerialiserType &ser, Reso
     vkr = ObjDisp(d)->MapMemory(Unwrap(d), Unwrap(mappedMem.mem), mappedMem.offs,
                                 mappedMem.size, 0, (void **)&Contents);
     RDCASSERTEQUAL(vkr, VK_SUCCESS);
+
+    // invalidate the cpu cache for this memory range to avoid reading stale data
+    VkMappedMemoryRange range = {
+        VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
+        NULL,
+        Unwrap(mappedMem.mem),
+        mappedMem.offs,
+        mappedMem.size,
+    };
+
+    vkr = ObjDisp(d)->InvalidateMappedMemoryRanges(Unwrap(d), 1, &range);
+    RDCASSERTEQUAL(vkr, VK_SUCCESS);
   }
   else if(IsReplayingAndReading() && !ser.IsErrored())
   {
@@ -477,7 +489,24 @@ bool WrappedVulkan::Serialise_SparseBufferInitialState(SerialiserType &ser, Reso
 
   // unmap the resource we mapped before - we need to do this on read and on write.
   if(!IsStructuredExporting(m_State) && mappedMem.mem != VK_NULL_HANDLE)
+  {
+    if(IsReplayingAndReading())
+    {
+      // first ensure we flush the writes from the cpu to gpu memory
+      VkMappedMemoryRange range = {
+          VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
+          NULL,
+          Unwrap(mappedMem.mem),
+          mappedMem.offs,
+          mappedMem.size,
+      };
+
+      vkr = ObjDisp(d)->FlushMappedMemoryRanges(Unwrap(d), 1, &range);
+      RDCASSERTEQUAL(vkr, VK_SUCCESS);
+    }
+
     ObjDisp(d)->UnmapMemory(Unwrap(d), Unwrap(mappedMem.mem));
+  }
 
   SERIALISE_CHECK_READ_ERRORS();
 
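For reference, all four hunks implement the same map/invalidate/flush/unmap pattern. A minimal standalone sketch of it against the raw Vulkan API follows; the function name, parameters and error handling here are illustrative only, not RenderDoc code:

    #include <vulkan/vulkan.h>

    // Map a range of device memory, invalidate the CPU cache before reading,
    // and flush CPU writes before unmapping. The invalidate/flush pair is
    // required by the spec whenever the memory type lacks
    // VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, and is a harmless no-op when the
    // memory is host-coherent - which is why the patch can issue the calls
    // unconditionally.
    VkResult ReadWriteMappedRange(VkDevice device, VkDeviceMemory memory,
                                  VkDeviceSize offset, VkDeviceSize size)
    {
      void *ptr = NULL;
      VkResult vkr = vkMapMemory(device, memory, offset, size, 0, &ptr);
      if(vkr != VK_SUCCESS)
        return vkr;

      VkMappedMemoryRange range = {
          VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, NULL, memory, offset, size,
      };

      // make any GPU writes visible to the CPU before reading through 'ptr'
      vkr = vkInvalidateMappedMemoryRanges(device, 1, &range);

      // ... read and/or write the mapped bytes through 'ptr' here ...

      // make CPU writes available to the GPU before unmapping
      if(vkr == VK_SUCCESS)
        vkr = vkFlushMappedMemoryRanges(device, 1, &range);

      vkUnmapMemory(device, memory);
      return vkr;
    }

Note that for non-coherent memory the spec also requires the range's offset to be a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize, and its size to be a multiple of that limit as well, or else to extend to the end of the allocation (e.g. VK_WHOLE_SIZE).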