For now, handle persistent maps during capture by forcing flush

* This is ugly and will change once maps are properly intercepted, but it works for now.
This commit is contained in:
baldurk
2015-10-01 15:38:26 +02:00
parent 5911749596
commit e3ca8d0773
4 changed files with 43 additions and 2 deletions
+1 -1
View File
@@ -164,7 +164,7 @@ bool FindDiffRange(void *a, void *b, size_t bufSize, size_t &diffStart, size_t &
// do we have some unaligned bytes at the end of the buffer?
if(bufSize > alignedSize)
{
size_t numBytes = alignedSize-bufSize;
size_t numBytes = bufSize-alignedSize;
// if we haven't even found a start, check in these bytes
if(diffStart > bufSize)
+3 -1
View File
@@ -703,13 +703,15 @@ struct VkResourceRecord : public ResourceRecord
struct MemState
{
MemState()
: mapOffset(0), mapSize(0), size(0), mapFlags(0), mappedPtr(0), mapFlushed(false)
: device(VK_NULL_HANDLE), mapOffset(0), mapSize(0), size(0), mapFlags(0), mappedPtr(NULL), mapFlushed(false), refData(NULL)
{ }
VkDevice device;
VkDeviceSize mapOffset, mapSize;
VkDeviceSize size;
VkMemoryMapFlags mapFlags;
bool mapFlushed;
void *mappedPtr;
byte *refData;
};
struct ImgState
{
@@ -339,6 +339,42 @@ VkResult WrappedVulkan::vkQueueSubmit(
m_FrameCaptureRecord->AddChunk(scope.Get());
}
// VKTODOHIGH when maps are intercepted with local buffers, this will have to be
// done when not in capframe :(.
if(m_State == WRITING_CAPFRAME)
{
for(auto it = m_MemoryInfo.begin(); it != m_MemoryInfo.end(); ++it)
{
// potential persistent map, force a full flush
if(it->second.mappedPtr)
{
size_t diffStart = 0, diffEnd = 0;
bool found = true;
// if we have a previous set of data, compare.
// otherwise just serialise it all
if(it->second.refData)
found = FindDiffRange((byte *)it->second.mappedPtr, it->second.refData, (size_t)it->second.mapSize, diffStart, diffEnd);
else
diffEnd = (size_t)it->second.mapSize;
if(found)
{
{
RDCLOG("Persistent map flush forced for %llu (%llu -> %llu)", it->first, (uint64_t)diffStart, (uint64_t)diffEnd);
VkMappedMemoryRange range = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, NULL, GetResourceManager()->GetCurrentHandle<VkDeviceMemory>(it->first), it->second.mapOffset+diffStart, diffEnd-diffStart };
vkFlushMappedMemoryRanges(it->second.device, 1, &range);
}
// allocate ref data so we can compare next time to minimise serialised data
if(it->second.refData == NULL)
it->second.refData = new byte[it->second.mapSize];
memcpy(it->second.refData, it->second.mappedPtr, (size_t)it->second.mapSize);
}
}
}
}
for(uint32_t i=0; i < cmdBufferCount; i++)
{
ResourceId cmd = GetResID(pCmdBuffers[i]);
@@ -98,6 +98,7 @@ VkResult WrappedVulkan::vkAllocMemory(
GetResourceManager()->AddLiveResource(id, *pMem);
}
m_MemoryInfo[id].device = device;
m_MemoryInfo[id].size = pAllocInfo->allocationSize;
}
@@ -150,6 +151,7 @@ VkResult WrappedVulkan::vkMapMemory(
it->second.mapSize = size == 0 ? it->second.size : size;
it->second.mapFlags = flags;
it->second.mapFlushed = false;
it->second.refData = NULL;
}
}
else if(m_State >= WRITING)
@@ -263,6 +265,7 @@ VkResult WrappedVulkan::vkUnmapMemory(
}
it->second.mappedPtr = NULL;
SAFE_DELETE_ARRAY(it->second.refData);
}
}
}