Move m_MemoryInfo to m_CurrentMaps and ensure correct locking

This commit is contained in:
baldurk
2015-10-21 15:15:31 +02:00
parent 98808226bb
commit 9fb9b59c32
8 changed files with 159 additions and 131 deletions
+25 -7
View File
@@ -348,17 +348,35 @@ private:
vector<VkDescriptorInfo *> currentBindings;
};
// VKTODO all these m_*Info things need to be locked and ensure we only access
// them in slow path functions like creation, or just moved elsewhere like inside
// the wrapped objects
// VKTODOHIGH all of these need to be locked
map<ResourceId, MemState> m_MemoryInfo;
// Capture-side record of one outstanding vkMapMemory mapping. The recorded
// pointer/range is read back at unmap or flush time to serialise the memory
// contents (see Serialise_vkUnmapMemory / Serialise_vkFlushMappedMemoryRanges).
struct MapState
{
MapState()
: device(VK_NULL_HANDLE), mapOffset(0), mapSize(0), mapFlags(0)
, mapFrame(0), mapFlushed(false), mappedPtr(NULL), refData(NULL)
{ }
// device the memory was mapped on (recorded in vkMapMemory)
VkDevice device;
// byte range passed to vkMapMemory; a size of 0 is expanded to the whole
// allocation (record->Length) when the map is recorded
VkDeviceSize mapOffset, mapSize;
// flags passed to vkMapMemory, serialised alongside the data
VkMemoryMapFlags mapFlags;
// m_FrameCounter at the time of the map
uint32_t mapFrame;
// set by vkFlushMappedMemoryRanges; an unflushed map at unmap/queue-submit
// is treated as potentially dirty and fully serialised
bool mapFlushed;
// CPU pointer returned by the driver (*ppData)
void *mappedPtr;
// NOTE(review): only ever set to NULL and freed here — presumably a shadow
// copy for diffing map contents; confirm before documenting further
byte *refData;
};
// capture-side data
// holds the current list of mapped memory. Locked against concurrent use
// VKTODOLOW once maps are handled properly we will know which maps must be
// treated as coherent and this will be a vector of VkResourceRecords to
// iterate over and flush changes out at any queuesubmit. Then the main
// MapState can be moved into the resource record
map<ResourceId, MapState> m_CurrentMaps;
Threading::CriticalSection m_CurrentMapsLock;
map<ResourceId, ImgState> m_ImageInfo;
// below are replay-side data only, doesn't have to be thread protected
// current memory bindings, image/buffer id -> memory id
map<ResourceId, ResourceId> m_MemBindState;
// current descriptor set contents
map<ResourceId, DescriptorSetInfo> m_DescriptorSetState;
// data for a baked command buffer - its drawcalls and events, ready to submit
+5
View File
@@ -241,6 +241,11 @@ void VulkanCreationInfo::Framebuffer::Init(const VkFramebufferCreateInfo* pCreat
attachments[i].view = VKMGR()->GetNonDispWrapper(pCreateInfo->pAttachments[i])->id;
}
void VulkanCreationInfo::Buffer::Init(const VkBufferCreateInfo* pCreateInfo)
{
size = pCreateInfo->size;
}
void VulkanCreationInfo::BufferView::Init(const VkBufferViewCreateInfo* pCreateInfo)
{
buffer = VKMGR()->GetNonDispWrapper(pCreateInfo->buffer)->id;
+8
View File
@@ -170,6 +170,14 @@ struct VulkanCreationInfo
};
map<ResourceId, Framebuffer> m_Framebuffer;
// Creation parameters for a VkBuffer, filled in during Serialise_vkCreateBuffer
// on replay and queried e.g. by VulkanReplay::GetBufferData.
struct Buffer
{
// copies the needed fields out of the create info
void Init(const VkBufferCreateInfo* pCreateInfo);
// VkBufferCreateInfo::size — buffer length in bytes
uint64_t size;
};
map<ResourceId, Buffer> m_Buffer;
struct BufferView
{
void Init(const VkBufferViewCreateInfo* pCreateInfo);
+10 -22
View File
@@ -53,13 +53,8 @@ bool WrappedVulkan::Prepare_InitialState(WrappedVkRes *res)
}
else if(type == eResDeviceMemory)
{
if(m_MemoryInfo.find(id) == m_MemoryInfo.end())
{
RDCERR("Couldn't find memory info");
return false;
}
MemState &meminfo = m_MemoryInfo[id];
VkResourceRecord *record = GetResourceManager()->GetResourceRecord(id);
RDCASSERT(record->Length > 0);
VkResult vkr = VK_SUCCESS;
@@ -73,7 +68,7 @@ bool WrappedVulkan::Prepare_InitialState(WrappedVkRes *res)
VkBufferCreateInfo bufInfo = {
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, NULL,
meminfo.size, VK_BUFFER_USAGE_TRANSFER_SOURCE_BIT|VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT, 0,
record->Length, VK_BUFFER_USAGE_TRANSFER_SOURCE_BIT|VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT, 0,
VK_SHARING_MODE_EXCLUSIVE, 0, NULL,
};
@@ -92,7 +87,7 @@ bool WrappedVulkan::Prepare_InitialState(WrappedVkRes *res)
VkMemoryAllocInfo allocInfo = {
VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO, NULL,
meminfo.size, GetReadbackMemoryIndex(mrq.memoryTypeBits),
record->Length, GetReadbackMemoryIndex(mrq.memoryTypeBits),
};
allocInfo.allocationSize = AlignUp(allocInfo.allocationSize, mrq.alignment);
@@ -114,7 +109,7 @@ bool WrappedVulkan::Prepare_InitialState(WrappedVkRes *res)
vkr = ObjDisp(d)->BeginCommandBuffer(Unwrap(cmd), &beginInfo);
RDCASSERT(vkr == VK_SUCCESS);
VkBufferCopy region = { 0, 0, meminfo.size };
VkBufferCopy region = { 0, 0, record->Length };
ObjDisp(d)->CmdCopyBuffer(Unwrap(cmd), srcBuf, dstBuf, 1, &region);
@@ -131,7 +126,7 @@ bool WrappedVulkan::Prepare_InitialState(WrappedVkRes *res)
ObjDisp(d)->DestroyBuffer(Unwrap(d), srcBuf);
ObjDisp(d)->DestroyBuffer(Unwrap(d), dstBuf);
GetResourceManager()->SetInitialContents(id, VulkanResourceManager::InitialContentData(GetWrapped(mem), (uint32_t)meminfo.size, NULL));
GetResourceManager()->SetInitialContents(id, VulkanResourceManager::InitialContentData(GetWrapped(mem), (uint32_t)record->Length, NULL));
return true;
}
@@ -376,7 +371,7 @@ bool WrappedVulkan::Serialise_InitialState(WrappedVkRes *res)
m_FreeMems.push_back(mem);
GetResourceManager()->SetInitialContents(id, VulkanResourceManager::InitialContentData(GetWrapped(buf), eInitialContents_Copy, NULL));
GetResourceManager()->SetInitialContents(id, VulkanResourceManager::InitialContentData(GetWrapped(buf), (uint32_t)dataSize, NULL));
}
else if(type == eResImage)
{
@@ -471,15 +466,8 @@ void WrappedVulkan::Apply_InitialState(WrappedVkRes *live, VulkanResourceManager
}
else if(type == eResDeviceMemory)
{
if(m_MemoryInfo.find(id) == m_MemoryInfo.end())
{
RDCERR("Couldn't find memory info");
return;
}
MemState &meminfo = m_MemoryInfo[id];
VkBuffer srcBuf = (VkBuffer)(uint64_t)initial.resource;
uint32_t memsize = initial.num;
VkDeviceMemory dstMem = (VkDeviceMemory)(uint64_t)live; // maintain the wrapping, for consistency
VkResult vkr = VK_SUCCESS;
@@ -496,7 +484,7 @@ void WrappedVulkan::Apply_InitialState(WrappedVkRes *live, VulkanResourceManager
VkBufferCreateInfo bufInfo = {
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, NULL,
meminfo.size, VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT|VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT, 0,
memsize, VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT|VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT, 0,
VK_SHARING_MODE_EXCLUSIVE, 0, NULL,
};
@@ -511,7 +499,7 @@ void WrappedVulkan::Apply_InitialState(WrappedVkRes *live, VulkanResourceManager
vkr = ObjDisp(d)->BindBufferMemory(Unwrap(d), dstBuf, Unwrap(dstMem), 0);
RDCASSERT(vkr == VK_SUCCESS);
VkBufferCopy region = { 0, 0, meminfo.size };
VkBufferCopy region = { 0, 0, memsize };
ObjDisp(cmd)->CmdCopyBuffer(Unwrap(cmd), Unwrap(srcBuf), dstBuf, 1, &region);
+5 -16
View File
@@ -1191,30 +1191,19 @@ vector<byte> VulkanReplay::GetBufferData(ResourceId buff, uint32_t offset, uint3
VkCmdBuffer cmd = m_pDriver->GetNextCmd();
const VkLayerDispatchTable *vt = ObjDisp(dev);
ResourceId memid;
{
auto it = m_pDriver->m_MemBindState.find(buff);
if(it == m_pDriver->m_MemBindState.end())
{
RDCWARN("Buffer has no memory bound, or no buffer of this ID");
return vector<byte>();
}
memid = it->second;
}
VkBuffer srcBuf = m_pDriver->GetResourceManager()->GetCurrentHandle<VkBuffer>(buff);
ResourceId origid = m_pDriver->GetResourceManager()->GetOriginalID(buff);
if(len == 0)
{
len = uint32_t(m_pDriver->m_MemoryInfo[memid].size - offset);
len = uint32_t(m_pDriver->m_CreationInfo.m_Buffer[origid].size - offset);
}
if(len > 0 && VkDeviceSize(offset+len) > m_pDriver->m_MemoryInfo[memid].size)
if(len > 0 && VkDeviceSize(offset+len) > m_pDriver->m_CreationInfo.m_Buffer[origid].size)
{
RDCWARN("Attempting to read off the end of the array. Will be clamped");
len = RDCMIN(len, uint32_t(m_pDriver->m_MemoryInfo[memid].size - offset));
len = RDCMIN(len, uint32_t(m_pDriver->m_CreationInfo.m_Buffer[origid].size - offset));
}
vector<byte> ret;
-14
View File
@@ -693,20 +693,6 @@ struct VkResourceRecord : public ResourceRecord
VkResourceRecord *memory;
};
// Per-allocation device memory state: the total allocation size plus details
// of any current vkMapMemory mapping. This commit removes it — the size
// moves to record->Length and the mapping fields move to MapState, tracked
// in m_CurrentMaps under m_CurrentMapsLock.
struct MemState
{
MemState()
: device(VK_NULL_HANDLE), mapOffset(0), mapSize(0), size(0), mapFlags(0), mapFrame(0), mappedPtr(NULL), mapFlushed(false), refData(NULL)
{ }
// device the memory was allocated/mapped on
VkDevice device;
// byte range of the current map, if any
VkDeviceSize mapOffset, mapSize;
// total allocation size (VkMemoryAllocInfo::allocationSize)
VkDeviceSize size;
// flags passed to vkMapMemory
VkMemoryMapFlags mapFlags;
// frame counter at the time of the map
uint32_t mapFrame;
// whether the current map has been flushed via vkFlushMappedMemoryRanges
bool mapFlushed;
// CPU pointer returned by the driver for the current map
void *mappedPtr;
// NOTE(review): freed on unmap but never populated in the visible code —
// presumably a shadow copy for diffing; confirm
byte *refData;
};
struct ImgState
{
ImgState()
@@ -394,9 +394,15 @@ VkResult WrappedVulkan::vkQueueSubmit(
if(capframe)
{
map<ResourceId, MapState> maps;
{
SCOPED_LOCK(m_CurrentMapsLock);
maps = m_CurrentMaps;
}
// VKTODOHIGH when maps are intercepted with local buffers, this will have to be
// done when not in capframe :(.
for(auto it = m_MemoryInfo.begin(); it != m_MemoryInfo.end(); ++it)
for(auto it = maps.begin(); it != maps.end(); ++it)
{
// potential persistent map, force a full flush
// VKTODOHIGH need better detection than just 'has not been flushed and has
@@ -54,8 +54,6 @@ bool WrappedVulkan::Serialise_vkAllocMemory(
{
ResourceId live = GetResourceManager()->WrapResource(Unwrap(device), mem);
GetResourceManager()->AddLiveResource(id, mem);
m_MemoryInfo[live].size = info.allocationSize;
}
}
@@ -92,6 +90,10 @@ VkResult WrappedVulkan::vkAllocMemory(
record->AddChunk(chunk);
// VKTODOLOW Change record->Length to at least int64_t (maybe uint64_t)
record->Length = (int32_t)pAllocInfo->allocationSize;
RDCASSERT(pAllocInfo->allocationSize < 0x7FFFFFFF);
// VKTODOMED always treat memory as dirty for now, so its initial state
// is guaranteed to be prepared
{
@@ -106,9 +108,6 @@ VkResult WrappedVulkan::vkAllocMemory(
{
GetResourceManager()->AddLiveResource(id, *pMem);
}
m_MemoryInfo[id].device = device;
m_MemoryInfo[id].size = pAllocInfo->allocationSize;
}
return ret;
@@ -120,7 +119,6 @@ void WrappedVulkan::vkFreeMemory(
{
// we just need to clean up after ourselves on replay
WrappedVkNonDispRes *wrapped = (WrappedVkNonDispRes *)GetWrapped(mem);
m_MemoryInfo.erase(wrapped->id);
VkDeviceMemory unwrappedMem = wrapped->real.As<VkDeviceMemory>();
@@ -145,20 +143,20 @@ VkResult WrappedVulkan::vkMapMemory(
if(m_State >= WRITING)
{
auto it = m_MemoryInfo.find(id);
if(it == m_MemoryInfo.end())
MapState state;
state.device = device;
state.mappedPtr = *ppData;
state.mapOffset = offset;
state.mapSize = size == 0 ? GetRecord(mem)->Length : size;
state.mapFrame = m_FrameCounter;
state.mapFlags = flags;
state.mapFlushed = false;
state.refData = NULL;
{
RDCERR("vkMapMemory for unknown memory handle");
}
else
{
it->second.mappedPtr = *ppData;
it->second.mapOffset = offset;
it->second.mapSize = size == 0 ? it->second.size : size;
it->second.mapFrame = m_FrameCounter;
it->second.mapFlags = flags;
it->second.mapFlushed = false;
it->second.refData = NULL;
SCOPED_LOCK(m_CurrentMapsLock);
RDCASSERT(m_CurrentMaps.find(id) == m_CurrentMaps.end());
m_CurrentMaps[id] = state;
}
}
}
@@ -174,11 +172,16 @@ bool WrappedVulkan::Serialise_vkUnmapMemory(
SERIALISE_ELEMENT(ResourceId, devId, GetResID(device));
SERIALISE_ELEMENT(ResourceId, id, GetResID(mem));
auto it = m_MemoryInfo.find(id);
MapState state;
if(m_State >= WRITING)
{
SCOPED_LOCK(m_CurrentMapsLock);
state = m_CurrentMaps[id];
}
SERIALISE_ELEMENT(VkMemoryMapFlags, flags, it->second.mapFlags);
SERIALISE_ELEMENT(uint64_t, memOffset, it->second.mapOffset);
SERIALISE_ELEMENT(uint64_t, memSize, it->second.mapSize);
SERIALISE_ELEMENT(VkMemoryMapFlags, flags, state.mapFlags);
SERIALISE_ELEMENT(uint64_t, memOffset, state.mapOffset);
SERIALISE_ELEMENT(uint64_t, memSize, state.mapSize);
// VKTODOHIGH: this is really horrible - this could be write-combined memory that we're
// reading from to get the latest data. This saves on having to fetch the data some
@@ -186,7 +189,7 @@ bool WrappedVulkan::Serialise_vkUnmapMemory(
// we're also not doing any diff range checks, just serialising the whole memory region.
// In vulkan the common case will be one memory region for a large number of distinct
// bits of data so most maps will not change the whole region.
SERIALISE_ELEMENT_BUF(byte*, data, (byte *)it->second.mappedPtr + it->second.mapOffset, (size_t)memSize);
SERIALISE_ELEMENT_BUF(byte*, data, (byte *)state.mappedPtr + state.mapOffset, (size_t)memSize);
if(m_State < WRITING)
{
@@ -226,59 +229,73 @@ void WrappedVulkan::vkUnmapMemory(
{
ResourceId id = GetResID(mem);
if(m_State >= WRITING)
MapState state;
{
auto it = m_MemoryInfo.find(id);
if(it == m_MemoryInfo.end())
SCOPED_LOCK(m_CurrentMapsLock);
auto it = m_CurrentMaps.find(id);
if(it == m_CurrentMaps.end())
RDCERR("vkUnmapMemory for memory handle that's not currently mapped");
state = it->second;
}
{
// decide atomically if this chunk should be in-frame or not
// so that we're not in the else branch but haven't marked
// dirty when capframe starts, then we mark dirty while in-frame
bool capframe = false;
{
RDCERR("vkMapMemory for unknown memory handle");
SCOPED_LOCK(m_CapTransitionLock);
capframe = (m_State == WRITING_CAPFRAME);
if(!capframe)
GetResourceManager()->MarkDirtyResource(GetResID(mem));
}
else
if(capframe)
{
// decide atomically if this chunk should be in-frame or not
// so that we're not in the else branch but haven't marked
// dirty when capframe starts, then we mark dirty while in-frame
bool capframe = false;
if(!state.mapFlushed)
{
SCOPED_LOCK(m_CapTransitionLock);
capframe = (m_State == WRITING_CAPFRAME);
CACHE_THREAD_SERIALISER();
if(!capframe)
GetResourceManager()->MarkDirtyResource(GetResID(mem));
}
SCOPED_SERIALISE_CONTEXT(UNMAP_MEM);
Serialise_vkUnmapMemory(localSerialiser, device, mem);
if(capframe)
{
if(!it->second.mapFlushed)
VkResourceRecord *record = GetRecord(mem);
if(m_State == WRITING_IDLE)
{
CACHE_THREAD_SERIALISER();
SCOPED_SERIALISE_CONTEXT(UNMAP_MEM);
Serialise_vkUnmapMemory(localSerialiser, device, mem);
VkResourceRecord *record = GetRecord(mem);
if(m_State == WRITING_IDLE)
{
record->AddChunk(scope.Get());
}
else
{
m_FrameCaptureRecord->AddChunk(scope.Get());
GetResourceManager()->MarkResourceFrameReferenced(GetResID(mem), eFrameRef_Write);
}
record->AddChunk(scope.Get());
}
else
{
// VKTODOLOW for now assuming flushes cover all writes. Technically
// this is true for all non-coherent memory types.
m_FrameCaptureRecord->AddChunk(scope.Get());
GetResourceManager()->MarkResourceFrameReferenced(GetResID(mem), eFrameRef_Write);
}
}
it->second.mappedPtr = NULL;
SAFE_DELETE_ARRAY(it->second.refData);
else
{
// VKTODOLOW for now assuming flushes cover all writes. Technically
// this is true for all non-coherent memory types.
}
}
state.mappedPtr = NULL;
SAFE_DELETE_ARRAY(state.refData);
}
{
SCOPED_LOCK(m_CurrentMapsLock);
auto it = m_CurrentMaps.find(id);
if(it == m_CurrentMaps.end())
RDCERR("vkUnmapMemory for memory handle that's not currently mapped");
state = it->second;
m_CurrentMaps.erase(it);
}
}
}
@@ -291,10 +308,15 @@ bool WrappedVulkan::Serialise_vkFlushMappedMemoryRanges(
{
SERIALISE_ELEMENT(ResourceId, devId, GetResID(device));
SERIALISE_ELEMENT(ResourceId, id, GetResID(pMemRanges->mem));
MapState state;
if(m_State >= WRITING)
{
SCOPED_LOCK(m_CurrentMapsLock);
state = m_CurrentMaps[id];
}
auto it = m_MemoryInfo.find(id);
SERIALISE_ELEMENT(VkMemoryMapFlags, flags, it->second.mapFlags);
SERIALISE_ELEMENT(VkMemoryMapFlags, flags, state.mapFlags);
SERIALISE_ELEMENT(uint64_t, memOffset, pMemRanges->offset);
SERIALISE_ELEMENT(uint64_t, memSize, pMemRanges->size);
@@ -304,7 +326,7 @@ bool WrappedVulkan::Serialise_vkFlushMappedMemoryRanges(
// we're also not doing any diff range checks, just serialising the whole memory region.
// In vulkan the common case will be one memory region for a large number of distinct
// bits of data so most maps will not change the whole region.
SERIALISE_ELEMENT_BUF(byte*, data, (byte *)it->second.mappedPtr + (size_t)memOffset, (size_t)memSize);
SERIALISE_ELEMENT_BUF(byte*, data, (byte *)state.mappedPtr + (size_t)memOffset, (size_t)memSize);
if(m_State < WRITING)
{
@@ -352,8 +374,14 @@ VkResult WrappedVulkan::vkFlushMappedMemoryRanges(
for(uint32_t i=0; i < memRangeCount; i++)
{
auto it = m_MemoryInfo.find(GetResID(pMemRanges[i].mem));
it->second.mapFlushed = true;
ResourceId memid = GetResID(pMemRanges[i].mem);
{
SCOPED_LOCK(m_CurrentMapsLock);
auto it = m_CurrentMaps.find(memid);
RDCASSERT(it != m_CurrentMaps.end());
it->second.mapFlushed = true;
}
if(ret == VK_SUCCESS && m_State >= WRITING_CAPFRAME)
{
@@ -407,8 +435,6 @@ bool WrappedVulkan::Serialise_vkBindBufferMemory(
buffer = GetResourceManager()->GetLiveHandle<VkBuffer>(bufId);
mem = GetResourceManager()->GetLiveHandle<VkDeviceMemory>(memId);
m_MemBindState[GetResID(buffer)] = GetResID(mem);
ObjDisp(device)->BindBufferMemory(Unwrap(device), Unwrap(buffer), Unwrap(mem), offs);
}
@@ -598,6 +624,8 @@ bool WrappedVulkan::Serialise_vkCreateBuffer(
device = GetResourceManager()->GetLiveHandle<VkDevice>(devId);
VkBuffer buf = VK_NULL_HANDLE;
m_CreationInfo.m_Buffer[id].Init(&info);
// ensure we can always readback from buffers
info.usage |= VK_BUFFER_USAGE_TRANSFER_SOURCE_BIT;