mirror of
https://github.com/baldurk/renderdoc.git
synced 2026-05-06 10:00:40 +00:00
Don't perform queue submit until after flushing maps/references
* While actively capturing, we might do significant work to flush coherent mapped memory regions and to prepare initial contents for postponed resources that are about to be write-referenced. We need to do that before submitting the actual work to the queue, or else the captured contents may be corrupted.
This commit is contained in:
@@ -651,9 +651,6 @@ public:
|
||||
WrappedResourceType GetWrapper(RealResourceType real);
|
||||
void RemoveWrapper(RealResourceType real);
|
||||
|
||||
void Prepare_InitialStateIfPostponed(ResourceId id);
|
||||
void SkipOrPostponeOrPrepare_InitialState(ResourceId id, FrameRefType refType);
|
||||
|
||||
void ResetLastWriteTimes();
|
||||
void ResetCaptureStartTime();
|
||||
void ResetLastPartialUseTimes();
|
||||
@@ -687,6 +684,9 @@ protected:
|
||||
|
||||
void UpdateLastWriteAndPartialUseTime(ResourceId id, FrameRefType refType);
|
||||
|
||||
void Prepare_InitialStateIfPostponed(ResourceId id, bool midframe);
|
||||
void SkipOrPostponeOrPrepare_InitialState(ResourceId id, FrameRefType refType);
|
||||
|
||||
// very coarse lock, protects EVERYTHING. This could certainly be improved and it may be a
|
||||
// bottleneck
|
||||
// for performance. Given that the main use cases are write-rarely read-often the lock should be
|
||||
@@ -915,7 +915,7 @@ void ResourceManager<Configuration>::MarkResourceFrameReferenced(ResourceId id,
|
||||
|
||||
if(IsDirtyFrameRef(refType))
|
||||
{
|
||||
Prepare_InitialStateIfPostponed(id);
|
||||
Prepare_InitialStateIfPostponed(id, true);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1097,15 +1097,17 @@ void ResourceManager<Configuration>::FreeInitialContents()
|
||||
}
|
||||
|
||||
template <typename Configuration>
|
||||
void ResourceManager<Configuration>::Prepare_InitialStateIfPostponed(ResourceId id)
|
||||
void ResourceManager<Configuration>::Prepare_InitialStateIfPostponed(ResourceId id, bool midframe)
|
||||
{
|
||||
SCOPED_LOCK(m_Lock);
|
||||
|
||||
if(!IsResourcePostponed(id))
|
||||
return;
|
||||
|
||||
if(midframe)
|
||||
RDCLOG("Preparing resource %s after it has been postponed.", ToStr(id).c_str());
|
||||
|
||||
WrappedResourceType res = GetCurrentResource(id);
|
||||
RDCLOG("Preparing resource %s after it has been postponed.", ToStr(id).c_str());
|
||||
Prepare_InitialState(res);
|
||||
|
||||
m_PostponedResourceIDs.erase(id);
|
||||
@@ -1527,7 +1529,7 @@ void ResourceManager<Configuration>::InsertInitialContentsChunks(WriteSerialiser
|
||||
#endif
|
||||
|
||||
// Load postponed resource if needed.
|
||||
Prepare_InitialStateIfPostponed(id);
|
||||
Prepare_InitialStateIfPostponed(id, false);
|
||||
|
||||
dirty++;
|
||||
|
||||
@@ -1868,7 +1870,7 @@ void ResourceManager<Configuration>::ReleaseCurrentResource(ResourceId id)
|
||||
// if it was postponed, but is about to go away.
|
||||
if(IsActiveCapturing(m_State))
|
||||
{
|
||||
Prepare_InitialStateIfPostponed(id);
|
||||
Prepare_InitialStateIfPostponed(id, true);
|
||||
}
|
||||
|
||||
m_CurrentResourceMap.erase(id);
|
||||
|
||||
@@ -842,49 +842,7 @@ VkResult WrappedVulkan::vkQueueSubmit(VkQueue queue, uint32_t submitCount,
|
||||
tempmemSize += GetNextPatchSize(pSubmits[i].pNext);
|
||||
}
|
||||
|
||||
byte *memory = GetTempMemory(tempmemSize);
|
||||
|
||||
VkSubmitInfo *unwrappedSubmits = (VkSubmitInfo *)memory;
|
||||
memory += sizeof(VkSubmitInfo) * submitCount;
|
||||
|
||||
for(uint32_t i = 0; i < submitCount; i++)
|
||||
{
|
||||
RDCASSERT(pSubmits[i].sType == VK_STRUCTURE_TYPE_SUBMIT_INFO);
|
||||
unwrappedSubmits[i] = pSubmits[i];
|
||||
|
||||
VkSemaphore *unwrappedWaitSems = (VkSemaphore *)memory;
|
||||
memory += sizeof(VkSemaphore) * unwrappedSubmits[i].waitSemaphoreCount;
|
||||
|
||||
unwrappedSubmits[i].pWaitSemaphores =
|
||||
unwrappedSubmits[i].waitSemaphoreCount ? unwrappedWaitSems : NULL;
|
||||
for(uint32_t o = 0; o < unwrappedSubmits[i].waitSemaphoreCount; o++)
|
||||
unwrappedWaitSems[o] = Unwrap(pSubmits[i].pWaitSemaphores[o]);
|
||||
|
||||
VkCommandBuffer *unwrappedCommandBuffers = (VkCommandBuffer *)memory;
|
||||
memory += sizeof(VkCommandBuffer) * unwrappedSubmits[i].commandBufferCount;
|
||||
|
||||
unwrappedSubmits[i].pCommandBuffers =
|
||||
unwrappedSubmits[i].commandBufferCount ? unwrappedCommandBuffers : NULL;
|
||||
for(uint32_t o = 0; o < unwrappedSubmits[i].commandBufferCount; o++)
|
||||
unwrappedCommandBuffers[o] = Unwrap(pSubmits[i].pCommandBuffers[o]);
|
||||
unwrappedCommandBuffers += unwrappedSubmits[i].commandBufferCount;
|
||||
|
||||
VkSemaphore *unwrappedSignalSems = (VkSemaphore *)memory;
|
||||
memory += sizeof(VkSemaphore) * unwrappedSubmits[i].signalSemaphoreCount;
|
||||
|
||||
unwrappedSubmits[i].pSignalSemaphores =
|
||||
unwrappedSubmits[i].signalSemaphoreCount ? unwrappedSignalSems : NULL;
|
||||
for(uint32_t o = 0; o < unwrappedSubmits[i].signalSemaphoreCount; o++)
|
||||
unwrappedSignalSems[o] = Unwrap(pSubmits[i].pSignalSemaphores[o]);
|
||||
|
||||
UnwrapNextChain(m_State, "VkSubmitInfo", memory, (VkBaseInStructure *)&unwrappedSubmits[i]);
|
||||
appendChain((VkBaseInStructure *)&unwrappedSubmits[i], m_SubmitChain);
|
||||
}
|
||||
|
||||
VkResult ret;
|
||||
SERIALISE_TIME_CALL(ret = ObjDisp(queue)->QueueSubmit(Unwrap(queue), submitCount,
|
||||
unwrappedSubmits, Unwrap(fence)));
|
||||
|
||||
VkResult ret = VK_SUCCESS;
|
||||
bool present = false;
|
||||
|
||||
{
|
||||
@@ -1186,7 +1144,52 @@ VkResult WrappedVulkan::vkQueueSubmit(VkQueue queue, uint32_t submitCount,
|
||||
state.cpuReadPtr = state.mappedPtr;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
byte *memory = GetTempMemory(tempmemSize);
|
||||
|
||||
VkSubmitInfo *unwrappedSubmits = (VkSubmitInfo *)memory;
|
||||
memory += sizeof(VkSubmitInfo) * submitCount;
|
||||
|
||||
for(uint32_t i = 0; i < submitCount; i++)
|
||||
{
|
||||
RDCASSERT(pSubmits[i].sType == VK_STRUCTURE_TYPE_SUBMIT_INFO);
|
||||
unwrappedSubmits[i] = pSubmits[i];
|
||||
|
||||
VkSemaphore *unwrappedWaitSems = (VkSemaphore *)memory;
|
||||
memory += sizeof(VkSemaphore) * unwrappedSubmits[i].waitSemaphoreCount;
|
||||
|
||||
unwrappedSubmits[i].pWaitSemaphores =
|
||||
unwrappedSubmits[i].waitSemaphoreCount ? unwrappedWaitSems : NULL;
|
||||
for(uint32_t o = 0; o < unwrappedSubmits[i].waitSemaphoreCount; o++)
|
||||
unwrappedWaitSems[o] = Unwrap(pSubmits[i].pWaitSemaphores[o]);
|
||||
|
||||
VkCommandBuffer *unwrappedCommandBuffers = (VkCommandBuffer *)memory;
|
||||
memory += sizeof(VkCommandBuffer) * unwrappedSubmits[i].commandBufferCount;
|
||||
|
||||
unwrappedSubmits[i].pCommandBuffers =
|
||||
unwrappedSubmits[i].commandBufferCount ? unwrappedCommandBuffers : NULL;
|
||||
for(uint32_t o = 0; o < unwrappedSubmits[i].commandBufferCount; o++)
|
||||
unwrappedCommandBuffers[o] = Unwrap(pSubmits[i].pCommandBuffers[o]);
|
||||
unwrappedCommandBuffers += unwrappedSubmits[i].commandBufferCount;
|
||||
|
||||
VkSemaphore *unwrappedSignalSems = (VkSemaphore *)memory;
|
||||
memory += sizeof(VkSemaphore) * unwrappedSubmits[i].signalSemaphoreCount;
|
||||
|
||||
unwrappedSubmits[i].pSignalSemaphores =
|
||||
unwrappedSubmits[i].signalSemaphoreCount ? unwrappedSignalSems : NULL;
|
||||
for(uint32_t o = 0; o < unwrappedSubmits[i].signalSemaphoreCount; o++)
|
||||
unwrappedSignalSems[o] = Unwrap(pSubmits[i].pSignalSemaphores[o]);
|
||||
|
||||
UnwrapNextChain(m_State, "VkSubmitInfo", memory, (VkBaseInStructure *)&unwrappedSubmits[i]);
|
||||
appendChain((VkBaseInStructure *)&unwrappedSubmits[i], m_SubmitChain);
|
||||
}
|
||||
|
||||
SERIALISE_TIME_CALL(ret = ObjDisp(queue)->QueueSubmit(Unwrap(queue), submitCount,
|
||||
unwrappedSubmits, Unwrap(fence)));
|
||||
|
||||
if(capframe)
|
||||
{
|
||||
{
|
||||
CACHE_THREAD_SERIALISER();
|
||||
|
||||
|
||||
Reference in New Issue
Block a user