Add specialised rdcarray which implements key/value lookup

This commit is contained in:
baldurk
2020-08-14 14:56:54 +01:00
parent d2e0b7ceb1
commit 73cc1f5476
15 changed files with 535 additions and 69 deletions
+274
View File
@@ -0,0 +1,274 @@
#pragma once
#include "rdcarray.h"
// this is a container with a key-value interface but no strong ordering guarantee.
// The storage is an array of K,V pairs, which are unsorted below a given threshold. As a result
// this should be favoured in cases where the absolute number of K,V pairs is relatively low - not
// many thousands.
// The map can be forced to be sorted if SortThreshold is set to 0.
// For ease of transition it presents a std::map like interface, though it has weaker guarantees
// than the STL structures.
DOCUMENT("");
template <typename Key, typename Value, size_t SortThreshold = 16>
struct rdcflatmap
{
  using iterator = rdcpair<Key, Value> *;
  using const_iterator = const rdcpair<Key, Value> *;
  using size_type = size_t;
  DOCUMENT("");
  // Returns an iterator to the element with key `id`, or end() if it's not present.
  iterator find(const Key &id)
  {
    if(sorted)
      return sorted_find(id);
    return unsorted_find(id);
  }
  const_iterator find(const Key &id) const
  {
    if(sorted)
      return sorted_find(id);
    return unsorted_find(id);
  }
  // Removes the element with key `id` if present. Erasing a missing key is a no-op.
  void erase(const Key &id)
  {
    if(sorted)
      return sorted_erase(id);
    return unsorted_erase(id);
  }
  // Removes the element pointed to by `it`, which must be a valid dereferenceable iterator.
  void erase(rdcpair<Key, Value> *it) { storage.erase(it - begin()); }
  // Returns a reference to the value for `id`, default-constructing and inserting it if the key
  // is not present - same semantics as std::map::operator[].
  Value &operator[](const Key &id)
  {
    if(sorted)
      return sorted_at(id);
    // pessimistically assume an insertion
    if(size() >= SortThreshold)
    {
      sort();
      return sorted_at(id);
    }
    return unsorted_at(id);
  }
  // Hinted insert. When unsorted the hint is trusted and the pair is inserted at that position.
  // When sorted the hint is validated first - an invalid hint (which includes the key already
  // being present) falls back to the normal insert path, which handles duplicates.
  iterator insert(rdcpair<Key, Value> *it, const rdcpair<Key, Value> &val)
  {
    size_t idx = it - begin();
    if(sorted && !hint_valid(idx, val.first))
      return insert(val).first;
    storage.insert(idx, val);
    return begin() + idx;
  }
  iterator insert(rdcpair<Key, Value> *it, rdcpair<Key, Value> &&val)
  {
    size_t idx = it - begin();
    if(sorted && !hint_valid(idx, val.first))
      return insert(std::move(val)).first;
    storage.insert(idx, std::move(val));
    return begin() + idx;
  }
  // Inserts `val` if its key isn't already present. This forces a sort first, since detecting
  // duplicates efficiently requires a binary search. Returns an iterator to the (possibly
  // pre-existing) element and whether an insertion happened, as std::map::insert does.
  rdcpair<iterator, bool> insert(const rdcpair<Key, Value> &val)
  {
    if(!sorted)
      sort();
    size_t idx = lower_bound(val.first);
    bool inserted = false;
    if(idx >= size() || storage.at(idx).first != val.first)
    {
      storage.insert(idx, val);
      inserted = true;
    }
    return {(begin() + idx), inserted};
  }
  rdcpair<iterator, bool> insert(rdcpair<Key, Value> &&val)
  {
    if(!sorted)
      sort();
    size_t idx = lower_bound(val.first);
    bool inserted = false;
    if(idx >= size() || storage.at(idx).first != val.first)
    {
      storage.insert(idx, std::move(val));
      inserted = true;
    }
    return {(begin() + idx), inserted};
  }
  // Returns an iterator to the first element with a key greater than `key`. Forces a sort if
  // necessary.
  iterator upper_bound(const Key &key)
  {
    if(!sorted)
      sort();
    size_t idx = lower_bound(key);
    // almost the same behaviour as lower_bound, except if we actually have the key, return the next
    // element.
    if(idx < size() && storage.at(idx).first == key)
      return begin() + idx + 1;
    return begin() + idx;
  }
  // NOTE: the const overload cannot sort, so it assumes the map is already sorted. This is only
  // reliable when SortThreshold is 0 or a sorting operation has already happened.
  const_iterator upper_bound(const Key &key) const
  {
    size_t idx = lower_bound(key);
    // almost the same behaviour as lower_bound, except if we actually have the key, return the next
    // element.
    if(idx < size() && storage.at(idx).first == key)
      return begin() + idx + 1;
    return begin() + idx;
  }
  iterator begin() { return storage.begin(); }
  iterator end() { return storage.end(); }
  const_iterator begin() const { return storage.begin(); }
  const_iterator end() const { return storage.end(); }
  bool empty() const { return storage.empty(); }
  size_t size() const { return storage.size(); }
  void swap(rdcflatmap &other)
  {
    // the sorted flag travels with the storage it describes
    std::swap(sorted, other.sorted);
    storage.swap(other.storage);
  }
  void clear() { storage.clear(); }
private:
  rdcarray<rdcpair<Key, Value>> storage;
  // with a threshold of 0 the map is permanently sorted
  bool sorted = (SortThreshold == 0);
  // Checks that inserting a new key at position `idx` keeps the array sorted. We require
  // [idx-1] < key < [idx] (only considering the sides that exist). If the key is already in the
  // array this check deliberately fails, so the caller takes the full insert path which handles
  // duplicates.
  bool hint_valid(size_t idx, const Key &key) const
  {
    if(idx < storage.size() && !(key < storage.at(idx).first))
      return false;
    if(idx > 0 && !(storage.at(idx - 1).first < key))
      return false;
    return true;
  }
  // Sorts the storage by key and marks the map as sorted from here on.
  void sort()
  {
    std::sort(storage.begin(), storage.end(),
              [](const rdcpair<Key, Value> &a, const rdcpair<Key, Value> &b) {
                return a.first < b.first;
              });
    sorted = true;
  }
  // binary-search lookup, only valid when sorted
  iterator sorted_find(const Key &id)
  {
    size_t idx = lower_bound(id);
    if(idx >= size() || storage.at(idx).first != id)
      return end();
    return begin() + idx;
  }
  const_iterator sorted_find(const Key &id) const
  {
    size_t idx = lower_bound(id);
    if(idx >= size() || storage.at(idx).first != id)
      return end();
    return begin() + idx;
  }
  void sorted_erase(const Key &id)
  {
    size_t idx = lower_bound(id);
    if(idx < size() && storage.at(idx).first == id)
      storage.erase(idx);
  }
  // operator[] implementation for the sorted case: insert a default value if the key is missing
  Value &sorted_at(const Key &id)
  {
    size_t idx = lower_bound(id);
    if(idx >= size() || storage.at(idx).first != id)
    {
      storage.insert(idx, {id, Value()});
    }
    return (begin() + idx)->second;
  }
  // Returns the index of the first element whose key is not less than `id` (size() if none).
  // Assumes the storage is sorted.
  size_t lower_bound(const Key &id) const
  {
    // start looking at the whole range
    size_t start = 0, sz = size();
    // continue iterating until the range is empty
    while(sz > 0)
    {
      const size_t halfsz = sz / 2;
      const size_t mid = start + halfsz;
      const Key comp = storage.at(mid).first;
      if(comp < id)
      {
        start = mid + 1;
        sz -= halfsz + 1;
      }
      else
      {
        sz = halfsz;
      }
    }
    return start;
  }
  // linear-scan lookup for the small unsorted case
  iterator unsorted_find(const Key &id)
  {
    for(auto it = begin(); it != end(); ++it)
      if(it->first == id)
        return it;
    return end();
  }
  const_iterator unsorted_find(const Key &id) const
  {
    for(auto it = begin(); it != end(); ++it)
      if(it->first == id)
        return it;
    return end();
  }
  void unsorted_erase(const Key &id)
  {
    auto it = find(id);
    if(it != end())
      storage.erase(it - begin());
  }
  // operator[] implementation for the unsorted case: append a default value if the key is missing
  Value &unsorted_at(const Key &id)
  {
    auto it = find(id);
    if(it != end())
      return it->second;
    // only allocate once for the unsorted size
    storage.reserve(SortThreshold);
    storage.push_back({id, Value()});
    return storage.back().second;
  }
};
+14 -17
View File
@@ -25,6 +25,8 @@
#pragma once
#include <map>
#include "api/replay/rdcflatmap.h"
#include "api/replay/renderdoc_replay.h"
#include "common/common.h"
template <typename T>
@@ -81,7 +83,7 @@ public:
inline void split(uint64_t x)
{
if(this->start() < x)
this->iter = this->owner->insert(std::pair<uint64_t, T>(x, this->value())).first;
this->iter = this->owner->insert(rdcpair<uint64_t, T>(x, this->value())).first;
}
// Merge this interval with the interval to the left, if both intervals have
@@ -160,35 +162,30 @@ template <typename T>
struct Intervals
{
public:
typedef IntervalRef<T, std::map<uint64_t, T>, typename std::map<uint64_t, T>::iterator> interval;
typedef IntervalsIter<T, std::map<uint64_t, T>, typename std::map<uint64_t, T>::iterator, interval> iterator;
using MapType = rdcflatmap<uint64_t, T, 0>;
typedef ConstIntervalRef<T, const std::map<uint64_t, T>, typename std::map<uint64_t, T>::const_iterator>
const_interval;
typedef IntervalsIter<T, const std::map<uint64_t, T>,
typename std::map<uint64_t, T>::const_iterator, const_interval>
const_iterator;
typedef IntervalRef<T, MapType, typename MapType::iterator> interval;
typedef IntervalsIter<T, MapType, typename MapType::iterator, interval> iterator;
typedef ConstIntervalRef<T, const MapType, typename MapType::const_iterator> const_interval;
typedef IntervalsIter<T, const MapType, typename MapType::const_iterator, const_interval> const_iterator;
private:
std::map<uint64_t, T> StartPoints;
MapType StartPoints;
iterator Wrap(typename std::map<uint64_t, T>::iterator iter)
{
return iterator(&StartPoints, iter);
}
const_iterator Wrap(typename std::map<uint64_t, T>::const_iterator iter) const
iterator Wrap(typename MapType::iterator iter) { return iterator(&StartPoints, iter); }
const_iterator Wrap(typename MapType::const_iterator iter) const
{
return const_iterator(&StartPoints, iter);
}
public:
Intervals() : StartPoints{{0, T()}} {}
Intervals() { StartPoints.insert({0, T()}); }
inline iterator end() { return Wrap(StartPoints.end()); }
inline iterator begin() { return Wrap(StartPoints.begin()); }
inline const_iterator begin() const { return Wrap(StartPoints.begin()); }
inline const_iterator end() const { return Wrap(StartPoints.end()); }
typedef typename std::map<uint64_t, T>::size_type size_type;
typedef typename MapType::size_type size_type;
inline size_type size() const { return StartPoints.size(); }
// Find the interval containing `x`.
iterator find(uint64_t x)
+7 -13
View File
@@ -28,6 +28,7 @@
#include <algorithm>
#include <map>
#include <set>
#include "api/replay/rdcflatmap.h"
#include "api/replay/resourceid.h"
#include "common/threading.h"
#include "core/core.h"
@@ -610,7 +611,7 @@ public:
void MarkResourceFrameReferenced(ResourceId id, FrameRefType refType, Compose comp);
inline void MarkResourceFrameReferenced(ResourceId id, FrameRefType refType);
void MarkBackgroundFrameReferenced(const rdcarray<rdcpair<ResourceId, FrameRefType>> &refs);
void MarkBackgroundFrameReferenced(const rdcflatmap<ResourceId, FrameRefType> &refs);
void CleanBackgroundFrameReferences();
///////////////////////////////////////////
@@ -829,7 +830,7 @@ ResourceManager<Configuration>::~ResourceManager()
template <typename Configuration>
void ResourceManager<Configuration>::MarkBackgroundFrameReferenced(
const rdcarray<rdcpair<ResourceId, FrameRefType>> &refs)
const rdcflatmap<ResourceId, FrameRefType> &refs)
{
SCOPED_LOCK(m_Lock);
@@ -837,24 +838,17 @@ void ResourceManager<Configuration>::MarkBackgroundFrameReferenced(
{
if(refs.size() <= m_ResourceRefTimes.size())
{
for(const rdcpair<ResourceId, FrameRefType> &ref : refs)
{
UpdateLastWriteAndPartialUseTime(ref.first, ref.second);
}
for(auto it = refs.begin(); it != refs.end(); ++it)
UpdateLastWriteAndPartialUseTime(it->first, it->second);
}
else
{
for(const ResourceRefTimes &res : m_ResourceRefTimes)
{
const rdcpair<ResourceId, FrameRefType> *it = std::lower_bound(
refs.begin(), refs.end(), make_rdcpair(res.id, eFrameRef_None),
[](const rdcpair<ResourceId, FrameRefType> &a,
const rdcpair<ResourceId, FrameRefType> &b) { return a.first < b.first; });
auto it = refs.find(res.id);
if(it != refs.end() && it->first == res.id)
{
if(it != refs.end())
UpdateLastWriteAndPartialUseTime(it->first, it->second);
}
}
}
}
+6 -8
View File
@@ -1161,7 +1161,7 @@ void DescriptorSetData::UpdateBackgroundRefCache(const rdcarray<ResourceId> &ids
if(backgroundFrameRefs.empty())
{
for(auto refit = bindFrameRefs.begin(); refit != bindFrameRefs.end(); ++refit)
backgroundFrameRefs.push_back(make_rdcpair(refit->first, refit->second.second));
backgroundFrameRefs.insert(make_rdcpair(refit->first, refit->second.second));
return;
}
@@ -1172,11 +1172,9 @@ void DescriptorSetData::UpdateBackgroundRefCache(const rdcarray<ResourceId> &ids
// find the Id we're looking for in the remainder of the cache. This won't skip over any one
// that we care about because we're iterating in ascending Id order
cacheit = std::lower_bound(
cacheit, backgroundFrameRefs.end(), make_rdcpair(id, eFrameRef_None),
[](const rdcpair<ResourceId, FrameRefType> &a, const rdcpair<ResourceId, FrameRefType> &b) {
return a.first < b.first;
});
cacheit = std::lower_bound(cacheit, backgroundFrameRefs.end(), id,
[](const rdcpair<ResourceId, FrameRefType> &a,
const ResourceId &id) { return a.first < id; });
auto bindit = bindFrameRefs.find(id);
@@ -1184,7 +1182,7 @@ void DescriptorSetData::UpdateBackgroundRefCache(const rdcarray<ResourceId> &ids
if(bindit == bindFrameRefs.end())
{
if(cacheit != backgroundFrameRefs.end())
backgroundFrameRefs.erase(cacheit - backgroundFrameRefs.begin());
backgroundFrameRefs.erase(cacheit);
continue;
}
@@ -1196,7 +1194,7 @@ void DescriptorSetData::UpdateBackgroundRefCache(const rdcarray<ResourceId> &ids
// calculate the index
size_t idx = cacheit - backgroundFrameRefs.begin();
// insert the entry
backgroundFrameRefs.insert(idx, {id, refType});
backgroundFrameRefs.insert(cacheit, {id, refType});
// re-initialise our iterator to point here, as the above insert might have invalidated it due
// to a resize
cacheit = backgroundFrameRefs.begin() + idx;
+1 -1
View File
@@ -4293,7 +4293,7 @@ bool WrappedVulkan::EraseImageState(ResourceId id)
return false;
}
void WrappedVulkan::UpdateImageStates(const std::map<ResourceId, ImageState> &dstStates)
void WrappedVulkan::UpdateImageStates(const rdcflatmap<ResourceId, ImageState> &dstStates)
{
// this function expects the number of updates to be orders of magnitude fewer than the number of
// existing images. If there are a small number of images in total then it doesn't matter much,
+2 -2
View File
@@ -587,7 +587,7 @@ private:
VulkanRenderState state;
std::map<ResourceId, ImageState> imageStates;
rdcflatmap<ResourceId, ImageState> imageStates;
ResourceId pushDescriptorID[2][64];
@@ -1118,7 +1118,7 @@ public:
LockedImageStateRef InsertImageState(VkImage wrappedHandle, ResourceId id, const ImageInfo &info,
FrameRefType refType, bool *inserted = NULL);
bool EraseImageState(ResourceId id);
void UpdateImageStates(const std::map<ResourceId, ImageState> &dstStates);
void UpdateImageStates(const rdcflatmap<ResourceId, ImageState> &dstStates);
inline ImageTransitionInfo GetImageTransitionInfo() const
{
+2 -2
View File
@@ -947,8 +947,8 @@ void ImageState::MergeCaptureBeginState(const ImageState &initialState)
maxRefType = initialState.maxRefType;
}
void ImageState::Merge(std::map<ResourceId, ImageState> &states,
const std::map<ResourceId, ImageState> &dstStates, ImageTransitionInfo info)
void ImageState::Merge(rdcflatmap<ResourceId, ImageState> &states,
const rdcflatmap<ResourceId, ImageState> &dstStates, ImageTransitionInfo info)
{
auto it = states.begin();
auto dstIt = dstStates.begin();
+5 -5
View File
@@ -425,7 +425,7 @@ bool VulkanResourceManager::Serialise_DeviceMemoryRefs(SerialiserType &ser,
{
ResourceId mem = it_data->memory;
auto res = m_MemFrameRefs.insert(std::pair<ResourceId, MemRefs>(mem, MemRefs()));
auto res = m_MemFrameRefs.insert(rdcpair<ResourceId, MemRefs>(mem, MemRefs()));
RDCASSERTMSG("MemRefIntervals for each memory resource must be contiguous", res.second);
Intervals<FrameRefType> &rangeRefs = res.first->second.rangeRefs;
@@ -784,7 +784,7 @@ void VulkanResourceManager::ApplyBarriers(uint32_t queueFamilyIndex,
}
}
void VulkanResourceManager::RecordBarriers(std::map<ResourceId, ImageState> &states,
void VulkanResourceManager::RecordBarriers(rdcflatmap<ResourceId, ImageState> &states,
uint32_t queueFamilyIndex, uint32_t numBarriers,
const VkImageMemoryBarrier *barriers)
{
@@ -866,7 +866,7 @@ void VulkanResourceManager::MarkMemoryFrameReferenced(ResourceId mem, VkDeviceSi
void VulkanResourceManager::AddMemoryFrameRefs(ResourceId mem)
{
m_MemFrameRefs.insert({mem, MemRefs()});
m_MemFrameRefs[mem] = MemRefs();
}
void VulkanResourceManager::AddDeviceMemory(ResourceId mem)
@@ -883,7 +883,7 @@ void VulkanResourceManager::RemoveDeviceMemory(ResourceId mem)
m_DeviceMemories.erase(mem);
}
void VulkanResourceManager::MergeReferencedMemory(std::map<ResourceId, MemRefs> &memRefs)
void VulkanResourceManager::MergeReferencedMemory(rdcflatmap<ResourceId, MemRefs> &memRefs)
{
SCOPED_LOCK(m_Lock);
@@ -891,7 +891,7 @@ void VulkanResourceManager::MergeReferencedMemory(std::map<ResourceId, MemRefs>
{
auto i = m_MemFrameRefs.find(j->first);
if(i == m_MemFrameRefs.end())
m_MemFrameRefs.insert(*j);
m_MemFrameRefs[j->first] = j->second;
else
i->second.Merge(j->second);
}
+3 -3
View File
@@ -269,7 +269,7 @@ public:
rdcarray<rdcpair<ResourceId, ImageRegionState> > &states,
std::map<ResourceId, ImageLayouts> &layouts);
void RecordBarriers(std::map<ResourceId, ImageState> &states, uint32_t queueFamilyIndex,
void RecordBarriers(rdcflatmap<ResourceId, ImageState> &states, uint32_t queueFamilyIndex,
uint32_t numBarriers, const VkImageMemoryBarrier *barriers);
template <typename SerialiserType>
@@ -447,7 +447,7 @@ public:
void AddDeviceMemory(ResourceId mem);
void RemoveDeviceMemory(ResourceId mem);
void MergeReferencedMemory(std::map<ResourceId, MemRefs> &memRefs);
void MergeReferencedMemory(rdcflatmap<ResourceId, MemRefs> &memRefs);
void ClearReferencedMemory();
MemRefs *FindMemRefs(ResourceId mem);
ImgRefs *FindImgRefs(ResourceId img);
@@ -482,7 +482,7 @@ private:
rdcarray<ResourceId> InitialContentResources();
WrappedVulkan *m_Core;
std::map<ResourceId, MemRefs> m_MemFrameRefs;
rdcflatmap<ResourceId, MemRefs> m_MemFrameRefs;
std::set<ResourceId> m_DeviceMemories;
InitPolicy m_InitPolicy = eInitPolicy_CopyAll;
};
+1 -1
View File
@@ -4198,7 +4198,7 @@ void ResourceInfo::Update(uint32_t numBindings, const VkSparseMemoryBind *pBindi
}
}
FrameRefType MarkImageReferenced(std::map<ResourceId, ImageState> &imageStates, ResourceId img,
FrameRefType MarkImageReferenced(rdcflatmap<ResourceId, ImageState> &imageStates, ResourceId img,
const ImageInfo &imageInfo, const ImageSubresourceRange &range,
uint32_t queueFamilyIndex, FrameRefType refType)
{
+16 -16
View File
@@ -1032,9 +1032,9 @@ struct CmdBufferRecordingInfo
rdcarray<VkResourceRecord *> subcmds;
std::map<ResourceId, ImageState> imageStates;
rdcflatmap<ResourceId, ImageState> imageStates;
std::map<ResourceId, MemRefs> memFrameRefs;
rdcflatmap<ResourceId, MemRefs> memFrameRefs;
// AdvanceFrame/Present should be called after this buffer is submitted
bool present;
@@ -1063,13 +1063,13 @@ struct DescriptorSetData
// the refcount has the high-bit set if this resource has sparse
// mapping information
static const uint32_t SPARSE_REF_BIT = 0x80000000;
std::map<ResourceId, rdcpair<uint32_t, FrameRefType>> bindFrameRefs;
std::map<ResourceId, MemRefs> bindMemRefs;
std::map<ResourceId, ImageState> bindImageStates;
rdcflatmap<ResourceId, rdcpair<uint32_t, FrameRefType>> bindFrameRefs;
rdcflatmap<ResourceId, MemRefs> bindMemRefs;
rdcflatmap<ResourceId, ImageState> bindImageStates;
void UpdateBackgroundRefCache(const rdcarray<ResourceId> &ids);
rdcarray<rdcpair<ResourceId, FrameRefType>> backgroundFrameRefs;
rdcflatmap<ResourceId, FrameRefType> backgroundFrameRefs;
};
struct PipelineLayoutData
@@ -1675,8 +1675,8 @@ struct ImageState
FrameRefCompFunc compose);
void Merge(const ImageState &other, ImageTransitionInfo info);
void MergeCaptureBeginState(const ImageState &initialState);
static void Merge(std::map<ResourceId, ImageState> &states,
const std::map<ResourceId, ImageState> &dstStates, ImageTransitionInfo info);
static void Merge(rdcflatmap<ResourceId, ImageState> &states,
const rdcflatmap<ResourceId, ImageState> &dstStates, ImageTransitionInfo info);
void DiscardContents(const ImageSubresourceRange &range);
inline void DiscardContents() { DiscardContents(GetImageInfo().FullRange()); }
inline void RecordUse(const ImageSubresourceRange &range, FrameRefType refType,
@@ -2053,23 +2053,23 @@ FrameRefType MemRefs::Merge(MemRefs &other, Compose comp)
struct ImageLayouts;
template <typename Compose>
FrameRefType MarkImageReferenced(std::map<ResourceId, ImgRefs> &imgRefs, ResourceId img,
FrameRefType MarkImageReferenced(rdcflatmap<ResourceId, ImgRefs> &imgRefs, ResourceId img,
const ImageInfo &imageInfo, const ImageRange &range,
FrameRefType refType, Compose comp);
inline FrameRefType MarkImageReferenced(std::map<ResourceId, ImgRefs> &imgRefs, ResourceId img,
inline FrameRefType MarkImageReferenced(rdcflatmap<ResourceId, ImgRefs> &imgRefs, ResourceId img,
const ImageInfo &imageInfo, const ImageRange &range,
FrameRefType refType)
{
return MarkImageReferenced(imgRefs, img, imageInfo, range, refType, ComposeFrameRefs);
}
FrameRefType MarkImageReferenced(std::map<ResourceId, ImageState> &imageStates, ResourceId img,
FrameRefType MarkImageReferenced(rdcflatmap<ResourceId, ImageState> &imageStates, ResourceId img,
const ImageInfo &imageInfo, const ImageSubresourceRange &range,
uint32_t queueFamilyIndex, FrameRefType refType);
template <typename Compose>
FrameRefType MarkMemoryReferenced(std::map<ResourceId, MemRefs> &memRefs, ResourceId mem,
FrameRefType MarkMemoryReferenced(rdcflatmap<ResourceId, MemRefs> &memRefs, ResourceId mem,
VkDeviceSize offset, VkDeviceSize size, FrameRefType refType,
Compose comp)
{
@@ -2078,7 +2078,7 @@ FrameRefType MarkMemoryReferenced(std::map<ResourceId, MemRefs> &memRefs, Resour
auto refs = memRefs.find(mem);
if(refs == memRefs.end())
{
memRefs.insert(std::pair<ResourceId, MemRefs>(mem, MemRefs(offset, size, refType)));
memRefs[mem] = MemRefs(offset, size, refType);
return refType;
}
else
@@ -2087,7 +2087,7 @@ FrameRefType MarkMemoryReferenced(std::map<ResourceId, MemRefs> &memRefs, Resour
}
}
inline FrameRefType MarkMemoryReferenced(std::map<ResourceId, MemRefs> &memRefs, ResourceId mem,
inline FrameRefType MarkMemoryReferenced(rdcflatmap<ResourceId, MemRefs> &memRefs, ResourceId mem,
VkDeviceSize offset, VkDeviceSize size, FrameRefType refType)
{
return MarkMemoryReferenced(memRefs, mem, offset, size, refType, ComposeFrameRefs);
@@ -2452,7 +2452,7 @@ uint32_t GetPlaneByteSize(uint32_t Width, uint32_t Height, uint32_t Depth, VkFor
uint32_t mip, uint32_t plane);
template <typename Compose>
FrameRefType MarkImageReferenced(std::map<ResourceId, ImgRefs> &imgRefs, ResourceId img,
FrameRefType MarkImageReferenced(rdcflatmap<ResourceId, ImgRefs> &imgRefs, ResourceId img,
const ImageInfo &imageInfo, const ImageRange &range,
FrameRefType refType, Compose comp)
{
@@ -2461,7 +2461,7 @@ FrameRefType MarkImageReferenced(std::map<ResourceId, ImgRefs> &imgRefs, Resourc
auto refs = imgRefs.find(img);
if(refs == imgRefs.end())
{
refs = imgRefs.insert(std::make_pair(img, ImgRefs(imageInfo))).first;
refs = imgRefs.insert(make_rdcpair(img, ImgRefs(imageInfo))).first;
}
return refs->second.Update(range, refType, comp);
}
@@ -1094,7 +1094,7 @@ bool WrappedVulkan::Serialise_vkEndCommandBuffer(SerialiserType &ser, VkCommandB
// pretend they never happened, we then reverse their effects so that our layout tracking
// is accurate and the images end up in the layout they were in during the last active
// subpass
std::map<ResourceId, ImageState> renderPassEndStates;
rdcflatmap<ResourceId, ImageState> renderPassEndStates;
for(uint32_t sub = renderstate.subpass + 1; sub < numSubpasses; sub++)
{
+1
View File
@@ -181,6 +181,7 @@
<ClInclude Include="api\replay\gl_pipestate.h" />
<ClInclude Include="api\replay\pipestate.h" />
<ClInclude Include="api\replay\rdcarray.h" />
<ClInclude Include="api\replay\rdcflatmap.h" />
<ClInclude Include="api\replay\rdcpair.h" />
<ClInclude Include="api\replay\rdcstr.h" />
<ClInclude Include="api\replay\renderdoc_replay.h" />
+3
View File
@@ -543,6 +543,9 @@
<ClInclude Include="3rdparty\compressonator\Common_Def.h">
<Filter>3rdparty\compressonator</Filter>
</ClInclude>
<ClInclude Include="api\replay\rdcflatmap.h">
<Filter>API\Replay</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="maths\camera.cpp">
+199
View File
@@ -27,8 +27,10 @@
#if ENABLED(ENABLE_UNIT_TESTS)
#include "api/replay/rdcarray.h"
#include "api/replay/rdcflatmap.h"
#include "api/replay/rdcpair.h"
#include "api/replay/rdcstr.h"
#include "common/formatting.h"
#include "common/globalconfig.h"
#include "common/timing.h"
#include "os/os_specific.h"
@@ -1695,4 +1697,201 @@ TEST_CASE("Test string type", "[basictypes][string]")
};
};
TEST_CASE("Test flatmap type", "[basictypes][flatmap]")
{
// these tests use a small SortThreshold so the unsorted -> sorted transition is easy to trigger
SECTION("basic lookup of values before and after sorting")
{
rdcflatmap<uint32_t, rdcstr, 16> test;
test[5] = "foo";
test[7] = "bar";
test[3] = "asdf";
CHECK(test[5] == "foo");
CHECK(test[7] == "bar");
CHECK(test[3] == "asdf");
CHECK(!test.empty());
CHECK(test.size() == 3);
// order is not guaranteed, but multiplying the keys in any order will give us a unique value
// because they're prime
uint32_t product = 1;
uint32_t count = 0;
for(auto it = test.begin(); it != test.end(); ++it)
{
product *= it->first;
count++;
}
CHECK(product == 3 * 5 * 7);
CHECK(count == 3);
// force the map to sort itself
for(uint32_t i = 0; i < 24; i++)
test[999 + i] = StringFormat::Fmt("test%u", 999 + i);
// we should still be able to look up the same values
CHECK(test[5] == "foo");
CHECK(test[7] == "bar");
CHECK(test[3] == "asdf");
CHECK(!test.empty());
CHECK(test.size() == 27);
// find() should locate present keys and return end() for absent ones
CHECK(test.find(5)->second == "foo");
CHECK(test.find(6) == test.end());
CHECK(test.find(7)->second == "bar");
CHECK(test.find(8) == test.end());
// check clearing
test.clear();
CHECK(test.empty());
CHECK(test.size() == 0);
// this inserts the values as default-initialised, as std::map does
CHECK(test[5] == "");
CHECK(test[7] == "");
CHECK(test[3] == "");
CHECK(test.size() == 3);
};
// swap should exchange the contents of the two maps completely
SECTION("swap")
{
rdcflatmap<uint32_t, rdcstr> test;
test[5] = "foo";
test[7] = "bar";
test[3] = "asdf";
rdcflatmap<uint32_t, rdcstr> swapped;
test.swap(swapped);
CHECK(swapped[5] == "foo");
CHECK(swapped[7] == "bar");
CHECK(swapped[3] == "asdf");
CHECK(swapped.size() == 3);
CHECK(test.empty());
};
SECTION("insert with no hint")
{
rdcflatmap<uint32_t, rdcstr> test;
test[5] = "foo";
test[7] = "bar";
test[3] = "asdf";
CHECK(test[5] == "foo");
CHECK(test[7] == "bar");
CHECK(test[3] == "asdf");
CHECK(test.find(15) == test.end());
test.insert({15, "inserted"});
CHECK(test.find(15)->second == "inserted");
};
SECTION("insert with hint")
{
rdcflatmap<uint32_t, rdcstr> test;
test[5] = "foo";
test[7] = "bar";
test[3] = "asdf";
// insert value with proper hint
test.insert(test.begin() + 1, {6, "middle"});
CHECK(test.find(3)->second == "asdf");
CHECK(test.find(5)->second == "foo");
CHECK(test.find(6)->second == "middle");
CHECK(test.find(7)->second == "bar");
// insert value with wrong hint
test.insert(test.begin(), {100, "highvalue"});
CHECK(test.find(100)->second == "highvalue");
// force the map to sort itself
for(uint32_t i = 0; i < 24; i++)
test[999 + i] = StringFormat::Fmt("test%u", 999 + i);
// once sorted, a wrong hint must still result in a findable element
test.insert(test.begin(), {101, "highvalue2"});
CHECK(test.find(101)->second == "highvalue2");
};
SECTION("erase")
{
rdcflatmap<uint32_t, rdcstr> test;
test[5] = "foo";
test[7] = "bar";
test[3] = "asdf";
CHECK(test.find(5)->second == "foo");
test.erase(5);
CHECK(test.find(5) == test.end());
test[5] = "foo";
CHECK(test.find(5)->second == "foo");
// erasing keys that are present removes them; erasing absent keys is a no-op
test.erase(3);
test.erase(4);
test.erase(6);
test.erase(7);
CHECK(test.find(5)->second == "foo");
};
SECTION("upper_bound")
{
// set SortThreshold to 0 to force sorted semantics always
rdcflatmap<uint32_t, rdcstr, 0> test;
test[5] = "foo";
test[7] = "bar";
test[3] = "asdf";
// check that they got sorted
auto it = test.begin();
CHECK(it->first == 3);
CHECK(it->second == "asdf");
++it;
CHECK(it->first == 5);
CHECK(it->second == "foo");
++it;
CHECK(it->first == 7);
CHECK(it->second == "bar");
// upper_bound returns the first element strictly greater than the query key
it = test.upper_bound(2);
CHECK(it->first == 3);
CHECK(it->second == "asdf");
it = test.upper_bound(3);
CHECK(it->first == 5);
CHECK(it->second == "foo");
it = test.upper_bound(4);
CHECK(it->first == 5);
CHECK(it->second == "foo");
it = test.upper_bound(5);
CHECK(it->first == 7);
CHECK(it->second == "bar");
it = test.upper_bound(6);
CHECK(it->first == 7);
CHECK(it->second == "bar");
// past the largest key, upper_bound returns end()
it = test.upper_bound(7);
CHECK(it == test.end());
it = test.upper_bound(8);
CHECK(it == test.end());
};
};
#endif // ENABLED(ENABLE_UNIT_TESTS)