From 8430a532ba33324b7006c72acdf29fe2635a9619 Mon Sep 17 00:00:00 2001
From: Pasukhin Dmitry
Date: Mon, 23 Feb 2026 09:28:17 +0000
Subject: [PATCH] Foundation Classes - Align FlatMap/FlatDataMap lookup path
 and update usage notes (#1108)

- Simplify `findSlotIndex()` in `NCollection_FlatMap` and
  `NCollection_FlatDataMap` to probe until empty slot or key match.
- Remove lookup early-exit based on probe distance to keep lookup behavior
  consistent between both flat containers.
- Reorder `NCollection_FlatDataMap::Slot` members to keep hash/probe metadata
  before key/value storage.
- Refresh class-level Doxygen comments with practical usage guidance and
  relative notes vs `NCollection_Map` / `NCollection_DataMap`.
---
 .../NCollection/NCollection_FlatDataMap.hxx   | 34 +++++++++----------
 .../NCollection/NCollection_FlatMap.hxx       | 33 +++++++++---------
 2 files changed, 33 insertions(+), 34 deletions(-)

diff --git a/src/FoundationClasses/TKernel/NCollection/NCollection_FlatDataMap.hxx b/src/FoundationClasses/TKernel/NCollection/NCollection_FlatDataMap.hxx
index 095f42ee42..6f2d21ad18 100644
--- a/src/FoundationClasses/TKernel/NCollection/NCollection_FlatDataMap.hxx
+++ b/src/FoundationClasses/TKernel/NCollection/NCollection_FlatDataMap.hxx
@@ -39,10 +39,20 @@
  * - Power-of-2 sizing for fast modulo operations
  * - No per-element allocations
  *
- * Best suited for:
+ * Typical faster usage patterns:
  * - POD or small key/value types
  * - Performance-critical code paths
  * - Lookup-heavy workloads
+ * - Full traversal / iteration-heavy workloads
+ * - Stable-size maps with Reserve() called once before bulk Bind()
+ *
+ * Container-specific implementation notes:
+ * - UnBind() keeps probe clusters consistent using backward-shift compaction.
+ *
+ * Relative to NCollection_DataMap:
+ * - Bind()/UnBind() can be faster in many workloads thanks to contiguous storage and
+ *   no per-element node allocation.
+ * - Iteration is often faster due to contiguous slot scanning and reduced pointer chasing.
  *
  * Limitations:
  * - Keys and values must be movable
@@ -83,11 +93,11 @@ private:
 #endif
   struct Slot
   {
-    alignas(TheKeyType) char myKeyStorage[sizeof(TheKeyType)];
-    alignas(TheItemType) char myItemStorage[sizeof(TheItemType)];
     size_t myHash; //!< Cached hash code
     //! Distance from ideal bucket plus one; 0 means Empty, otherwise Used.
     size_t myProbeDistancePlus1;
+    alignas(TheKeyType) char myKeyStorage[sizeof(TheKeyType)];
+    alignas(TheItemType) char myItemStorage[sizeof(TheItemType)];
 
     Slot() noexcept
         : myHash(0),
@@ -832,13 +842,11 @@ private:
   //! @return true if key was found
   bool findSlotIndex(const TheKeyType& theKey, size_t& theIndex) const
   {
-    const size_t aHash     = myHasher(theKey);
-    const size_t aMask     = myCapacity - 1;
-    size_t       aIndex    = aHash & aMask;
-    size_t       aProbe    = 0;
-    const size_t aMaxProbe = myCapacity;
+    const size_t aHash  = myHasher(theKey);
+    const size_t aMask  = myCapacity - 1;
+    size_t       aIndex = aHash & aMask;
 
-    while (aProbe < aMaxProbe)
+    while (true)
     {
       const Slot& aSlot = mySlots[aIndex];
 
@@ -852,16 +860,8 @@
         theIndex = aIndex;
         return true;
       }
-
-      if (aProbe > aSlot.ProbeDistance())
-      {
-        return false;
-      }
-
-      ++aProbe;
       aIndex = (aIndex + 1) & aMask;
     }
-    return false;
   }
 
   template
diff --git a/src/FoundationClasses/TKernel/NCollection/NCollection_FlatMap.hxx b/src/FoundationClasses/TKernel/NCollection/NCollection_FlatMap.hxx
index 0ec351d03a..f365d8b184 100644
--- a/src/FoundationClasses/TKernel/NCollection/NCollection_FlatMap.hxx
+++ b/src/FoundationClasses/TKernel/NCollection/NCollection_FlatMap.hxx
@@ -37,10 +37,20 @@
  * - Power-of-2 sizing for fast modulo operations
  * - No per-element allocations
  *
- * Best suited for:
+ * Typical faster usage patterns:
  * - POD or small key types
  * - Performance-critical code paths
- * - Membership testing workloads
+ * - Lookup-heavy workloads (Contains()/Seek())
+ * - Full traversal / iteration-heavy workloads
+ * - Stable-size maps with Reserve() called once before bulk insert
+ *
+ * Container-specific implementation notes:
+ * - Remove() keeps probe clusters consistent using backward-shift compaction.
+ *
+ * Relative to NCollection_Map:
+ * - Add()/Remove() can be faster in many workloads thanks to contiguous storage and
+ *   no per-element node allocation.
+ * - Iteration is often faster due to contiguous slot scanning and reduced pointer chasing.
  *
  * Limitations:
  * - Keys must be movable
@@ -606,13 +616,11 @@ private:
   //! @return true if key was found
   bool findSlotIndex(const TheKeyType& theKey, size_t& theIndex) const
   {
-    const size_t aHash     = myHasher(theKey);
-    const size_t aMask     = myCapacity - 1;
-    size_t       aIndex    = aHash & aMask;
-    size_t       aProbe    = 0;
-    const size_t aMaxProbe = myCapacity;
+    const size_t aHash  = myHasher(theKey);
+    const size_t aMask  = myCapacity - 1;
+    size_t       aIndex = aHash & aMask;
 
-    while (aProbe < aMaxProbe)
+    while (true)
     {
       const Slot& aSlot = mySlots[aIndex];
 
@@ -626,17 +634,8 @@
         theIndex = aIndex;
         return true;
       }
-
-      // Robin Hood optimization: if current probe > slot's probe, key can't exist further
-      if (aProbe > aSlot.ProbeDistance())
-      {
-        return false;
-      }
-
-      ++aProbe;
       aIndex = (aIndex + 1) & aMask;
     }
-    return false;
   }
 
   template