// WeakReferenceDictionary.h
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #pragma once
  6. namespace JsUtil
  7. {
    // Interface implemented by dictionaries that hold recycler weak references.
    // Cleanup() is invoked so the dictionary can evict entries whose weakly
    // referenced objects have been collected.
    interface IWeakReferenceDictionary
    {
        virtual void Cleanup() = 0;
    };
    // A dictionary whose values are recycler weak references
    // (RecyclerWeakReference<TValue>*). Storage and lookup are inherited from
    // BaseDictionary; this class adds the Cleanup() hook that removes entries
    // whose weak references report they need cleanup.
    template <
        class TKey,
        class TValue,
        class SizePolicy = PowerOf2SizePolicy,
        template <typename ValueOrKey> class Comparer = DefaultComparer
    >
    class WeakReferenceDictionary: public BaseDictionary<TKey, RecyclerWeakReference<TValue>*, RecyclerNonLeafAllocator, SizePolicy, Comparer, WeakRefValueDictionaryEntry>,
        public IWeakReferenceDictionary
    {
        typedef BaseDictionary<TKey, RecyclerWeakReference<TValue>*, RecyclerNonLeafAllocator, SizePolicy, Comparer, WeakRefValueDictionaryEntry> Base;
        typedef typename Base::EntryType EntryType;

    public:
        WeakReferenceDictionary(Recycler* recycler, int capacity = 0):
            Base(recycler, capacity)
        {
            // Verify that converting to the IWeakReferenceDictionary base does not
            // adjust the pointer, i.e. the two addresses are interchangeable.
            // NOTE(review): presumably a consumer reinterprets one as the other -
            // confirm against the recycler's use of this interface.
            Assert(reinterpret_cast<void*>(this) == reinterpret_cast<void*>((IWeakReferenceDictionary*) this));
        }

        // Remove every entry whose weak reference needs cleanup (its referent has
        // been collected).
        virtual void Cleanup() override
        {
            this->MapAndRemoveIf([](typename Base::EntryType &entry)
            {
                return (Base::EntryType::NeedsCleanup(entry));
            });
        }

    private:
        // Hide BaseDictionary's Clone/Copy: duplicating weak-reference pointers
        // wholesale is not supported for this dictionary.
        using Base::Clone;
        using Base::Copy;

        PREVENT_COPY(WeakReferenceDictionary);
    };
  41. #if ENABLE_WEAK_REFERENCE_REGIONS
  42. template <class TKey>
  43. class WeakRefRegionValueDictionaryEntry : public SimpleDictionaryKeyEntry<TKey>
  44. {
  45. public:
  46. void Clear()
  47. {
  48. this->value = TKey();
  49. }
  50. };
    // TODO: It would be good to adapt WeaklyReferencedKeyDictionary to also use WeakRefRegions
    // One possibility is to create a BaseSplitDictionary which has the collections of
    // buckets, entries, and RecyclerWeakReferenceRegionItems, and then the entries are
    // either value/next or key/next pairs, with the weak ref region storing the keys or
    // values in a weak manner.
    //
    // A chained hash table mapping strongly-held keys to weakly-held values.
    // Keys (plus bucket-chain links) live in 'entries'; values live in a parallel
    // recycler weak-reference region ('values'), so the recycler can null a value
    // slot when its referent is collected. Cleanup() (called through
    // IWeakReferenceDictionary) then removes the entries whose value slot is null.
    template <
        class TKey,
        class TValue,
        class SizePolicy = PowerOf2SizePolicy,
        template <typename ValueOrKey> class Comparer = DefaultComparer,
        typename Lock = NoResizeLock,
        class AllocType = Recycler // Should always be recycler; this is to sufficiently confuse the RecyclerChecker
    >
    class WeakReferenceRegionDictionary : protected Lock, public IWeakReferenceDictionary
    {
        typedef WeakRefRegionValueDictionaryEntry<TKey> EntryType;
        typedef RecyclerWeakReferenceRegionItem<TValue> ValueType;
        // Allocator function choice is keyed off TValue, not EntryType - see the
        // note in AllocateEntries.
        typedef typename AllocatorInfo<Recycler, TValue>::AllocatorFunc EntryAllocatorFuncType;

    private:
        Field(int*) buckets;            // bucketCount head indices into 'entries'; -1 marks an empty bucket
        Field(EntryType*) entries;      // key + next-link slots, parallel to 'values'
        Field(ValueType*) values;       // weak value slots; cleared to null by the recycler on collection
        FieldNoBarrier(Recycler*) alloc;
        Field(int) size;                // capacity of 'entries'/'values'
        Field(uint) bucketCount;
        Field(int) count;               // high-water mark of slots ever used (includes freed slots)
        Field(int) freeList;            // head slot of the free list; meaningful only when freeCount > 0
        Field(int) freeCount;
        Field(int) modFunctionIndex;    // SizePolicy modulo-function selector used by GetBucket

        static const int FreeListSentinel = -2;

        PREVENT_COPY(WeakReferenceRegionDictionary);

        enum InsertOperations
        {
            Insert_Add,             // FatalInternalError if the item already exist in debug build
            Insert_AddNew,          // Ignore add if the item already exist
            Insert_Item             // Replace the item if it already exist
        };

        // RAII guard around Lock's resize protocol: BeginResize() on construction,
        // EndResize() on destruction.
        class AutoDoResize
        {
        public:
            AutoDoResize(Lock& lock) : lock(lock) { lock.BeginResize(); };
            ~AutoDoResize() { lock.EndResize(); };
        private:
            Lock & lock;
        };

    public:
        // Remove every entry whose weak value slot has been nulled by the recycler.
        virtual void Cleanup() override
        {
            this->MapAndRemoveIf([](EntryType& entry, ValueType& value)
            {
                return value == nullptr;
            });
        }

        WeakReferenceRegionDictionary(Recycler* allocator, int capacity = 0)
            : buckets(nullptr),
            entries(nullptr),
            values(nullptr),
            alloc(allocator),
            size(0),
            bucketCount(0),
            count(0),
            freeList(0),
            freeCount(0),
            modFunctionIndex(UNKNOWN_MOD_INDEX)
        {
            // Verify that converting to the IWeakReferenceDictionary base does not
            // adjust the pointer (the two addresses must be interchangeable).
            Assert(reinterpret_cast<void*>(this) == reinterpret_cast<void*>((IWeakReferenceDictionary*)this));
            Assert(allocator);

            // If initial capacity is negative or 0, lazy initialization on
            // the first insert operation is performed.
            if (capacity > 0)
            {
                Initialize(capacity);
            }
        }

        // Allocated slot capacity, not the number of live items.
        inline int Capacity() const
        {
            return size;
        }

        // Number of live items: used slots minus freed slots.
        inline int Count() const
        {
            return count - freeCount;
        }

        // Fetch the value for 'key'; the key must be present (asserted in debug;
        // a miss in release reads values[-1]).
        TValue Item(const TKey& key)
        {
            int i = FindEntry(key);
            Assert(i >= 0);
            return values[i];
        }

        const TValue Item(const TKey& key) const
        {
            int i = FindEntry(key);
            Assert(i >= 0);
            return values[i];
        }

        // Add a new key/value pair; in debug builds the key must not already be
        // present. Returns the slot index used.
        int Add(const TKey& key, const TValue& value)
        {
            return Insert<Insert_Add>(key, value);
        }

        // Add only if the key is absent; returns the slot index, or -1 when the
        // key already exists.
        int AddNew(const TKey& key, const TValue& value)
        {
            return Insert<Insert_AddNew>(key, value);
        }

        // Add or overwrite; returns the slot index.
        int Item(const TKey& key, const TValue& value)
        {
            return Insert<Insert_Item>(key, value);
        }

        // True when the key is present AND its stored value equals the pair's
        // value (per Comparer<TValue>).
        bool Contains(KeyValuePair<TKey, TValue> keyValuePair)
        {
            int i = FindEntry(keyValuePair.Key());
            if (i >= 0 && Comparer<TValue>::Equals(values[i], keyValuePair.Value()))
            {
                return true;
            }
            return false;
        }

        // Remove only when both key and value match; returns whether a removal
        // happened.
        bool Remove(KeyValuePair<TKey, TValue> keyValuePair)
        {
            int i, last;
            uint targetBucket;
            if (FindEntryWithKey(keyValuePair.Key(), &i, &last, &targetBucket))
            {
                const TValue &value = values[i];
                if (Comparer<TValue>::Equals(value, keyValuePair.Value()))
                {
                    RemoveAt(i, last, targetBucket);
                    return true;
                }
            }
            return false;
        }

        // Drop all items but keep the allocated arrays for reuse.
        void Clear()
        {
            if (count > 0)
            {
                memset(buckets, -1, bucketCount * sizeof(buckets[0]));
                memset(entries, 0, sizeof(EntryType) * size);
                memset(values, 0, sizeof(ValueType) * size); // TODO: issues with background threads?
                count = 0;
                freeCount = 0;
            }
        }

        // Detach from all storage without freeing it (the recycler owns the memory).
        void ResetNoDelete()
        {
            this->size = 0;
            this->bucketCount = 0;
            this->buckets = nullptr;
            this->entries = nullptr;
            this->values = nullptr;
            this->count = 0;
            this->freeCount = 0;
            this->modFunctionIndex = UNKNOWN_MOD_INDEX;
        }

        void Reset()
        {
            ResetNoDelete();
        }

        bool ContainsKey(const TKey& key) const
        {
            return FindEntry(key) >= 0;
        }

        // Look up with a key of a possibly different (comparable) type; returns
        // defaultValue when absent.
        template <typename TLookup>
        inline const TValue& LookupWithKey(const TLookup& key, const TValue& defaultValue) const
        {
            int i = FindEntryWithKey(key);
            if (i >= 0)
            {
                return values[i];
            }
            return defaultValue;
        }

        inline const TValue& Lookup(const TKey& key, const TValue& defaultValue) const
        {
            return LookupWithKey<TKey>(key, defaultValue);
        }

        // Copy the value into *value and return true when the key is present.
        template <typename TLookup>
        bool TryGetValue(const TLookup& key, TValue* value) const
        {
            int i = FindEntryWithKey(key);
            if (i >= 0)
            {
                *value = values[i];
                return true;
            }
            return false;
        }

        // Copy the value into *value and remove the entry; returns whether the
        // key was found.
        bool TryGetValueAndRemove(const TKey& key, TValue* value)
        {
            int i, last;
            uint targetBucket;
            if (FindEntryWithKey(key, &i, &last, &targetBucket))
            {
                *value = values[i];
                RemoveAt(i, last, targetBucket);
                return true;
            }
            return false;
        }

        // Direct slot access. Asserts the index is within [0, count);
        // NOTE(review): freed slots in that range are not rejected here -
        // presumably callers only pass indices of live entries.
        const TValue& GetValueAt(const int index) const
        {
            Assert(index >= 0);
            Assert(index < count);

            return values[index];
        }

        TKey const& GetKeyAt(const int index) const
        {
            Assert(index >= 0);
            Assert(index < count);

            return entries[index].Key();
        }

        bool TryGetValueAt(int index, TValue * value) const
        {
            if (index >= 0 && index < count)
            {
                *value = values[index];
                return true;
            }
            return false;
        }

        bool Remove(const TKey& key)
        {
            int i, last;
            uint targetBucket;
            if (FindEntryWithKey(key, &i, &last, &targetBucket))
            {
                RemoveAt(i, last, targetBucket);
                return true;
            }
            return false;
        }

        // Returns whether the dictionary was resized or not
        bool EnsureCapacity()
        {
            if (freeCount == 0 && count == size)
            {
                Resize();
                return true;
            }
            return false;
        }

        // Slot index the next insertion would occupy: head of the free list if
        // any slots were freed, otherwise the next unused slot.
        int GetNextIndex()
        {
            if (freeCount != 0)
            {
                Assert(freeCount > 0);
                Assert(freeList >= 0);
                Assert(freeList < count);
                return freeList;
            }
            return count;
        }

        int GetLastIndex()
        {
            return count - 1;
        }

        // Forward lock control to the Lock base (MSVC __super).
        void LockResize()
        {
            __super::LockResize();
        }

        void UnlockResize()
        {
            __super::UnlockResize();
        }

    private:
        template <typename TLookup>
        static hash_t GetHashCodeWithKey(const TLookup& key)
        {
            // set last bit to 1 to avoid false positive to make hash appears to be a valid recycler address.
            // In the same line, 0 should be use to indicate a non-existing entry.
            return TAGHASH(Comparer<TLookup>::GetHashCode(key));
        }

        static hash_t GetHashCode(const TKey& key)
        {
            return GetHashCodeWithKey<TKey>(key);
        }

        static uint GetBucket(hash_t hashCode, int bucketCount, int modFunctionIndex)
        {
            return SizePolicy::GetBucket(UNTAGHASH(hashCode), bucketCount, modFunctionIndex);
        }

        uint GetBucket(hash_t hashCode) const
        {
            return GetBucket(hashCode, this->bucketCount, modFunctionIndex);
        }

        static bool IsFreeEntry(const EntryType &entry)
        {
            // A free entry's next index will be (FreeListSentinel - nextIndex), such that it is always <= FreeListSentinel, for fast entry iteration
            // allowing for skipping over free entries. -1 is reserved for the end-of-chain marker for a used entry.
            return entry.next <= FreeListSentinel;
        }

        void SetNextFreeEntryIndex(EntryType &freeEntry, const int nextFreeEntryIndex)
        {
            Assert(!IsFreeEntry(freeEntry));
            Assert(nextFreeEntryIndex >= -1);
            Assert(nextFreeEntryIndex < count);

            // The last entry in the free list chain will have a next of FreeListSentinel to indicate that it is a free entry. The end of the
            // free list chain is identified using freeCount.
            freeEntry.next = nextFreeEntryIndex >= 0 ? FreeListSentinel - nextFreeEntryIndex : FreeListSentinel;
        }

        static int GetNextFreeEntryIndex(const EntryType &freeEntry)
        {
            Assert(IsFreeEntry(freeEntry));
            // Inverse of the encoding written by SetNextFreeEntryIndex.
            return FreeListSentinel - freeEntry.next;
        }

        // Find the slot index holding 'key', or -1 when absent.
        template <typename LookupType>
        inline int FindEntryWithKey(const LookupType& key) const
        {
            int * localBuckets = buckets;
            if (localBuckets != nullptr)
            {
                hash_t hashCode = GetHashCodeWithKey<LookupType>(key);
                uint targetBucket = this->GetBucket(hashCode);
                EntryType * localEntries = entries;
                for (int i = localBuckets[targetBucket]; i >= 0; i = localEntries[i].next)
                {
                    if (localEntries[i].template KeyEquals<Comparer<TKey>>(key, hashCode))
                    {
                        return i;
                    }
                }
            }
            return -1;
        }

        inline int FindEntry(const TKey& key) const
        {
            return FindEntryWithKey<TKey>(key);
        }

        // As above, but also reports the slot (*i), its predecessor in the bucket
        // chain (*last, -1 when the slot is the chain head), and the bucket
        // (*targetBucket) - the inputs RemoveAt needs.
        template <typename LookupType>
        inline bool FindEntryWithKey(const LookupType& key, int *const i, int *const last, uint *const targetBucket)
        {
            int * localBuckets = buckets;
            if (localBuckets != nullptr)
            {
                hash_t hashCode = GetHashCodeWithKey<LookupType>(key);
                *targetBucket = this->GetBucket(hashCode);
                *last = -1;
                EntryType * localEntries = entries;
                for (*i = localBuckets[*targetBucket]; *i >= 0; *last = *i, *i = localEntries[*i].next)
                {
                    if (localEntries[*i].template KeyEquals<Comparer<TKey>>(key, hashCode))
                    {
                        return true;
                    }
                }
            }
            return false;
        }

        // Walk every bucket chain and remove the entries for which
        // fn(entry, value) returns true.
        template<class Fn>
        void MapAndRemoveIf(Fn fn)
        {
            for (uint i = 0; i < bucketCount; i++)
            {
                if (buckets[i] != -1)
                {
                    for (int currentIndex = buckets[i], lastIndex = -1; currentIndex != -1;)
                    {
                        // If the predicate says we should remove this item
                        if (fn(entries[currentIndex], values[currentIndex]) == true)
                        {
                            // Capture the successor first: RemoveAt rewrites
                            // entries[currentIndex].next for the free list.
                            const int nextIndex = entries[currentIndex].next;
                            RemoveAt(currentIndex, lastIndex, i);
                            currentIndex = nextIndex;
                        }
                        else
                        {
                            lastIndex = currentIndex;
                            currentIndex = entries[currentIndex].next;
                        }
                    }
                }
            }
        }

        // Allocate and install the bucket/entry/value arrays for at least
        // 'capacity' items.
        void Initialize(int capacity)
        {
            // minimum capacity is 4
            int initSize = max(capacity, 4);
            int modIndex = UNKNOWN_MOD_INDEX;
            uint initBucketCount = SizePolicy::GetBucketSize(initSize, &modIndex);
            AssertMsg(initBucketCount > 0, "Size returned by policy should be greater than 0");

            int* newBuckets = nullptr;
            EntryType* newEntries = nullptr;
            ValueType* newValues = nullptr;
            Allocate(&newBuckets, &newEntries, &newValues, initBucketCount, initSize);

            // Allocation can throw - assign only after allocation has succeeded.
            this->buckets = newBuckets;
            this->entries = newEntries;
            this->values = newValues;
            this->bucketCount = initBucketCount;
            this->size = initSize;
            this->modFunctionIndex = modIndex;
            Assert(this->freeCount == 0);
        }

        // Core insertion routine. Behavior on a pre-existing key depends on 'op'
        // (see InsertOperations). Returns the slot index used, or -1 when op is
        // Insert_AddNew/Insert_Add and the key already exists.
        template <InsertOperations op>
        int Insert(const TKey& key, const TValue& value)
        {
            int * localBuckets = buckets;
            if (localBuckets == nullptr)
            {
                // Lazily initialized on first insert (see constructor).
                Initialize(0);
                localBuckets = buckets;
            }

#if DBG
            // Always search and verify
            const bool needSearch = true;
#else
            const bool needSearch = (op != Insert_Add);
#endif
            hash_t hashCode = GetHashCode(key);
            uint targetBucket = this->GetBucket(hashCode);
            if (needSearch)
            {
                EntryType * localEntries = entries;
                for (int i = localBuckets[targetBucket]; i >= 0; i = localEntries[i].next)
                {
                    if (localEntries[i].template KeyEquals<Comparer<TKey>>(key, hashCode))
                    {
                        // Key already present: overwrite for Insert_Item, reject
                        // otherwise (Insert_Add asserts in debug builds).
                        Assert(op != Insert_Add);
                        if (op == Insert_Item)
                        {
                            values[i] = value;
                            return i;
                        }
                        return -1;
                    }
                }
            }

            // Ideally we'd do cleanup only if weak references have been collected since the last resize
            // but that would require us to use an additional field to store the last recycler cleanup id
            // that we saw
            // We can add that optimization later if we have to.
            if (freeCount == 0 && count == size)
            {
                // Full: reclaim slots whose weak values died before resorting to
                // a resize below.
                this->Cleanup();
            }

            int index;
            if (freeCount != 0)
            {
                // Reuse the head of the free list.
                Assert(freeCount > 0);
                Assert(freeList >= 0);
                Assert(freeList < count);
                index = freeList;
                freeCount--;
                if (freeCount != 0)
                {
                    freeList = GetNextFreeEntryIndex(entries[index]);
                }
            }
            else
            {
                // If there's nothing free, then in general, we set index to count, and increment count
                // If we resize, we also need to recalculate the target
                // However, if cleanup is supported, then before resize, we should try and clean up and see
                // if something got freed, and if it did, reuse that index
                if (count == size)
                {
                    Resize();
                    // Bucket mapping changed with the resize - recompute.
                    targetBucket = this->GetBucket(hashCode);
                    index = count;
                    count++;
                }
                else
                {
                    index = count;
                    count++;
                }
                Assert(count <= size);
                Assert(index < size);
            }

            // Fill the slot and link it at the head of its bucket chain.
            entries[index].Set(key, hashCode);
            values[index] = value;
            entries[index].next = buckets[targetBucket];
            buckets[targetBucket] = index;
            return index;
        }

        // Grow to the next SizePolicy size; rehashes only when the bucket count
        // actually changes.
        void Resize()
        {
            AutoDoResize autoDoResize(*this);

            int newSize = SizePolicy::GetNextSize(count);
            int modIndex = UNKNOWN_MOD_INDEX;
            uint newBucketCount = SizePolicy::GetBucketSize(newSize, &modIndex);
            __analysis_assume(newSize > count);

            int* newBuckets = nullptr;
            EntryType* newEntries = nullptr;
            ValueType* newValues = nullptr;
            if (newBucketCount == bucketCount)
            {
                // no need to rehash
                newEntries = AllocateEntries(newSize);
                newValues = AllocateValues(newSize);
                CopyArray<EntryType>(newEntries, newSize, entries, count);
                CopyArray<ValueType>(newValues, newSize, values, count); // TODO: concurrency issues?

                this->entries = newEntries;
                this->values = newValues;
                this->size = newSize;
                this->modFunctionIndex = modIndex;
                return;
            }

            Allocate(&newBuckets, &newEntries, &newValues, newBucketCount, newSize);
            CopyArray<EntryType>(newEntries, newSize, entries, count);
            CopyArray<ValueType>(newValues, newSize, values, count); // TODO: concurrency issues?

            // When TAllocator is of type Recycler, it is possible that the Allocate above causes a collection, which
            // in turn can cause entries in the dictionary to be removed - i.e. the dictionary contains weak references
            // that remove themselves when no longer valid. This means the free list might not be empty anymore.
            this->modFunctionIndex = modIndex;
            for (int i = 0; i < count; i++)
            {
                __analysis_assume(i < newSize);
                // Skip freed slots; relink every live entry into its new bucket.
                if (!IsFreeEntry(newEntries[i]))
                {
                    hash_t hashCode = newEntries[i].template GetHashCode<Comparer<TKey>>();
                    int bucket = GetBucket(hashCode, newBucketCount, modFunctionIndex);
                    newEntries[i].next = newBuckets[bucket];
                    newBuckets[bucket] = i;
                }
            }

            this->buckets = newBuckets;
            this->entries = newEntries;
            this->values = newValues;
            bucketCount = newBucketCount;
            size = newSize;
        }

        __ecount(bucketCount) int *AllocateBuckets(DECLSPEC_GUARD_OVERFLOW const uint bucketCount)
        {
            return
                AllocateArray<AllocType, int, false>(
                    TRACK_ALLOC_INFO(alloc, int, AllocType, 0, bucketCount),
                    TypeAllocatorFunc<AllocType, int>::GetAllocFunc(),
                    bucketCount);
        }

        __ecount(size) EntryType * AllocateEntries(DECLSPEC_GUARD_OVERFLOW int size, const bool zeroAllocate = true)
        {
            // Note that the choice of leaf/non-leaf node is decided for the EntryType on the basis of TValue. By default, if
            // TValue is a pointer, a non-leaf allocation is done. This behavior can be overridden by specializing
            // TypeAllocatorFunc for TValue.
            return
                AllocateArray<Recycler, EntryType, false>(
                    TRACK_ALLOC_INFO(alloc, EntryType, Recycler, 0, size),
                    zeroAllocate ? EntryAllocatorFuncType::GetAllocZeroFunc() : EntryAllocatorFuncType::GetAllocFunc(),
                    size);
        }

        // Values are allocated as a recycler weak-reference region so individual
        // slots can be nulled when their referents are collected.
        __ecount(size) ValueType * AllocateValues(DECLSPEC_GUARD_OVERFLOW int size)
        {
            return alloc->CreateWeakReferenceRegion<TValue>(size);
        }

        // Allocate all three parallel arrays; buckets are initialized to -1
        // (empty). Outputs are written only after every allocation succeeded.
        void Allocate(__deref_out_ecount(bucketCount) int** ppBuckets, __deref_out_ecount(size) EntryType** ppEntries, __deref_out_ecount(size) ValueType** ppValues, DECLSPEC_GUARD_OVERFLOW uint bucketCount, DECLSPEC_GUARD_OVERFLOW int size)
        {
            int *const buckets = AllocateBuckets(bucketCount);
            Assert(buckets); // no-throw allocators are currently not supported

            EntryType *entries;
            entries = AllocateEntries(size);
            Assert(entries); // no-throw allocators are currently not supported

            ValueType * values = AllocateValues(size);

            memset(buckets, -1, bucketCount * sizeof(buckets[0]));

            *ppBuckets = buckets;
            *ppEntries = entries;
            *ppValues = values;
        }

        // Unlink slot i from its bucket chain ('last' is its predecessor, or -1
        // when i is the chain head) and push the slot onto the free list.
        // NOTE(review): values[i] is left as-is until the slot is reused -
        // presumably acceptable since the slot is weakly held; confirm.
        inline void RemoveAt(const int i, const int last, const uint targetBucket)
        {
            if (last < 0)
            {
                buckets[targetBucket] = entries[i].next;
            }
            else
            {
                entries[last].next = entries[i].next;
            }
            entries[i].Clear();
            SetNextFreeEntryIndex(entries[i], freeCount == 0 ? -1 : freeList);
            freeList = i;
            freeCount++;
        }
    };
  622. #endif
  623. };