/*
 * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "ExceptionHelpers.h"
#include "JSObject.h"

namespace JSC {

JS_EXPORT_PRIVATE const ClassInfo* getHashMapBucketKeyClassInfo();
JS_EXPORT_PRIVATE const ClassInfo* getHashMapBucketKeyValueClassInfo();
JS_EXPORT_PRIVATE const ClassInfo* getHashMapImplKeyClassInfo();
JS_EXPORT_PRIVATE const ClassInfo* getHashMapImplKeyValueClassInfo();

enum class HashTableType {
    Key,
    KeyValue
};

struct HashMapBucketDataKey {
    static const HashTableType Type = HashTableType::Key;
    WriteBarrier<Unknown> key;
};

struct HashMapBucketDataKeyValue {
    static const HashTableType Type = HashTableType::KeyValue;
    WriteBarrier<Unknown> key;
    WriteBarrier<Unknown> value;
};
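
// The two payload types above distinguish Set-like tables, whose buckets hold
// only a key, from Map-like tables, whose buckets hold a key/value pair.
// HashMapBucket::selectStructure() below picks the matching per-VM Structure
// (hashMapBucketSetStructure vs. hashMapBucketMapStructure). A hedged sketch,
// using aliases that are illustrative rather than declared in this file:
//
//     using SetBucket = HashMapBucket<HashMapBucketDataKey>;      // key only
//     using MapBucket = HashMapBucket<HashMapBucketDataKeyValue>; // key + value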

template <typename Data>
class HashMapBucket : public JSCell {
    typedef JSCell Base;

    template <typename T = Data>
    static typename std::enable_if<std::is_same<T, HashMapBucketDataKey>::value, Structure*>::type selectStructure(VM& vm)
    {
        return vm.hashMapBucketSetStructure.get();
    }

    template <typename T = Data>
    static typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, Structure*>::type selectStructure(VM& vm)
    {
        return vm.hashMapBucketMapStructure.get();
    }

public:
    static const HashTableType Type = Data::Type;
    static const ClassInfo s_info; // This is never accessed directly, since that would break linkage on some compilers.

    static const ClassInfo* info()
    {
        switch (Type) {
        case HashTableType::Key:
            return getHashMapBucketKeyClassInfo();
        case HashTableType::KeyValue:
            return getHashMapBucketKeyValueClassInfo();
        }
        RELEASE_ASSERT_NOT_REACHED();
    }

    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
    {
        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
    }

    static HashMapBucket* create(VM& vm)
    {
        HashMapBucket* bucket = new (NotNull, allocateCell<HashMapBucket<Data>>(vm.heap)) HashMapBucket(vm, selectStructure(vm));
        bucket->finishCreation(vm);
        ASSERT(!bucket->next());
        ASSERT(!bucket->prev());
        return bucket;
    }

    static HashMapBucket* createSentinel(VM& vm)
    {
        auto* bucket = create(vm);
        bucket->setKey(vm, jsUndefined());
        bucket->setValue(vm, jsUndefined());
        ASSERT(!bucket->deleted());
        return bucket;
    }

    HashMapBucket(VM& vm, Structure* structure)
        : Base(vm, structure)
    {
        ASSERT(deleted());
    }

    ALWAYS_INLINE void setNext(VM& vm, HashMapBucket* bucket)
    {
        m_next.set(vm, this, bucket);
    }
    ALWAYS_INLINE void setPrev(VM& vm, HashMapBucket* bucket)
    {
        m_prev.set(vm, this, bucket);
    }

    ALWAYS_INLINE void setKey(VM& vm, JSValue key)
    {
        m_data.key.set(vm, this, key);
    }

    template <typename T = Data>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value>::type setValue(VM& vm, JSValue value)
    {
        m_data.value.set(vm, this, value);
    }
    template <typename T = Data>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucketDataKey>::value>::type setValue(VM&, JSValue) { }

    ALWAYS_INLINE JSValue key() const { return m_data.key.get(); }

    template <typename T = Data>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, JSValue>::type value() const
    {
        return m_data.value.get();
    }

    static void visitChildren(JSCell*, SlotVisitor&);

    ALWAYS_INLINE HashMapBucket* next() const { return m_next.get(); }
    ALWAYS_INLINE HashMapBucket* prev() const { return m_prev.get(); }

    ALWAYS_INLINE bool deleted() const { return !key(); }
    ALWAYS_INLINE void makeDeleted(VM& vm)
    {
        setKey(vm, JSValue());
        setValue(vm, JSValue());
    }

    static ptrdiff_t offsetOfKey()
    {
        return OBJECT_OFFSETOF(HashMapBucket, m_data) + OBJECT_OFFSETOF(Data, key);
    }

    template <typename T = Data>
    static typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, ptrdiff_t>::type offsetOfValue()
    {
        return OBJECT_OFFSETOF(HashMapBucket, m_data) + OBJECT_OFFSETOF(Data, value);
    }

    static ptrdiff_t offsetOfNext()
    {
        return OBJECT_OFFSETOF(HashMapBucket, m_next);
    }

    template <typename T = Data>
    ALWAYS_INLINE static typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, JSValue>::type extractValue(const HashMapBucket& bucket)
    {
        return bucket.value();
    }

    template <typename T = Data>
    ALWAYS_INLINE static typename std::enable_if<std::is_same<T, HashMapBucketDataKey>::value, JSValue>::type extractValue(const HashMapBucket&)
    {
        return JSValue();
    }

private:
    WriteBarrier<HashMapBucket> m_next;
    WriteBarrier<HashMapBucket> m_prev;
    Data m_data;
};

template <typename BucketType>
class HashMapBuffer {
public:
    HashMapBuffer() = delete;

    static size_t allocationSize(Checked<size_t> capacity)
    {
        return (capacity * sizeof(BucketType*)).unsafeGet();
    }

    ALWAYS_INLINE BucketType** buffer() const
    {
        return bitwise_cast<BucketType**>(this);
    }

    static HashMapBuffer* create(ExecState* exec, VM& vm, JSCell*, uint32_t capacity)
    {
        auto scope = DECLARE_THROW_SCOPE(vm);
        size_t allocationSize = HashMapBuffer::allocationSize(capacity);
        void* data = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(vm, allocationSize, nullptr, AllocationFailureMode::ReturnNull);
        if (!data) {
            throwOutOfMemoryError(exec, scope);
            return nullptr;
        }

        HashMapBuffer* buffer = static_cast<HashMapBuffer*>(data);
        buffer->reset(capacity);
        return buffer;
    }

    ALWAYS_INLINE void reset(uint32_t capacity)
    {
        memset(this, -1, allocationSize(capacity));
    }
};
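
// The buffer is a flat, power-of-two-sized array of bucket pointers that
// starts at the HashMapBuffer's own address (hence the bitwise_cast in
// buffer()). reset() memsets every slot to the all-ones bit pattern, which is
// exactly HashMapImpl::emptyValue() below. A minimal layout sketch, assuming
// a 64-bit target:
//
//     allocationSize(8) == 8 * sizeof(BucketType*) == 64 bytes;
//     after reset(8), buffer()[0..7] all compare equal to emptyValue().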

ALWAYS_INLINE static bool areKeysEqual(ExecState* exec, JSValue a, JSValue b)
{
    // We want +0 and -0 to compare as equal here. sameValue() itself doesn't
    // guarantee that; instead, we normalize all keys before comparing and
    // storing them in the map. The normalization converts both -0.0 and 0.0 to
    // the integer representation of 0.
    return sameValue(exec, a, b);
}
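
// Because keys are pre-normalized, this gives SameValueZero-style behavior for
// zeros while keeping sameValue()'s NaN handling: -0.0 and 0.0 both normalize
// to jsNumber(0) and therefore land in (and find) the same bucket, and
// sameValue() reports NaN as equal to NaN, so a NaN key can be stored and
// found again.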

// Note that normalization is inlined as the DFG's NormalizeMapKey node.
// Keep this in sync with the DFG and FTL implementations of normalization.
ALWAYS_INLINE JSValue normalizeMapKey(JSValue key)
{
    if (!key.isNumber())
        return key;

    if (key.isInt32())
        return key;

    double d = key.asDouble();
    if (std::isnan(d))
        return key;

    int i = static_cast<int>(d);
    if (i == d) {
        // When a key is -0, we convert it to positive zero.
        // When a key is the double representation of an integer, we convert it to that integer.
        return jsNumber(i);
    }
    // At this point, the key is definitely not negative zero and definitely not the double representation of an integer.
    return key;
}
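
// Worked examples of the normalization above:
//
//     normalizeMapKey(jsNumber(42))   -> jsNumber(42)  (already an int32; returned as-is)
//     normalizeMapKey(jsNumber(-0.0)) -> jsNumber(0)   (negative zero folds to +0)
//     a double-encoded 5.0            -> jsNumber(5)   (integral double folds to int32)
//     normalizeMapKey(jsNumber(5.5))  -> jsNumber(5.5) (non-integral double; kept)
//     normalizeMapKey(jsNaN())        -> NaN           (NaN keys are kept as-is)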

static ALWAYS_INLINE uint32_t wangsInt64Hash(uint64_t key)
{
    key += ~(key << 32);
    key ^= (key >> 22);
    key += ~(key << 13);
    key ^= (key >> 8);
    key += (key << 3);
    key ^= (key >> 15);
    key += ~(key << 27);
    key ^= (key >> 31);
    return static_cast<unsigned>(key);
}
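
// This is Thomas Wang's 64-bit-to-32-bit integer hash. It mixes every input
// bit into the low 32 bits so that EncodedJSValues differing only in a few
// bits (e.g. consecutive int32 JSValues) spread across the table instead of
// clustering into adjacent probe slots.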

ALWAYS_INLINE uint32_t jsMapHash(ExecState* exec, VM& vm, JSValue value)
{
    ASSERT_WITH_MESSAGE(normalizeMapKey(value) == value, "We expect normalized values flowing into this function.");

    if (value.isString()) {
        auto scope = DECLARE_THROW_SCOPE(vm);
        const String& wtfString = asString(value)->value(exec);
        RETURN_IF_EXCEPTION(scope, UINT_MAX);
        return wtfString.impl()->hash();
    }

    return wangsInt64Hash(JSValue::encode(value));
}

ALWAYS_INLINE Optional<uint32_t> concurrentJSMapHash(JSValue key)
{
    key = normalizeMapKey(key);
    if (key.isString()) {
        JSString* string = asString(key);
        if (string->length() > 10 * 1024)
            return WTF::nullopt;
        const StringImpl* impl = string->tryGetValueImpl();
        if (!impl)
            return WTF::nullopt;
        return impl->concurrentHash();
    }

    uint64_t rawValue = JSValue::encode(key);
    return wangsInt64Hash(rawValue);
}

ALWAYS_INLINE bool shouldShrink(uint32_t capacity, uint32_t keyCount)
{
    return 8 * keyCount <= capacity && capacity > 4;
}

ALWAYS_INLINE bool shouldRehashAfterAdd(uint32_t capacity, uint32_t keyCount, uint32_t deleteCount)
{
    return 2 * (keyCount + deleteCount) >= capacity;
}
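
// Worked example of the two load-factor rules above: with capacity == 8,
// keyCount == 3, and deleteCount == 1, an add gives 2 * (3 + 1) == 8 >= 8, so
// we rehash; with capacity == 32 and keyCount == 4, 8 * 4 == 32 <= 32 and
// 32 > 4, so the table is eligible to shrink to capacity 16 on the next remove().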

ALWAYS_INLINE uint32_t nextCapacity(uint32_t capacity, uint32_t keyCount)
{
    if (shouldShrink(capacity, keyCount)) {
        ASSERT((capacity / 2) >= 4);
        return capacity / 2;
    }

    if (3 * keyCount <= capacity && capacity > 64) {
        // We stay at the same size if rehashing would cause us to be no more than
        // 1/3rd full. This comes up for programs like this:
        // Say the hash table grew to a key count of 64, causing it to grow to a capacity of 256.
        // Then, the table added 63 items. The load is now 127. Then, 63 items are deleted.
        // The load is still 127. Then, another item is added. The load is now 128, and we
        // decide that we need to rehash. The key count is 65, almost exactly what it was
        // when we grew to a capacity of 256. We don't really need to grow to a capacity
        // of 512 in this situation. Instead, we choose to rehash at the same size. This
        // will bring the load down to 65. We rehash into the same size when we determine
        // that the new load ratio will be under 1/3rd. (We also pick a minimum capacity
        // at which this rule kicks in, because otherwise we would be too sensitive to
        // rehashing at the same capacity.)
        return capacity;
    }
    return (Checked<uint32_t>(capacity) * 2).unsafeGet();
}
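
// A short trace of nextCapacity(), assuming no pending deletions:
//
//     nextCapacity(4, 4)    -> 8    (grow: the table is at full load)
//     nextCapacity(128, 50) -> 256  (grow: 3 * 50 > 128)
//     nextCapacity(256, 65) -> 256  (stay: 3 * 65 <= 256 and 256 > 64; the scenario above)
//     nextCapacity(256, 16) -> 128  (shrink: 8 * 16 <= 256)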

template <typename HashMapBucketType>
class HashMapImpl : public JSNonFinalObject {
    using Base = JSNonFinalObject;
    using HashMapBufferType = HashMapBuffer<HashMapBucketType>;

public:
    using BucketType = HashMapBucketType;

    static void visitChildren(JSCell*, SlotVisitor&);

    static size_t estimatedSize(JSCell*, VM&);

    HashMapImpl(VM& vm, Structure* structure)
        : Base(vm, structure)
        , m_keyCount(0)
        , m_deleteCount(0)
        , m_capacity(4)
    {
    }

    HashMapImpl(VM& vm, Structure* structure, uint32_t sizeHint)
        : Base(vm, structure)
        , m_keyCount(0)
        , m_deleteCount(0)
    {
        uint32_t capacity = ((Checked<uint32_t>(sizeHint) * 2) + 1).unsafeGet();
        capacity = std::max<uint32_t>(WTF::roundUpToPowerOfTwo(capacity), 4U);
        m_capacity = capacity;
    }

    ALWAYS_INLINE HashMapBucketType** buffer() const
    {
        return m_buffer->buffer();
    }

    void finishCreation(ExecState* exec, VM& vm)
    {
        ASSERT_WITH_MESSAGE(HashMapBucket<HashMapBucketDataKey>::offsetOfKey() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey(), "We assume this to be true in the DFG and FTL JIT.");

        auto scope = DECLARE_THROW_SCOPE(vm);
        Base::finishCreation(vm);

        makeAndSetNewBuffer(exec, vm);
        RETURN_IF_EXCEPTION(scope, void());

        setUpHeadAndTail(exec, vm);
    }

    void finishCreation(ExecState* exec, VM& vm, HashMapImpl* base)
    {
        auto scope = DECLARE_THROW_SCOPE(vm);
        Base::finishCreation(vm);

        // This size should be the same as what we would get by cloning the map through repeated calls to add().
        uint32_t capacity = ((Checked<uint32_t>(base->m_keyCount) * 2) + 1).unsafeGet();
        RELEASE_ASSERT(capacity <= (1U << 31));
        capacity = std::max<uint32_t>(WTF::roundUpToPowerOfTwo(capacity), 4U);
        m_capacity = capacity;
        makeAndSetNewBuffer(exec, vm);
        RETURN_IF_EXCEPTION(scope, void());

        setUpHeadAndTail(exec, vm);

        HashMapBucketType* bucket = base->m_head.get()->next();
        while (bucket) {
            if (!bucket->deleted()) {
                addNormalizedNonExistingForCloning(exec, bucket->key(), HashMapBucketType::extractValue(*bucket));
                RETURN_IF_EXCEPTION(scope, void());
            }
            bucket = bucket->next();
        }
        checkConsistency();
    }

    static HashMapBucketType* emptyValue()
    {
        return bitwise_cast<HashMapBucketType*>(static_cast<uintptr_t>(-1));
    }

    static ALWAYS_INLINE bool isEmpty(HashMapBucketType* bucket)
    {
        return bucket == emptyValue();
    }

    static HashMapBucketType* deletedValue()
    {
        return bitwise_cast<HashMapBucketType*>(static_cast<uintptr_t>(-3));
    }

    static ALWAYS_INLINE bool isDeleted(HashMapBucketType* bucket)
    {
        return bucket == deletedValue();
    }
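
    // The probe array encodes slot states as impossible bucket pointers:
    // uintptr_t(-1) marks a never-used slot (which is why HashMapBuffer::reset()
    // can simply memset the buffer to -1), and uintptr_t(-3) marks a tombstone
    // left behind by remove(). Probes stop at empty slots but walk over
    // tombstones, so a probe over consecutive slots might look like:
    //
    //     [bucketA]  [deletedValue()]  [bucketB]  [emptyValue()]
    //      compare    skip              compare    stop (not found)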

    ALWAYS_INLINE HashMapBucketType** findBucket(ExecState* exec, JSValue key)
    {
        VM& vm = exec->vm();
        auto scope = DECLARE_THROW_SCOPE(vm);
        key = normalizeMapKey(key);
        uint32_t hash = jsMapHash(exec, vm, key);
        RETURN_IF_EXCEPTION(scope, nullptr);
        return findBucket(exec, key, hash);
    }

    ALWAYS_INLINE HashMapBucketType** findBucket(ExecState* exec, JSValue key, uint32_t hash)
    {
        ASSERT_WITH_MESSAGE(normalizeMapKey(key) == key, "We expect normalized values flowing into this function.");
        return findBucketAlreadyHashedAndNormalized(exec, key, hash);
    }

    template <typename T = HashMapBucketType>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucket<HashMapBucketDataKeyValue>>::value, JSValue>::type get(ExecState* exec, JSValue key)
    {
        if (HashMapBucketType** bucket = findBucket(exec, key))
            return (*bucket)->value();
        return jsUndefined();
    }

    ALWAYS_INLINE bool has(ExecState* exec, JSValue key)
    {
        return !!findBucket(exec, key);
    }

    ALWAYS_INLINE void add(ExecState* exec, JSValue key, JSValue value = JSValue())
    {
        key = normalizeMapKey(key);
        addNormalizedInternal(exec, key, value, [&] (HashMapBucketType* bucket) {
            return !isDeleted(bucket) && areKeysEqual(exec, key, bucket->key());
        });
        if (shouldRehashAfterAdd())
            rehash(exec);
    }

    ALWAYS_INLINE HashMapBucketType* addNormalized(ExecState* exec, JSValue key, JSValue value, uint32_t hash)
    {
        ASSERT_WITH_MESSAGE(normalizeMapKey(key) == key, "We expect normalized values flowing into this function.");
        ASSERT_WITH_MESSAGE(jsMapHash(exec, exec->vm(), key) == hash, "We expect the provided hash to match the one we would compute here.");

        auto* bucket = addNormalizedInternal(exec->vm(), key, value, hash, [&] (HashMapBucketType* bucket) {
            return !isDeleted(bucket) && areKeysEqual(exec, key, bucket->key());
        });
        if (shouldRehashAfterAdd())
            rehash(exec);
        return bucket;
    }

    ALWAYS_INLINE bool remove(ExecState* exec, JSValue key)
    {
        HashMapBucketType** bucket = findBucket(exec, key);
        if (!bucket)
            return false;

        VM& vm = exec->vm();
        HashMapBucketType* impl = *bucket;
        impl->next()->setPrev(vm, impl->prev());
        impl->prev()->setNext(vm, impl->next());
        impl->makeDeleted(vm);

        *bucket = deletedValue();

        ++m_deleteCount;
        ASSERT(m_keyCount > 0);
        --m_keyCount;

        if (shouldShrink())
            rehash(exec);

        return true;
    }

    ALWAYS_INLINE uint32_t size() const
    {
        return m_keyCount;
    }

    ALWAYS_INLINE void clear(ExecState* exec)
    {
        VM& vm = exec->vm();
        m_keyCount = 0;
        m_deleteCount = 0;
        HashMapBucketType* head = m_head.get();
        HashMapBucketType* bucket = m_head->next();
        HashMapBucketType* tail = m_tail.get();
        while (bucket != tail) {
            HashMapBucketType* next = bucket->next();
            // We restart each iterator by pointing it to the head of the list.
            bucket->setNext(vm, head);
            bucket->makeDeleted(vm);
            bucket = next;
        }
        m_head->setNext(vm, m_tail.get());
        m_tail->setPrev(vm, m_head.get());
        m_capacity = 4;
        makeAndSetNewBuffer(exec, vm);
        checkConsistency();
    }

    ALWAYS_INLINE size_t bufferSizeInBytes() const
    {
        return m_capacity * sizeof(HashMapBucketType*);
    }

    static ptrdiff_t offsetOfHead()
    {
        return OBJECT_OFFSETOF(HashMapImpl<HashMapBucketType>, m_head);
    }

    static ptrdiff_t offsetOfBuffer()
    {
        return OBJECT_OFFSETOF(HashMapImpl<HashMapBucketType>, m_buffer);
    }

    static ptrdiff_t offsetOfCapacity()
    {
        return OBJECT_OFFSETOF(HashMapImpl<HashMapBucketType>, m_capacity);
    }

    HashMapBucketType* head() { return m_head.get(); }
    HashMapBucketType* tail() { return m_tail.get(); }

    size_t approximateSize() const
    {
        size_t size = sizeof(HashMapImpl);
        size += bufferSizeInBytes();
        size += 2 * sizeof(HashMapBucketType); // Head and tail members.
        size += m_keyCount * sizeof(HashMapBucketType); // Number of members that are on the list.
        return size;
    }

private:
    ALWAYS_INLINE bool shouldRehashAfterAdd() const
    {
        return JSC::shouldRehashAfterAdd(m_capacity, m_keyCount, m_deleteCount);
    }

    ALWAYS_INLINE bool shouldShrink() const
    {
        return JSC::shouldShrink(m_capacity, m_keyCount);
    }

    ALWAYS_INLINE void setUpHeadAndTail(ExecState*, VM& vm)
    {
        m_head.set(vm, this, HashMapBucketType::create(vm));
        m_tail.set(vm, this, HashMapBucketType::create(vm));

        m_head->setNext(vm, m_tail.get());
        m_tail->setPrev(vm, m_head.get());
        ASSERT(m_head->deleted());
        ASSERT(m_tail->deleted());
    }

    ALWAYS_INLINE void addNormalizedNonExistingForCloning(ExecState* exec, JSValue key, JSValue value = JSValue())
    {
        addNormalizedInternal(exec, key, value, [&] (HashMapBucketType*) {
            return false;
        });
    }

    template<typename CanUseBucket>
    ALWAYS_INLINE void addNormalizedInternal(ExecState* exec, JSValue key, JSValue value, const CanUseBucket& canUseBucket)
    {
        VM& vm = exec->vm();
        auto scope = DECLARE_THROW_SCOPE(vm);

        uint32_t hash = jsMapHash(exec, vm, key);
        RETURN_IF_EXCEPTION(scope, void());
        scope.release();
        addNormalizedInternal(vm, key, value, hash, canUseBucket);
    }

    template<typename CanUseBucket>
    ALWAYS_INLINE HashMapBucketType* addNormalizedInternal(VM& vm, JSValue key, JSValue value, uint32_t hash, const CanUseBucket& canUseBucket)
    {
        ASSERT_WITH_MESSAGE(normalizeMapKey(key) == key, "We expect normalized values flowing into this function.");

        const uint32_t mask = m_capacity - 1;
        uint32_t index = hash & mask;
        HashMapBucketType** buffer = this->buffer();
        HashMapBucketType* bucket = buffer[index];
        while (!isEmpty(bucket)) {
            if (canUseBucket(bucket)) {
                bucket->setValue(vm, value);
                return bucket;
            }
            index = (index + 1) & mask;
            bucket = buffer[index];
        }

        HashMapBucketType* newEntry = m_tail.get();
        buffer[index] = newEntry;
        newEntry->setKey(vm, key);
        newEntry->setValue(vm, value);
        ASSERT(!newEntry->deleted());
        HashMapBucketType* newTail = HashMapBucketType::create(vm);
        m_tail.set(vm, this, newTail);
        newTail->setPrev(vm, newEntry);
        ASSERT(newTail->deleted());
        newEntry->setNext(vm, newTail);

        ++m_keyCount;
        return newEntry;
    }

    ALWAYS_INLINE HashMapBucketType** findBucketAlreadyHashedAndNormalized(ExecState* exec, JSValue key, uint32_t hash)
    {
        const uint32_t mask = m_capacity - 1;
        uint32_t index = hash & mask;
        HashMapBucketType** buffer = this->buffer();
        HashMapBucketType* bucket = buffer[index];

        while (!isEmpty(bucket)) {
            if (!isDeleted(bucket) && areKeysEqual(exec, key, bucket->key()))
                return buffer + index;
            index = (index + 1) & mask;
            bucket = buffer[index];
        }
        return nullptr;
    }
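
    // Lookup and insertion both use open addressing with linear probing:
    // m_capacity is always a power of two, so `hash & (m_capacity - 1)` picks
    // the starting slot and `(index + 1) & mask` wraps around the buffer. For
    // example, with m_capacity == 8 a hash of 0x2A starts probing at slot 2
    // and visits 3, 4, ... until it hits a matching key or an empty slot.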

    void rehash(ExecState* exec)
    {
        VM& vm = exec->vm();
        auto scope = DECLARE_THROW_SCOPE(vm);

        uint32_t oldCapacity = m_capacity;
        m_capacity = nextCapacity(m_capacity, m_keyCount);

        if (m_capacity != oldCapacity) {
            makeAndSetNewBuffer(exec, vm);
            RETURN_IF_EXCEPTION(scope, void());
        } else {
            m_buffer->reset(m_capacity);
            assertBufferIsEmpty();
        }

        HashMapBucketType* iter = m_head->next();
        HashMapBucketType* end = m_tail.get();
        const uint32_t mask = m_capacity - 1;
        RELEASE_ASSERT(!(m_capacity & (m_capacity - 1)));
        HashMapBucketType** buffer = this->buffer();
        while (iter != end) {
            uint32_t index = jsMapHash(exec, vm, iter->key()) & mask;
            EXCEPTION_ASSERT_WITH_MESSAGE(!scope.exception(), "All keys should already be hashed before, so this should not throw because it won't resolve ropes.");
            {
                HashMapBucketType* bucket = buffer[index];
                while (!isEmpty(bucket)) {
                    index = (index + 1) & mask;
                    bucket = buffer[index];
                }
            }
            buffer[index] = iter;
            iter = iter->next();
        }

        m_deleteCount = 0;

        checkConsistency();
    }

    ALWAYS_INLINE void checkConsistency() const
    {
        if (!ASSERT_DISABLED) {
            HashMapBucketType* iter = m_head->next();
            HashMapBucketType* end = m_tail.get();
            uint32_t size = 0;
            while (iter != end) {
                ++size;
                iter = iter->next();
            }
            ASSERT(size == m_keyCount);
        }
    }

    void makeAndSetNewBuffer(ExecState* exec, VM& vm)
    {
        ASSERT(!(m_capacity & (m_capacity - 1)));

        HashMapBufferType* buffer = HashMapBufferType::create(exec, vm, this, m_capacity);
        if (UNLIKELY(!buffer))
            return;

        m_buffer.set(vm, this, buffer);
        assertBufferIsEmpty();
    }

    ALWAYS_INLINE void assertBufferIsEmpty() const
    {
        if (!ASSERT_DISABLED) {
            for (unsigned i = 0; i < m_capacity; i++)
                ASSERT(isEmpty(buffer()[i]));
        }
    }

    WriteBarrier<HashMapBucketType> m_head;
    WriteBarrier<HashMapBucketType> m_tail;
    AuxiliaryBarrier<HashMapBufferType*> m_buffer;
    uint32_t m_keyCount;
    uint32_t m_deleteCount;
    uint32_t m_capacity;
};
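
// A hedged usage sketch. The aliases and helper below are illustrative only;
// they are not declared in this header:
//
//     using JSMapImpl = HashMapImpl<HashMapBucket<HashMapBucketDataKeyValue>>;
//     using JSSetImpl = HashMapImpl<HashMapBucket<HashMapBucketDataKey>>;
//
//     static JSValue getOrUndefined(ExecState* exec, JSMapImpl* map, JSValue key)
//     {
//         // add() normalizes the key, hashes it, probes, and may rehash;
//         // get() returns jsUndefined() on a miss.
//         map->add(exec, key, jsNumber(1));
//         return map->get(exec, key);
//     }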

} // namespace JSC