diff --git a/common.gypi b/common.gypi
index 5e06df763c6942..c426352cefc787 100644
--- a/common.gypi
+++ b/common.gypi
@@ -33,7 +33,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.49',
+    'v8_embedder_string': '-node.50',
 
     # Enable disassembler for `--print-code` v8 options
     'v8_enable_disassembler': 1,
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index d73e3235c158df..657aa9212a6153 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -30,22 +30,28 @@ StoreBuffer::StoreBuffer(Heap* heap)
 }
 
 void StoreBuffer::SetUp() {
-  // Allocate 3x the buffer size, so that we can start the new store buffer
-  // aligned to 2x the size. This lets us use a bit test to detect the end of
-  // the area.
+  const size_t requested_size = kStoreBufferSize * kStoreBuffers;
+  // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
+  // use a bit test to detect the ends of the buffers.
+  const size_t alignment =
+      std::max<size_t>(kStoreBufferSize, AllocatePageSize());
+  void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
   VirtualMemory reservation;
-  if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
-                          &reservation)) {
+  if (!AlignedAllocVirtualMemory(requested_size, alignment, hint,
+                                 &reservation)) {
     heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
+
   Address start = reservation.address();
-  start_[0] = reinterpret_cast<Address*>(::RoundUp(start, kStoreBufferSize));
+  const size_t allocated_size = reservation.size();
+
+  start_[0] = reinterpret_cast<Address*>(start);
   limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
   start_[1] = limit_[0];
   limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
 
-  Address* vm_limit = reinterpret_cast<Address*>(start + reservation.size());
-
+  // Sanity check the buffers.
+  Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
   USE(vm_limit);
   for (int i = 0; i < kStoreBuffers; i++) {
     DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
@@ -55,8 +61,9 @@ void StoreBuffer::SetUp() {
     DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
   }
 
-  if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
-                                  kStoreBufferSize * kStoreBuffers,
+  // Set RW permissions only on the pages we use.
+  const size_t used_size = RoundUp(requested_size, CommitPageSize());
+  if (!reservation.SetPermissions(start, used_size,
                                   PageAllocator::kReadWrite)) {
     heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
@@ -65,7 +72,6 @@ void StoreBuffer::SetUp() {
   virtual_memory_.TakeControl(&reservation);
 }
 
-
 void StoreBuffer::TearDown() {
   if (virtual_memory_.IsReserved()) virtual_memory_.Free();
   top_ = nullptr;
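Note on the bit test mentioned in the new comment: because the reservation is now aligned to kStoreBufferSize (a power of two), each buffer's limit falls on a kStoreBufferSize boundary, so detecting that top_ has reached the end of a buffer needs only a mask of the pointer's low bits rather than a comparison against a stored limit. Below is a minimal standalone sketch of that idea, not part of the patch: the constants and the IsAtBufferLimit helper are illustrative stand-ins for the definitions in deps/v8/src/heap/store-buffer.h, and std::aligned_alloc stands in for AlignedAllocVirtualMemory.

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Illustrative stand-ins for the real constants in
// deps/v8/src/heap/store-buffer.h.
constexpr std::size_t kStoreBufferSize = std::size_t{1} << 16;  // power of two
constexpr std::size_t kStoreBufferMask = kStoreBufferSize - 1;
constexpr std::size_t kStoreBuffers = 2;

// With the block aligned to kStoreBufferSize, every buffer limit lies on a
// kStoreBufferSize boundary, so a slot pointer sits on a limit exactly when
// the low bits of its address are all zero.
bool IsAtBufferLimit(const std::uintptr_t* slot) {
  return (reinterpret_cast<std::uintptr_t>(slot) & kStoreBufferMask) == 0;
}

int main() {
  // Reserve both buffers in one block aligned to the buffer size, mirroring
  // AlignedAllocVirtualMemory(requested_size, alignment, hint, &reservation).
  void* block = std::aligned_alloc(kStoreBufferSize,
                                   kStoreBufferSize * kStoreBuffers);
  assert(block != nullptr);

  auto* start0 = static_cast<std::uintptr_t*>(block);
  std::uintptr_t* limit0 = start0 + kStoreBufferSize / sizeof(std::uintptr_t);

  assert(!IsAtBufferLimit(start0 + 1));  // interior slot: low bits nonzero
  assert(IsAtBufferLimit(limit0));       // end of buffer 0: aligned boundary

  std::free(block);
  return 0;
}

The removed code obtained the same property indirectly, by over-reserving 3x kStoreBufferSize and rounding the first buffer up to the next boundary. Requesting an aligned reservation from the allocator makes the ::RoundUp fixup unnecessary, shrinks the reservation to the two buffers actually needed, and commits read-write pages only for the RoundUp(requested_size, CommitPageSize()) bytes in use.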