deps: upgrade v8 to 4.2.77.18
This commit applies a secondary change in order to make `make test`
pass cleanly, specifically re-disabling post-mortem debugging in
common.gypi.
chrisdickinson committed Apr 25, 2015
1 parent e1eae1c commit 3d57c7b
Showing 14 changed files with 185 additions and 146 deletions.
5 changes: 3 additions & 2 deletions common.gypi
@@ -20,16 +20,17 @@
     # Enable disassembler for `--print-code` v8 options
     'v8_enable_disassembler': 1,
 
+    # Disable support for postmortem debugging, continuously broken.
+    'v8_postmortem_support%': 'false',
+
     # Don't bake anything extra into the snapshot.
     'v8_use_external_startup_data%': 0,
 
     'conditions': [
       ['OS == "win"', {
         'os_posix': 0,
-        'v8_postmortem_support%': 'false',
       }, {
         'os_posix': 1,
-        'v8_postmortem_support%': 'true',
       }],
       ['GENERATOR == "ninja" or OS== "mac"', {
         'OBJ_DIR': '<(PRODUCT_DIR)/obj',
2 changes: 1 addition & 1 deletion deps/v8/AUTHORS
@@ -61,7 +61,7 @@ Jan de Mooij <jandemooij@gmail.com>
 Jay Freeman <saurik@saurik.com>
 James Pike <g00gle@chilon.net>
 Jianghua Yang <jianghua.yjh@alibaba-inc.com>
-Joel Stanley <joel.stan@gmail.com>
+Joel Stanley <joel@jms.id.au>
 Johan Bergström <johan@bergstroem.nu>
 Jonathan Liu <net147@gmail.com>
 Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
2 changes: 1 addition & 1 deletion deps/v8/include/v8-version.h
@@ -11,7 +11,7 @@
 #define V8_MAJOR_VERSION 4
 #define V8_MINOR_VERSION 2
 #define V8_BUILD_NUMBER 77
-#define V8_PATCH_LEVEL 15
+#define V8_PATCH_LEVEL 18
 
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
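For context, these four macros are what an embedder sees at runtime via `v8::V8::GetVersion()`. A minimal sketch (not part of the diff) of how they compose into the version string this upgrade targets:

// Minimal sketch, not from the commit: composing the version macros the
// way V8's version machinery does, yielding "4.2.77.18" after this upgrade.
#include <cstdio>

#define V8_MAJOR_VERSION 4
#define V8_MINOR_VERSION 2
#define V8_BUILD_NUMBER 77
#define V8_PATCH_LEVEL 18

int main() {
  std::printf("%d.%d.%d.%d\n", V8_MAJOR_VERSION, V8_MINOR_VERSION,
              V8_BUILD_NUMBER, V8_PATCH_LEVEL);  // prints 4.2.77.18
  return 0;
}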
36 changes: 18 additions & 18 deletions deps/v8/include/v8.h
@@ -932,6 +932,24 @@ class V8_EXPORT EscapableHandleScope : public HandleScope {
   internal::Object** escape_slot_;
 };
 
+class V8_EXPORT SealHandleScope {
+ public:
+  SealHandleScope(Isolate* isolate);
+  ~SealHandleScope();
+
+ private:
+  // Make it hard to create heap-allocated or illegal handle scopes by
+  // disallowing certain operations.
+  SealHandleScope(const SealHandleScope&);
+  void operator=(const SealHandleScope&);
+  void* operator new(size_t size);
+  void operator delete(void*, size_t);
+
+  internal::Isolate* isolate_;
+  int prev_level_;
+  internal::Object** prev_limit_;
+};
+
 
 /**
  * A simple Maybe type, representing an object which may or may not have a
@@ -1004,24 +1022,6 @@ class ScriptOrigin {
   Handle<Integer> script_id_;
 };
 
-class V8_EXPORT SealHandleScope {
- public:
-  SealHandleScope(Isolate* isolate);
-  ~SealHandleScope();
-
- private:
-  // Make it hard to create heap-allocated or illegal handle scopes by
-  // disallowing certain operations.
-  SealHandleScope(const SealHandleScope&);
-  void operator=(const SealHandleScope&);
-  void* operator new(size_t size);
-  void operator delete(void*, size_t);
-
-  internal::Isolate* isolate_;
-  int prev_level_;
-  internal::Object** prev_limit_;
-};
-
 
 /**
  * A compiled JavaScript script, not yet tied to a Context.
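The `SealHandleScope` class itself is unchanged; the diff only moves it earlier in the header (the same 18 lines are added above and removed below). As a rough usage sketch, assuming it behaves as later V8 releases document: while a SealHandleScope is active, allocating new handles in the current scope is forbidden, which catches accidental handle creation in code that should not allocate.

// Hedged sketch (the function and its use of SealHandleScope are
// illustrative, not V8 code; assumes this tree's v8.h is on the include path).
#include "v8.h"

void RunWithoutNewHandles(v8::Isolate* isolate) {
  v8::SealHandleScope seal(isolate);
  // Creating a v8::Local<> here would violate the sealed scope and is
  // expected to trip a check in debug builds.
}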
22 changes: 5 additions & 17 deletions deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -812,22 +812,12 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   Node* value = node->InputAt(0);
   if (CanCover(node, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord64Sar: {
+      case IrOpcode::kWord64Sar:
+      case IrOpcode::kWord64Shr: {
         Int64BinopMatcher m(value);
-        if (m.right().IsInRange(1, 32)) {
+        if (m.right().Is(32)) {
           Emit(kX64Shr, g.DefineSameAsFirst(node),
-               g.UseRegister(m.left().node()),
-               g.UseImmediate(m.right().node()));
-          return;
-        }
-        break;
-      }
-      case IrOpcode::kWord64Shl: {
-        Int64BinopMatcher m(value);
-        if (m.right().IsInRange(1, 31)) {
-          Emit(kX64Shl32, g.DefineSameAsFirst(node),
-               g.UseRegister(m.left().node()),
-               g.UseImmediate(m.right().node()));
+               g.UseRegister(m.left().node()), g.TempImmediate(32));
           return;
         }
         break;
@@ -836,9 +826,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
         break;
     }
   }
-  // Otherwise truncation from 64-bit to 32-bit is a no-nop, as 32-bit
-  // operations just ignore the upper 64-bit.
-  Emit(kArchNop, g.DefineAsRegister(node), g.Use(value));
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
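Two behavioral points in this hunk are easy to miss. First, truncating a 64-bit right shift by exactly 32 (arithmetic or logical, hence the shared `kWord64Sar`/`kWord64Shr` case) is just "take the high 32 bits", so a single 32-bit shift instruction suffices; the broader `IsInRange(1, 32)` variant and the separate `kWord64Shl` case are dropped. Second, the fallback is no longer a no-op: emitting `kX64Movl` makes the truncation explicit, and on x64 writing a 32-bit register also zero-clears the upper half, presumably the reason the `kArchNop` approach was abandoned. A small sketch of the integer identities being relied on (illustrative, not V8 code):

// Illustrative only: the semantics behind the instruction selection.
#include <cassert>
#include <cstdint>

int32_t TruncateOfShr32(uint64_t x) {
  // (x >> 32) truncated to 32 bits is the high word; one shift suffices.
  return static_cast<int32_t>(x >> 32);
}

int32_t Truncate(uint64_t x) {
  // Plain truncation keeps the low word; on x64 this is a single movl.
  return static_cast<int32_t>(x);
}

int main() {
  uint64_t x = 0x1122334455667788ull;
  assert(TruncateOfShr32(x) == 0x11223344);
  assert(Truncate(x) == 0x55667788);
  return 0;
}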
3 changes: 3 additions & 0 deletions deps/v8/src/heap/gc-idle-time-handler.h
@@ -125,6 +125,9 @@ class GCIdleTimeHandler {
   // That is the maximum idle time we will have during frame rendering.
   static const size_t kMaxFrameRenderingIdleTime = 16;
 
+  // Minimum idle time to start incremental marking.
+  static const size_t kMinIdleTimeToStartIncrementalMarking = 10;
+
   // If we haven't recorded any scavenger events yet, we use a conservative
   // lower bound for the scavenger speed.
   static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
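The new constant sits deliberately next to `kMaxFrameRenderingIdleTime`: at 60 fps a frame leaves at most about 16 ms idle, and incremental marking is only started when more than 10 ms of that remains (the heap.cc hunks below compare the observed idle time against this threshold). A compile-time restatement of that relationship, as I read it (illustrative, not code from the commit):

// Restating the relationship between the two constants; illustrative only.
#include <cstddef>

static const size_t kMaxFrameRenderingIdleTime = 16;  // ~1 frame at 60 fps
static const size_t kMinIdleTimeToStartIncrementalMarking = 10;

static_assert(kMinIdleTimeToStartIncrementalMarking <
                  kMaxFrameRenderingIdleTime,
              "marking starts only when most of a frame's idle budget remains");

int main() { return 0; }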
53 changes: 38 additions & 15 deletions deps/v8/src/heap/heap.cc
@@ -104,6 +104,8 @@ Heap::Heap()
       allocation_timeout_(0),
 #endif  // DEBUG
       old_generation_allocation_limit_(initial_old_generation_size_),
+      idle_old_generation_allocation_limit_(
+          kMinimumOldGenerationAllocationLimit),
       old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
@@ -1159,8 +1161,7 @@ bool Heap::PerformGarbageCollection(
     // Temporarily set the limit for case when PostGarbageCollectionProcessing
     // allocates and triggers GC. The real limit is set at after
     // PostGarbageCollectionProcessing.
-    old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
+    SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
     old_gen_exhausted_ = false;
     old_generation_size_configured_ = true;
   } else {
@@ -1194,8 +1195,8 @@
     // Register the amount of external allocated memory.
     amount_of_external_allocated_memory_at_last_global_gc_ =
         amount_of_external_allocated_memory_;
-    old_generation_allocation_limit_ = OldGenerationAllocationLimit(
-        PromotedSpaceSizeOfObjects(), freed_global_handles);
+    SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+                                    freed_global_handles);
     // We finished a marking cycle. We can uncommit the marking deque until
     // we start marking again.
     mark_compact_collector_.UncommitMarkingDeque();
@@ -4558,7 +4559,7 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
 
 bool Heap::WorthActivatingIncrementalMarking() {
   return incremental_marking()->IsStopped() &&
-         incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
+         incremental_marking()->ShouldActivate();
 }
 
 
@@ -4583,6 +4584,7 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
       static_cast<double>(base::Time::kMillisecondsPerSecond);
   HistogramTimerScope idle_notification_scope(
       isolate_->counters()->gc_idle_notification());
+  double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
 
   GCIdleTimeHandler::HeapState heap_state;
   heap_state.contexts_disposed = contexts_disposed_;
@@ -4591,8 +4593,15 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
   // TODO(ulan): Start incremental marking only for large heaps.
+  intptr_t limit = old_generation_allocation_limit_;
+  if (static_cast<size_t>(idle_time_in_ms) >
+      GCIdleTimeHandler::kMinIdleTimeToStartIncrementalMarking) {
+    limit = idle_old_generation_allocation_limit_;
+  }
+
   heap_state.can_start_incremental_marking =
-      incremental_marking()->ShouldActivate() && FLAG_incremental_marking;
+      incremental_marking()->WorthActivating() &&
+      NextGCIsLikelyToBeFull(limit) && FLAG_incremental_marking;
   heap_state.sweeping_in_progress =
       mark_compact_collector()->sweeping_in_progress();
   heap_state.mark_compact_speed_in_bytes_per_ms =
@@ -4610,7 +4619,6 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
       static_cast<size_t>(
           tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
 
-  double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
   GCIdleTimeAction action =
       gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
   isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
@@ -5358,21 +5366,37 @@ int64_t Heap::PromotedExternalMemorySize() {
 }
 
 
-intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
-                                            int freed_global_handles) {
+intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
+                                                     intptr_t old_gen_size) {
+  CHECK(factor > 1.0);
+  CHECK(old_gen_size > 0);
+  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+  limit += new_space_.Capacity();
+  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+  return Min(limit, halfway_to_the_max);
+}
+
+
+void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
+                                           int freed_global_handles) {
   const int kMaxHandles = 1000;
   const int kMinHandles = 100;
-  double min_factor = 1.1;
+  const double min_factor = 1.1;
   double max_factor = 4;
+  const double idle_max_factor = 1.5;
   // We set the old generation growing factor to 2 to grow the heap slower on
   // memory-constrained devices.
   if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
     max_factor = 2;
   }
+
   // If there are many freed global handles, then the next full GC will
   // likely collect a lot of garbage. Choose the heap growing factor
   // depending on freed global handles.
   // TODO(ulan, hpayer): Take into account mutator utilization.
+  // TODO(hpayer): The idle factor could make the handles heuristic obsolete.
+  // Look into that.
   double factor;
   if (freed_global_handles <= kMinHandles) {
     factor = max_factor;
@@ -5391,11 +5415,10 @@ intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
     factor = min_factor;
   }
 
-  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
-  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
-  limit += new_space_.Capacity();
-  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-  return Min(limit, halfway_to_the_max);
+  old_generation_allocation_limit_ =
+      CalculateOldGenerationAllocationLimit(factor, old_gen_size);
+  idle_old_generation_allocation_limit_ =
+      CalculateOldGenerationAllocationLimit(Min(factor, idle_max_factor),
+                                            old_gen_size);
 }
 
 
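To make the new two-limit scheme concrete: `CalculateOldGenerationAllocationLimit` scales the old-generation size by a growing factor, applies a floor (`kMinimumOldGenerationAllocationLimit`), adds the new-space capacity, and caps the result at halfway between the current size and the maximum heap. `SetOldGenerationAllocationLimit` now derives two limits from that, the regular one with the full factor (up to 4) and a tighter idle one with the factor capped at `idle_max_factor` (1.5). A worked example with assumed sizes (64 MB old generation, 16 MB new space, 700 MB max; the numbers are mine, not from the commit):

// Worked example of the limit arithmetic; all sizes are assumptions.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int64_t Limit(double factor, int64_t old_gen_size, int64_t new_space_capacity,
              int64_t max_old_generation_size, int64_t minimum_limit) {
  int64_t limit = static_cast<int64_t>(old_gen_size * factor);
  limit = std::max(limit, minimum_limit);
  limit += new_space_capacity;
  int64_t halfway_to_the_max = (old_gen_size + max_old_generation_size) / 2;
  return std::min(limit, halfway_to_the_max);
}

int main() {
  const int64_t MB = 1 << 20;
  // Regular limit: factor 4 -> 64*4 + 16 = 272 MB (under the 382 MB halfway cap).
  // Idle limit: factor capped at 1.5 -> 64*1.5 + 16 = 112 MB.
  std::printf("regular: %lld MB\n", static_cast<long long>(
      Limit(4.0, 64 * MB, 16 * MB, 700 * MB, 0) / MB));
  std::printf("idle:    %lld MB\n", static_cast<long long>(
      Limit(1.5, 64 * MB, 16 * MB, 700 * MB, 0) / MB));
  return 0;
}

Because the idle limit is lower, `NextGCIsLikelyToBeFull(idle_old_generation_allocation_limit_)` turns true sooner, so incremental marking can begin during long idle periods well before the regular allocation limit would force a collection.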
23 changes: 18 additions & 5 deletions deps/v8/src/heap/heap.h
@@ -628,6 +628,10 @@ class Heap {
   // Returns of size of all objects residing in the heap.
   intptr_t SizeOfObjects();
 
+  intptr_t old_generation_allocation_limit() const {
+    return old_generation_allocation_limit_;
+  }
+
   // Return the starting address and a mask for the new space. And-masking an
   // address with the mask will result in the start address of the new space
   // for all addresses in either semispace.
@@ -1112,8 +1116,14 @@
   static const int kMaxExecutableSizeHugeMemoryDevice =
       256 * kPointerMultiplier;
 
-  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
-                                        int freed_global_handles);
+  // Calculates the allocation limit based on a given growing factor and a
+  // given old generation size.
+  intptr_t CalculateOldGenerationAllocationLimit(double factor,
+                                                 intptr_t old_gen_size);
+
+  // Sets the allocation limit to trigger the next full garbage collection.
+  void SetOldGenerationAllocationLimit(intptr_t old_gen_size,
+                                       int freed_global_handles);
 
   // Indicates whether inline bump-pointer allocation has been disabled.
   bool inline_allocation_disabled() { return inline_allocation_disabled_; }
@@ -1219,13 +1229,12 @@
     survived_since_last_expansion_ += survived;
   }
 
-  inline bool NextGCIsLikelyToBeFull() {
+  inline bool NextGCIsLikelyToBeFull(intptr_t limit) {
     if (FLAG_gc_global) return true;
 
     if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
 
-    intptr_t adjusted_allocation_limit =
-        old_generation_allocation_limit_ - new_space_.Capacity();
+    intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();
 
     if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
 
@@ -1604,6 +1613,10 @@
   // generation and on every allocation in large object space.
   intptr_t old_generation_allocation_limit_;
 
+  // The allocation limit when there is > kMinIdleTimeToStartIncrementalMarking
+  // idle time in the idle time handler.
+  intptr_t idle_old_generation_allocation_limit_;
+
   // Indicates that an allocation has failed in the old generation since the
   // last GC.
   bool old_gen_exhausted_;
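`NextGCIsLikelyToBeFull` is now parameterized by the limit, so each caller chooses which threshold to test: the incremental-marking activation path passes the regular limit, while `IdleNotification` can pass the tighter idle limit. A condensed restatement of the predicate (a sketch as a free function, not the V8 member):

// Sketch with the flag checks stripped; signature and names are mine.
#include <cstdint>

bool NextGCIsLikelyToBeFull(int64_t promoted_total_size, int64_t limit,
                            int64_t new_space_capacity) {
  // The limit is discounted by new-space capacity, presumably because
  // everything currently in new space could be promoted by the next scavenge.
  return promoted_total_size >= limit - new_space_capacity;
}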
4 changes: 3 additions & 1 deletion deps/v8/src/heap/incremental-marking.cc
@@ -422,7 +422,9 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
 
 
 bool IncrementalMarking::ShouldActivate() {
-  return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
+  return WorthActivating() &&
+         heap_->NextGCIsLikelyToBeFull(
+             heap_->old_generation_allocation_limit());
 }
 
 
22 changes: 7 additions & 15 deletions deps/v8/src/hydrogen.cc
@@ -2391,6 +2391,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
     PropertyAccessType access_type,
     LoadKeyedHoleMode load_mode,
     KeyedAccessStoreMode store_mode) {
+  DCHECK(top_info()->IsStub() || checked_object->IsCompareMap() ||
+         checked_object->IsCheckMaps());
   DCHECK((!IsExternalArrayElementsKind(elements_kind) &&
           !IsFixedTypedArrayElementsKind(elements_kind)) ||
          !is_js_array);
@@ -8401,11 +8403,10 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
       new_size = AddUncasted<HAdd>(length, graph()->GetConstant1());
 
       bool is_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-      BuildUncheckedMonomorphicElementAccess(array, length,
-                                             value_to_push, is_array,
-                                             elements_kind, STORE,
-                                             NEVER_RETURN_HOLE,
-                                             STORE_AND_GROW_NO_TRANSITION);
+      HValue* checked_array = Add<HCheckMaps>(array, receiver_map);
+      BuildUncheckedMonomorphicElementAccess(
+          checked_array, length, value_to_push, is_array, elements_kind,
+          STORE, NEVER_RETURN_HOLE, STORE_AND_GROW_NO_TRANSITION);
 
       if (!ast_context()->IsEffect()) Push(new_size);
       Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
@@ -8763,18 +8764,9 @@ void HOptimizedGraphBuilder::HandleIndirectCall(Call* expr, HValue* function,
   int args_count_no_receiver = arguments_count - 1;
   if (function->IsConstant() &&
       HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
-    HValue* receiver = environment()->ExpressionStackAt(args_count_no_receiver);
-    Handle<Map> receiver_map;
-    if (receiver->IsConstant() &&
-        HConstant::cast(receiver)->handle(isolate())->IsHeapObject()) {
-      receiver_map =
-          handle(Handle<HeapObject>::cast(
-                     HConstant::cast(receiver)->handle(isolate()))->map());
-    }
-
     known_function =
         Handle<JSFunction>::cast(HConstant::cast(function)->handle(isolate()));
-    if (TryInlineBuiltinMethodCall(expr, known_function, receiver_map,
+    if (TryInlineBuiltinMethodCall(expr, known_function, Handle<Map>(),
                                    args_count_no_receiver)) {
       if (FLAG_trace_inlining) {
         PrintF("Inlining builtin ");
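These three hunks are one consistency fix: `BuildUncheckedMonomorphicElementAccess` performs no map check of its own, so the new DCHECK insists its receiver is already guarded by an `HCheckMaps` or `HCompareMap` (or that a stub is being built). The `Array.prototype.push` inliner now adds that guard explicitly, and `HandleIndirectCall` stops deriving a speculative receiver map for the builtin-inlining path, passing an empty `Handle<Map>()` instead. A minimal analogue of the invariant (illustrative C++, not V8 code):

// Illustrative analogue: an "unchecked" fast path is only sound when a type
// guard dominates it; the commit makes that guard explicit at the call site.
#include <cassert>
#include <vector>

enum TypeTag { kArray, kOther };

struct Object {
  TypeTag tag;
  std::vector<int> elements;
};

// Analogue of BuildUncheckedMonomorphicElementAccess: assumes the tag check
// already happened and does not re-verify it.
void UncheckedPush(Object* obj, int value) {
  obj->elements.push_back(value);
}

// Analogue of the new call-site guard (HCheckMaps before the unchecked access).
void GuardedPush(Object* obj, int value) {
  assert(obj->tag == kArray);  // the map check the DCHECK now demands
  UncheckedPush(obj, value);
}

int main() {
  Object arr{kArray, {}};
  GuardedPush(&arr, 42);
  assert(arr.elements.size() == 1);
  return 0;
}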