From 85d9b7da27397d4f8c4a75d4a738cbdc628dc4be Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C3=ABl=20Zasso?=
Date: Sat, 22 Sep 2018 21:01:20 +0200
Subject: [PATCH 1/7] deps: update V8 to 7.1.163

---
 deps/v8/.clang-tidy | 20 +
 deps/v8/.gitattributes | 2 +
 deps/v8/.gitignore | 1 -
 deps/v8/AUTHORS | 2 +
 deps/v8/BUILD.gn | 141 +-
 deps/v8/ChangeLog | 945 ++++++
 deps/v8/DEPS | 27 +-
 deps/v8/include/libplatform/v8-tracing.h | 10 +-
 deps/v8/include/v8-inspector.h | 14 +-
 deps/v8/include/v8-internal.h | 316 ++
 deps/v8/include/v8-profiler.h | 18 +-
 deps/v8/include/v8-util.h | 8 +-
 deps/v8/include/v8-version.h | 6 +-
 deps/v8/include/v8.h | 679 ++---
 deps/v8/include/v8config.h | 30 +
 deps/v8/infra/config/cq.cfg | 17 +
 deps/v8/infra/mb/mb_config.pyl | 31 +-
 deps/v8/infra/testing/builders.pyl | 106 +-
 deps/v8/src/DEPS | 2 +
 deps/v8/src/accessors.cc | 5 +-
 deps/v8/src/accessors.h | 50 +-
 deps/v8/src/address-map.cc | 5 +-
 deps/v8/src/allocation-site-scopes-inl.h | 52 +
 deps/v8/src/allocation-site-scopes.h | 34 +-
 deps/v8/src/allocation.cc | 169 +-
 deps/v8/src/allocation.h | 78 +-
 deps/v8/src/api-arguments-inl.h | 85 +-
 deps/v8/src/api-arguments.h | 7 +-
 deps/v8/src/api-natives.cc | 54 +-
 deps/v8/src/api-natives.h | 9 +-
 deps/v8/src/api.cc | 246 +-
 deps/v8/src/api.h | 1 +
 deps/v8/src/arguments.h | 2 +-
 deps/v8/src/arm/assembler-arm.cc | 15 +
 deps/v8/src/arm/assembler-arm.h | 11 +-
 deps/v8/src/arm/code-stubs-arm.cc | 10 +-
 deps/v8/src/arm/codegen-arm.cc | 46 +-
 deps/v8/src/arm/interface-descriptors-arm.cc | 14 +-
 deps/v8/src/arm/macro-assembler-arm.cc | 17 +-
 deps/v8/src/arm/macro-assembler-arm.h | 21 +-
 deps/v8/src/arm/simulator-arm.cc | 10 +-
 deps/v8/src/arm64/assembler-arm64-inl.h | 4 +-
 deps/v8/src/arm64/assembler-arm64.cc | 16 +
 deps/v8/src/arm64/assembler-arm64.h | 4 +-
 deps/v8/src/arm64/code-stubs-arm64.cc | 9 +-
 deps/v8/src/arm64/codegen-arm64.cc | 5 +-
 .../src/arm64/interface-descriptors-arm64.cc | 14 +-
 deps/v8/src/arm64/macro-assembler-arm64.cc | 27 +-
 deps/v8/src/arm64/macro-assembler-arm64.h | 23 +-
 deps/v8/src/asmjs/asm-js.cc | 2 +-
 deps/v8/src/assembler.cc | 32 +-
 deps/v8/src/assembler.h | 46 +-
 deps/v8/src/assert-scope.h | 4 +-
 .../ast/ast-function-literal-id-reindexer.cc | 2 +-
 deps/v8/src/ast/ast-source-ranges.h | 20 +-
 deps/v8/src/ast/ast-value-factory.cc | 11 +
 deps/v8/src/ast/ast-value-factory.h | 11 +-
 deps/v8/src/ast/ast.cc | 6 -
 deps/v8/src/ast/ast.h | 34 +-
 deps/v8/src/ast/prettyprinter.cc | 4 +-
 deps/v8/src/ast/scopes-inl.h | 66 +
 deps/v8/src/ast/scopes.cc | 181 +-
 deps/v8/src/ast/scopes.h | 33 +-
 deps/v8/src/ast/variables.h | 1 +
 deps/v8/src/base/address-region.h | 56 +
 deps/v8/src/base/atomic-utils.h | 16 +
 deps/v8/src/base/bits.h | 8 +
 deps/v8/src/base/bounded-page-allocator.cc | 83 +
 deps/v8/src/base/bounded-page-allocator.h | 78 +
 deps/v8/src/base/build_config.h | 4 +-
 deps/v8/src/base/debug/stack_trace.cc | 2 +-
 deps/v8/src/base/debug/stack_trace_posix.cc | 8 +-
 deps/v8/src/base/ieee754.cc | 20 +-
 deps/v8/src/base/logging.h | 10 +-
 deps/v8/src/base/lsan-page-allocator.cc | 59 +
 deps/v8/src/base/lsan-page-allocator.h | 56 +
 deps/v8/src/base/macros.h | 39 +-
 deps/v8/src/base/optional.h | 2 +-
 deps/v8/src/base/page-allocator.cc | 8 +-
 deps/v8/src/base/page-allocator.h | 11 +-
 deps/v8/src/base/platform/OWNERS | 2 +
 deps/v8/src/base/platform/platform-fuchsia.cc | 12 +-
 .../src/base/platform/platform-posix-time.h | 2 +-
 deps/v8/src/base/platform/platform-posix.cc | 9 +-
 deps/v8/src/base/platform/platform-posix.h | 2 +-
 deps/v8/src/base/platform/platform-win32.cc | 6 +-
 deps/v8/src/base/platform/platform.h | 2 +-
 deps/v8/src/base/platform/time.h | 5 +-
 deps/v8/src/base/region-allocator.cc | 284 ++
 deps/v8/src/base/region-allocator.h | 158 +
 deps/v8/src/base/safe_math.h | 2 +-
 deps/v8/src/base/timezone-cache.h | 2 +-
 deps/v8/src/basic-block-profiler.cc | 7 -
 deps/v8/src/basic-block-profiler.h | 4 +-
 deps/v8/src/bit-vector.h | 9 +-
 deps/v8/src/bootstrapper.cc | 193 +-
 deps/v8/src/bootstrapper.h | 5 +-
 deps/v8/src/builtins/arm/builtins-arm.cc | 112 +-
 deps/v8/src/builtins/arm64/builtins-arm64.cc | 104 +-
 deps/v8/src/builtins/array-copywithin.tq | 26 +-
 deps/v8/src/builtins/array-foreach.tq | 53 +-
 deps/v8/src/builtins/array-lastindexof.tq | 151 +
 deps/v8/src/builtins/array-reverse.tq | 71 +-
 deps/v8/src/builtins/array-splice.tq | 394 +++
 deps/v8/src/builtins/array-unshift.tq | 106 +
 deps/v8/src/builtins/array.tq | 315 +-
 deps/v8/src/builtins/base.tq | 319 +-
 deps/v8/src/builtins/builtins-api.cc | 2 +-
 .../v8/src/builtins/builtins-arguments-gen.cc | 6 +-
 deps/v8/src/builtins/builtins-array-gen.cc | 38 +-
 deps/v8/src/builtins/builtins-array.cc | 318 +-
 deps/v8/src/builtins/builtins-arraybuffer.cc | 20 +-
 deps/v8/src/builtins/builtins-async-gen.cc | 6 +-
 .../builtins/builtins-async-generator-gen.cc | 8 +-
 deps/v8/src/builtins/builtins-boolean.cc | 5 +-
 deps/v8/src/builtins/builtins-call-gen.cc | 55 +-
 .../src/builtins/builtins-collections-gen.cc | 31 +-
 .../src/builtins/builtins-constructor-gen.cc | 25 +-
 .../src/builtins/builtins-conversion-gen.cc | 72 +-
 deps/v8/src/builtins/builtins-data-view-gen.h | 14 +-
 deps/v8/src/builtins/builtins-dataview.cc | 29 +-
 deps/v8/src/builtins/builtins-date-gen.cc | 6 +-
 deps/v8/src/builtins/builtins-date.cc | 51 +
 deps/v8/src/builtins/builtins-definitions.h | 173 +-
 deps/v8/src/builtins/builtins-descriptors.h | 3 +-
 deps/v8/src/builtins/builtins-function-gen.cc | 5 +-
 deps/v8/src/builtins/builtins-handler-gen.cc | 2 +-
 deps/v8/src/builtins/builtins-ic-gen.cc | 8 +-
 deps/v8/src/builtins/builtins-internal-gen.cc | 210 +-
 .../src/builtins/builtins-interpreter-gen.cc | 18 +
 deps/v8/src/builtins/builtins-intl-gen.cc | 15 +-
 deps/v8/src/builtins/builtins-intl.cc | 1136 ++++----
 deps/v8/src/builtins/builtins-intl.h | 30 -
 deps/v8/src/builtins/builtins-iterator-gen.cc | 127 +-
 deps/v8/src/builtins/builtins-iterator-gen.h | 5 +-
 deps/v8/src/builtins/builtins-number-gen.cc | 10 +-
 deps/v8/src/builtins/builtins-object-gen.cc | 200 +-
 deps/v8/src/builtins/builtins-promise-gen.cc | 36 +-
 deps/v8/src/builtins/builtins-promise-gen.h | 5 +-
 deps/v8/src/builtins/builtins-proxy-gen.cc | 8 +-
 deps/v8/src/builtins/builtins-reflect.cc | 2 +-
 deps/v8/src/builtins/builtins-regexp-gen.cc | 41 +-
 .../builtins-sharedarraybuffer-gen.cc | 20 +-
 .../builtins/builtins-sharedarraybuffer.cc | 54 +-
 deps/v8/src/builtins/builtins-string-gen.cc | 190 +-
 deps/v8/src/builtins/builtins-string-gen.h | 10 +-
 deps/v8/src/builtins/builtins-symbol.cc | 2 +-
 .../src/builtins/builtins-typed-array-gen.cc | 180 +-
 .../src/builtins/builtins-typed-array-gen.h | 6 +-
 deps/v8/src/builtins/builtins-wasm-gen.cc | 44 +-
 deps/v8/src/builtins/builtins.cc | 36 +-
 deps/v8/src/builtins/builtins.h | 52 +-
 .../src/builtins/constants-table-builder.cc | 6 +-
 deps/v8/src/builtins/data-view.tq | 554 ++--
 .../generate-bytecodes-builtins-list.cc | 97 +
 deps/v8/src/builtins/ia32/builtins-ia32.cc | 931 +++---
 deps/v8/src/builtins/mips/builtins-mips.cc | 116 +-
 .../v8/src/builtins/mips64/builtins-mips64.cc | 114 +-
 deps/v8/src/builtins/ppc/builtins-ppc.cc | 112 +-
 deps/v8/src/builtins/s390/builtins-s390.cc | 116 +-
 .../src/builtins/setup-builtins-internal.cc | 64 +-
 deps/v8/src/builtins/typed-array.tq | 148 +-
 deps/v8/src/builtins/x64/builtins-x64.cc | 115 +-
 deps/v8/src/callable.h | 2 +-
 deps/v8/src/char-predicates-inl.h | 11 +-
 deps/v8/src/checks.h | 2 +-
 deps/v8/src/code-events.h | 4 +-
 deps/v8/src/code-factory.cc | 30 +-
 deps/v8/src/code-factory.h | 3 +-
 deps/v8/src/code-stub-assembler.cc | 1075 ++++---
 deps/v8/src/code-stub-assembler.h | 211 +-
 deps/v8/src/code-stubs.h | 6 +-
 deps/v8/src/codegen.cc | 24 +-
 deps/v8/src/codegen.h | 10 +-
 deps/v8/src/collector.h | 6 +-
 deps/v8/src/compilation-cache.cc | 2 -
 deps/v8/src/compilation-cache.h | 2 +-
 deps/v8/src/compilation-statistics.h | 2 +-
 .../compiler-dispatcher-job.h | 28 +-
 .../compiler-dispatcher-tracer.cc | 2 +-
 .../compiler-dispatcher.cc | 331 +--
 .../compiler-dispatcher/compiler-dispatcher.h | 34 +-
 .../optimizing-compile-dispatcher.cc | 16 +-
 .../unoptimized-compile-job.cc | 360 +--
 .../unoptimized-compile-job.h | 33 +-
 deps/v8/src/compiler.cc | 81 +-
 deps/v8/src/compiler.h | 6 +-
 deps/v8/src/compiler/access-builder.cc | 14 +-
 deps/v8/src/compiler/access-builder.h | 1 +
 deps/v8/src/compiler/access-info.cc | 49 +-
 deps/v8/src/compiler/access-info.h | 7 +-
 .../v8/src/compiler/arm/code-generator-arm.cc | 140 +-
 .../src/compiler/arm/instruction-codes-arm.h | 557 ++--
 .../compiler/arm/instruction-scheduler-arm.cc | 21 -
 .../compiler/arm/instruction-selector-arm.cc | 212 +-
 .../compiler/arm64/code-generator-arm64.cc | 5 +-
 .../arm64/instruction-selector-arm64.cc | 2 +
 deps/v8/src/compiler/branch-elimination.cc | 2 +-
 deps/v8/src/compiler/bytecode-analysis.cc | 6 +-
 deps/v8/src/compiler/bytecode-analysis.h | 2 +-
 .../v8/src/compiler/bytecode-graph-builder.cc | 33 +-
 deps/v8/src/compiler/checkpoint-elimination.h | 2 +-
 deps/v8/src/compiler/code-assembler.cc | 35 +-
 deps/v8/src/compiler/code-assembler.h | 51 +-
 deps/v8/src/compiler/code-generator.cc | 42 +-
 deps/v8/src/compiler/code-generator.h | 28 +-
 deps/v8/src/compiler/common-node-cache.h | 2 +-
 .../v8/src/compiler/common-operator-reducer.h | 2 +-
 deps/v8/src/compiler/common-operator.cc | 21 +-
 deps/v8/src/compiler/common-operator.h | 11 +-
 .../src/compiler/compilation-dependencies.cc | 22 +-
 .../src/compiler/constant-folding-reducer.cc | 2 +-
 deps/v8/src/compiler/dead-code-elimination.h | 2 +-
 .../src/compiler/effect-control-linearizer.cc | 490 +++-
 .../src/compiler/effect-control-linearizer.h | 15 +-
 deps/v8/src/compiler/escape-analysis.cc | 7 +-
 deps/v8/src/compiler/escape-analysis.h | 8 +-
 deps/v8/src/compiler/frame-states.cc | 6 +
 deps/v8/src/compiler/gap-resolver.cc | 13 +-
 deps/v8/src/compiler/gap-resolver.h | 2 +-
 deps/v8/src/compiler/graph-assembler.cc | 17 +-
 deps/v8/src/compiler/graph-assembler.h | 10 +-
 deps/v8/src/compiler/graph-reducer.cc | 2 +-
 deps/v8/src/compiler/graph-reducer.h | 6 +-
 deps/v8/src/compiler/graph-trimmer.cc | 2 +-
 deps/v8/src/compiler/graph-visualizer.cc | 10 +-
 deps/v8/src/compiler/graph-visualizer.h | 2 +-
 deps/v8/src/compiler/graph.h | 2 +-
 .../src/compiler/ia32/code-generator-ia32.cc | 150 +-
 .../compiler/ia32/instruction-codes-ia32.h | 721 +++--
 .../ia32/instruction-scheduler-ia32.cc | 21 -
 .../ia32/instruction-selector-ia32.cc | 196 +-
 .../src/compiler/instruction-selector-impl.h | 3 +
 deps/v8/src/compiler/instruction-selector.cc | 64 +-
 deps/v8/src/compiler/instruction.cc | 12 +-
 deps/v8/src/compiler/instruction.h | 21 +-
 deps/v8/src/compiler/int64-lowering.cc | 12 +-
 deps/v8/src/compiler/js-call-reducer.cc | 435 ++-
 deps/v8/src/compiler/js-call-reducer.h | 8 +-
 .../src/compiler/js-context-specialization.cc | 8 +-
 deps/v8/src/compiler/js-create-lowering.cc | 113 +-
 deps/v8/src/compiler/js-create-lowering.h | 10 +-
 deps/v8/src/compiler/js-generic-lowering.cc | 40 +-
 deps/v8/src/compiler/js-graph.cc | 16 +-
 deps/v8/src/compiler/js-graph.h | 6 -
 deps/v8/src/compiler/js-heap-broker.cc | 2103 ++++++++++----
 deps/v8/src/compiler/js-heap-broker.h | 166 +-
 deps/v8/src/compiler/js-heap-copy-reducer.cc | 43 +-
 deps/v8/src/compiler/js-intrinsic-lowering.cc | 46 -
 deps/v8/src/compiler/js-intrinsic-lowering.h | 7 +-
 .../js-native-context-specialization.cc | 227 +-
 .../js-native-context-specialization.h | 18 +-
 deps/v8/src/compiler/js-operator.cc | 11 +-
 deps/v8/src/compiler/js-operator.h | 2 +-
 deps/v8/src/compiler/js-type-hint-lowering.cc | 20 +-
 deps/v8/src/compiler/js-type-hint-lowering.h | 3 +
 deps/v8/src/compiler/js-typed-lowering.cc | 367 +--
 deps/v8/src/compiler/js-typed-lowering.h | 12 +-
 deps/v8/src/compiler/linkage.cc | 9 +-
 deps/v8/src/compiler/load-elimination.cc | 117 +-
 deps/v8/src/compiler/load-elimination.h | 44 +-
 deps/v8/src/compiler/loop-peeling.h | 2 +-
 .../src/compiler/loop-variable-optimizer.cc | 4 +
 .../v8/src/compiler/machine-graph-verifier.cc | 1 +
 .../src/compiler/machine-operator-reducer.cc | 55 +-
 .../src/compiler/machine-operator-reducer.h | 2 +-
 deps/v8/src/compiler/machine-operator.cc | 111 +-
 deps/v8/src/compiler/machine-operator.h | 16 +-
 deps/v8/src/compiler/memory-optimizer.cc | 56 +-
 deps/v8/src/compiler/memory-optimizer.h | 12 +-
 .../src/compiler/mips/code-generator-mips.cc | 8 +-
 .../compiler/mips64/code-generator-mips64.cc | 16 +-
 .../mips64/instruction-selector-mips64.cc | 6 +
 deps/v8/src/compiler/move-optimizer.cc | 8 +-
 deps/v8/src/compiler/node-cache.cc | 4 +-
 deps/v8/src/compiler/node-cache.h | 4 +-
 deps/v8/src/compiler/node-properties.cc | 1 -
 deps/v8/src/compiler/node.h | 17 +-
 deps/v8/src/compiler/opcodes.h | 24 +-
 deps/v8/src/compiler/operation-typer.cc | 119 +-
 deps/v8/src/compiler/operation-typer.h | 4 +-
 deps/v8/src/compiler/operator-properties.cc | 2 +-
 deps/v8/src/compiler/operator.h | 4 +-
 deps/v8/src/compiler/pipeline.cc | 140 +-
 deps/v8/src/compiler/pipeline.h | 10 +-
 .../v8/src/compiler/ppc/code-generator-ppc.cc | 23 +-
 .../compiler/ppc/instruction-selector-ppc.cc | 7 +
 .../src/compiler/property-access-builder.cc | 2 +
 deps/v8/src/compiler/raw-machine-assembler.h | 8 +-
 .../v8/src/compiler/redundancy-elimination.cc | 116 +-
 deps/v8/src/compiler/redundancy-elimination.h | 5 +-
 deps/v8/src/compiler/register-allocator.cc | 14 +-
 deps/v8/src/compiler/representation-change.cc | 245 +-
 deps/v8/src/compiler/representation-change.h | 32 +-
 .../src/compiler/s390/code-generator-s390.cc | 13 +-
 .../s390/instruction-selector-s390.cc | 5 +
 deps/v8/src/compiler/select-lowering.cc | 2 +-
 deps/v8/src/compiler/select-lowering.h | 2 +-
 deps/v8/src/compiler/simd-scalar-lowering.cc | 3 +-
 deps/v8/src/compiler/simplified-lowering.cc | 318 +-
 deps/v8/src/compiler/simplified-lowering.h | 2 +-
 .../compiler/simplified-operator-reducer.cc | 2 +-
 deps/v8/src/compiler/simplified-operator.cc | 19 +-
 deps/v8/src/compiler/simplified-operator.h | 14 +-
 .../src/compiler/store-store-elimination.cc | 14 +-
 deps/v8/src/compiler/type-cache.h | 17 +
 .../v8/src/compiler/type-narrowing-reducer.cc | 2 +-
 deps/v8/src/compiler/typed-optimization.cc | 41 +-
 deps/v8/src/compiler/typed-optimization.h | 4 +-
 deps/v8/src/compiler/typer.cc | 38 +-
 deps/v8/src/compiler/types.cc | 55 +-
 deps/v8/src/compiler/types.h | 22 +-
 .../src/compiler/value-numbering-reducer.cc | 2 +-
 .../v8/src/compiler/value-numbering-reducer.h | 2 +-
 deps/v8/src/compiler/verifier.cc | 40 +-
 deps/v8/src/compiler/wasm-compiler.cc | 305 +-
 deps/v8/src/compiler/wasm-compiler.h | 34 +-
 .../v8/src/compiler/x64/code-generator-x64.cc | 30 +-
 .../compiler/x64/instruction-selector-x64.cc | 15 +-
 deps/v8/src/contexts.h | 103 +-
 deps/v8/src/conversions.cc | 16 +-
 deps/v8/src/counters.cc | 62 +-
 deps/v8/src/counters.h | 69 +-
 deps/v8/src/d8-posix.cc | 2 +-
 deps/v8/src/d8.cc | 138 +-
 deps/v8/src/d8.h | 7 +-
 deps/v8/src/d8.js | 2 +-
 deps/v8/src/date.cc | 2 +-
 deps/v8/src/dateparser.h | 8 +-
 deps/v8/src/debug/debug-coverage.h | 2 +-
 deps/v8/src/debug/debug-evaluate.cc | 87 +-
 deps/v8/src/debug/debug-evaluate.h | 1 -
 deps/v8/src/debug/debug-frames.cc | 1 +
 deps/v8/src/debug/debug-frames.h | 2 +-
 deps/v8/src/debug/debug-interface.h | 18 +-
 .../src/debug/debug-stack-trace-iterator.cc | 2 +-
 deps/v8/src/debug/debug-type-profile.h | 2 +-
 deps/v8/src/debug/debug.cc | 93 +-
 deps/v8/src/debug/debug.h | 15 +-
 deps/v8/src/debug/ia32/debug-ia32.cc | 20 +-
 deps/v8/src/debug/interface-types.h | 2 +-
 deps/v8/src/deoptimizer.cc | 93 +-
 deps/v8/src/deoptimizer.h | 27 +-
 deps/v8/src/disasm.h | 2 +-
 deps/v8/src/disassembler.cc | 4 +-
 deps/v8/src/elements.cc | 129 +-
 deps/v8/src/elements.h | 6 +-
 .../externalize-string-extension.cc | 6 +-
 .../extensions/externalize-string-extension.h | 4 +-
 .../v8/src/extensions/free-buffer-extension.h | 4 +-
 deps/v8/src/extensions/gc-extension.h | 4 +-
 deps/v8/src/extensions/statistics-extension.h | 4 +-
 .../extensions/trigger-failure-extension.h | 4 +-
 deps/v8/src/external-reference-table.cc | 4 +-
 deps/v8/src/external-reference-table.h | 2 +-
 deps/v8/src/external-reference.cc | 39 +-
 deps/v8/src/external-reference.h | 9 +-
 deps/v8/src/feedback-vector-inl.h | 18 +-
 deps/v8/src/feedback-vector.cc | 130 +-
 deps/v8/src/feedback-vector.h | 53 +-
 deps/v8/src/flag-definitions.h | 52 +-
 deps/v8/src/frames.cc | 4 +-
 deps/v8/src/frames.h | 21 +-
 deps/v8/src/futex-emulation.cc | 8 +-
 deps/v8/src/gdb-jit.cc | 42 +-
 deps/v8/src/globals.h | 55 +-
 deps/v8/src/handles.cc | 7 +-
 deps/v8/src/handles.h | 2 +-
 deps/v8/src/heap-symbols.h | 20 +-
 deps/v8/src/heap/array-buffer-collector.cc | 43 +-
 deps/v8/src/heap/array-buffer-collector.h | 23 +-
 deps/v8/src/heap/array-buffer-tracker-inl.h | 9 +-
 deps/v8/src/heap/array-buffer-tracker.cc | 28 +-
 deps/v8/src/heap/array-buffer-tracker.h | 4 +
 deps/v8/src/heap/concurrent-marking.cc | 121 +-
 deps/v8/src/heap/concurrent-marking.h | 5 +-
 deps/v8/src/heap/embedder-tracing.cc | 7 -
 deps/v8/src/heap/embedder-tracing.h | 3 +-
 deps/v8/src/heap/factory-inl.h | 69 +-
 deps/v8/src/heap/factory.cc | 174 +-
 deps/v8/src/heap/factory.h | 45 +-
 deps/v8/src/heap/gc-tracer.cc | 7 +-
 deps/v8/src/heap/gc-tracer.h | 3 +-
 deps/v8/src/heap/heap-controller.cc | 82 +-
 deps/v8/src/heap/heap-controller.h | 47 +-
 deps/v8/src/heap/heap-inl.h | 61 +-
 deps/v8/src/heap/heap-write-barrier-inl.h | 4 +-
 deps/v8/src/heap/heap.cc | 614 +---
 deps/v8/src/heap/heap.h | 394 ++-
 deps/v8/src/heap/incremental-marking-inl.h | 21 +-
 deps/v8/src/heap/incremental-marking.cc | 47 +-
 deps/v8/src/heap/incremental-marking.h | 21 +-
 deps/v8/src/heap/item-parallel-job.cc | 2 +-
 deps/v8/src/heap/item-parallel-job.h | 4 +-
 deps/v8/src/heap/mark-compact-inl.h | 98 +-
 deps/v8/src/heap/mark-compact.cc | 136 +-
 deps/v8/src/heap/mark-compact.h | 62 +-
 deps/v8/src/heap/object-stats.cc | 13 +-
 deps/v8/src/heap/objects-visiting-inl.h | 9 -
 deps/v8/src/heap/objects-visiting.h | 6 +-
 deps/v8/src/heap/scavenger-inl.h | 172 +-
 deps/v8/src/heap/scavenger.cc | 228 +-
 deps/v8/src/heap/scavenger.h | 69 +-
 deps/v8/src/heap/setup-heap-internal.cc | 68 +-
 deps/v8/src/heap/spaces-inl.h | 45 +
 deps/v8/src/heap/spaces.cc | 455 +--
 deps/v8/src/heap/spaces.h | 266 +-
 deps/v8/src/heap/store-buffer.cc | 8 +-
 deps/v8/src/heap/store-buffer.h | 2 +-
 deps/v8/src/heap/sweeper.cc | 6 +-
 deps/v8/src/ia32/assembler-ia32-inl.h | 4 +
 deps/v8/src/ia32/assembler-ia32.cc | 118 +-
 deps/v8/src/ia32/assembler-ia32.h | 152 +-
 deps/v8/src/ia32/code-stubs-ia32.cc | 38 +-
 deps/v8/src/ia32/codegen-ia32.cc | 32 +-
 .../v8/src/ia32/interface-descriptors-ia32.cc | 65 +-
 deps/v8/src/ia32/macro-assembler-ia32.cc | 175 +-
 deps/v8/src/ia32/macro-assembler-ia32.h | 67 +-
 deps/v8/src/ic/accessor-assembler.cc | 302 +-
 deps/v8/src/ic/accessor-assembler.h | 14 +-
 deps/v8/src/ic/binary-op-assembler.cc | 5 +-
 deps/v8/src/ic/call-optimization.h | 2 +-
 deps/v8/src/ic/handler-configuration.cc | 4 +-
 deps/v8/src/ic/handler-configuration.h | 2 +-
 deps/v8/src/ic/ic-inl.h | 7 +-
 deps/v8/src/ic/ic-stats.cc | 2 +-
 deps/v8/src/ic/ic.cc | 60 +-
 deps/v8/src/ic/ic.h | 11 +-
 deps/v8/src/ic/keyed-store-generic.cc | 306 +-
 deps/v8/src/ic/keyed-store-generic.h | 5 +
 deps/v8/src/identity-map.h | 2 +-
 deps/v8/src/inspector/injected-script.cc | 6 +-
 deps/v8/src/inspector/injected-script.h | 8 +-
 deps/v8/src/inspector/remote-object-id.h | 6 +-
 deps/v8/src/inspector/string-16.cc | 23 +-
 .../v8/src/inspector/v8-console-agent-impl.cc | 2 +-
 deps/v8/src/inspector/v8-console-message.cc | 25 +-
 deps/v8/src/inspector/v8-console.cc | 3 +-
 .../src/inspector/v8-debugger-agent-impl.cc | 24 +-
 deps/v8/src/inspector/v8-debugger-script.cc | 56 +-
 deps/v8/src/inspector/v8-debugger-script.h | 3 +-
 deps/v8/src/inspector/v8-debugger.cc | 28 +-
 deps/v8/src/inspector/v8-debugger.h | 8 +-
 .../inspector/v8-heap-profiler-agent-impl.cc | 2 +-
 deps/v8/src/inspector/v8-inspector-impl.cc | 10 +-
 deps/v8/src/inspector/v8-inspector-impl.h | 7 +-
 .../src/inspector/v8-inspector-session-impl.h | 2 +-
 deps/v8/src/inspector/v8-regex.h | 3 +-
 .../v8/src/inspector/v8-runtime-agent-impl.cc | 2 +-
 deps/v8/src/inspector/v8-schema-agent-impl.cc | 2 +-
 deps/v8/src/inspector/v8-stack-trace-impl.cc | 6 +-
 deps/v8/src/inspector/wasm-translation.cc | 6 +-
 deps/v8/src/instruction-stream.cc | 16 +-
 deps/v8/src/interface-descriptors.cc | 13 +-
 deps/v8/src/interface-descriptors.h | 87 +-
 .../interpreter/bytecode-array-accessor.cc | 13 +
 .../src/interpreter/bytecode-array-accessor.h | 2 +
 .../src/interpreter/bytecode-array-builder.cc | 13 +-
 .../src/interpreter/bytecode-array-builder.h | 6 +
 deps/v8/src/interpreter/bytecode-generator.cc | 321 +-
 deps/v8/src/interpreter/bytecode-generator.h | 13 +-
 deps/v8/src/interpreter/bytecode-operands.h | 13 +
 .../interpreter/bytecode-register-allocator.h | 4 +-
 .../interpreter/bytecode-register-optimizer.h | 6 +-
 deps/v8/src/interpreter/bytecodes.cc | 8 +-
 deps/v8/src/interpreter/bytecodes.h | 17 +-
 .../src/interpreter/constant-array-builder.h | 2 +-
 .../src/interpreter/control-flow-builders.h | 16 +-
 .../src/interpreter/handler-table-builder.h | 2 +-
 .../src/interpreter/interpreter-assembler.cc | 24 +-
 .../src/interpreter/interpreter-generator.cc | 47 +-
 .../src/interpreter/interpreter-generator.h | 11 +-
 .../interpreter-intrinsics-generator.cc | 27 -
 .../src/interpreter/interpreter-intrinsics.h | 4 -
 deps/v8/src/interpreter/interpreter.cc | 98 +-
 deps/v8/src/interpreter/interpreter.h | 9 +-
 .../interpreter/setup-interpreter-internal.cc | 102 -
 deps/v8/src/interpreter/setup-interpreter.h | 33 -
 deps/v8/src/isolate-inl.h | 11 -
 deps/v8/src/isolate.cc | 134 +-
 deps/v8/src/isolate.h | 53 +-
 deps/v8/src/js/array.js | 321 --
 deps/v8/src/js/intl.js | 1073 +------
 deps/v8/src/js/macros.py | 2 -
 deps/v8/src/json-parser.h | 4 +-
 deps/v8/src/json-stringifier.cc | 88 +-
 deps/v8/src/keys.cc | 7 +-
 deps/v8/src/keys.h | 4 +-
 deps/v8/src/libplatform/default-platform.h | 2 +-
 .../default-worker-threads-task-runner.cc | 1 +
 .../default-worker-threads-task-runner.h | 2 +-
 .../src/libplatform/tracing/trace-buffer.cc | 2 -
 .../v8/src/libplatform/tracing/trace-buffer.h | 2 +-
 .../v8/src/libplatform/tracing/trace-writer.h | 2 +-
 .../libplatform/tracing/tracing-controller.cc | 2 +-
 deps/v8/src/libplatform/worker-thread.h | 2 +-
 deps/v8/src/libsampler/sampler.cc | 2 +-
 deps/v8/src/locked-queue.h | 2 +-
 deps/v8/src/log-utils.h | 4 +-
 deps/v8/src/log.cc | 64 +-
 deps/v8/src/log.h | 50 +-
 deps/v8/src/lookup.cc | 4 +-
 deps/v8/src/lookup.h | 4 +-
 deps/v8/src/machine-type.h | 6 +
 deps/v8/src/macro-assembler.h | 2 +-
 deps/v8/src/maybe-handles-inl.h | 6 +-
 deps/v8/src/maybe-handles.h | 2 +-
 deps/v8/src/messages.cc | 90 +-
 deps/v8/src/messages.h | 17 +-
 deps/v8/src/mips/assembler-mips.cc | 14 +
 deps/v8/src/mips/assembler-mips.h | 6 +-
 deps/v8/src/mips/code-stubs-mips.cc | 10 +-
 deps/v8/src/mips/codegen-mips.cc | 32 +-
 .../v8/src/mips/interface-descriptors-mips.cc | 16 +-
 deps/v8/src/mips/macro-assembler-mips.cc | 22 +-
 deps/v8/src/mips/macro-assembler-mips.h | 22 +-
 deps/v8/src/mips64/assembler-mips64.cc | 53 +-
 deps/v8/src/mips64/assembler-mips64.h | 17 +-
 deps/v8/src/mips64/code-stubs-mips64.cc | 10 +-
 deps/v8/src/mips64/codegen-mips64.cc | 33 +-
 .../mips64/interface-descriptors-mips64.cc | 16 +-
 deps/v8/src/mips64/macro-assembler-mips64.cc | 22 +-
 deps/v8/src/mips64/macro-assembler-mips64.h | 25 +-
 deps/v8/src/objects-body-descriptors-inl.h | 117 +-
 deps/v8/src/objects-body-descriptors.h | 11 +-
 deps/v8/src/objects-debug.cc | 128 +-
 deps/v8/src/objects-definitions.h | 566 ++--
 deps/v8/src/objects-inl.h | 1165 +-------
 deps/v8/src/objects-printer.cc | 128 +-
 deps/v8/src/objects.cc | 714 +++--
 deps/v8/src/objects.h | 1740 +----------
 deps/v8/src/objects/allocation-site-inl.h | 197 ++
 deps/v8/src/objects/allocation-site.h | 186 ++
 deps/v8/src/objects/api-callbacks-inl.h | 18 +-
 deps/v8/src/objects/api-callbacks.h | 23 +-
 deps/v8/src/objects/arguments.h | 2 +-
 deps/v8/src/objects/bigint.cc | 42 +-
 deps/v8/src/objects/bigint.h | 5 +-
 deps/v8/src/objects/code-inl.h | 10 +-
 deps/v8/src/objects/code.h | 48 +-
 deps/v8/src/objects/compilation-cache.h | 11 -
 deps/v8/src/objects/debug-objects.h | 3 -
 deps/v8/src/objects/fixed-array-inl.h | 4 +-
 deps/v8/src/objects/fixed-array.h | 15 +-
 deps/v8/src/objects/hash-table-inl.h | 4 +-
 deps/v8/src/objects/hash-table.h | 2 +-
 deps/v8/src/objects/intl-objects.cc | 1196 +-------
 deps/v8/src/objects/intl-objects.h | 260 +-
 deps/v8/src/objects/js-array-buffer-inl.h | 84 +-
 deps/v8/src/objects/js-array-buffer.cc | 41 +-
 deps/v8/src/objects/js-array-buffer.h | 106 +-
 deps/v8/src/objects/js-array.h | 3 +-
 deps/v8/src/objects/js-break-iterator-inl.h | 49 +
 deps/v8/src/objects/js-break-iterator.cc | 175 ++
 deps/v8/src/objects/js-break-iterator.h | 90 +
 deps/v8/src/objects/js-collator-inl.h | 12 -
 deps/v8/src/objects/js-collator.cc | 205 +-
 deps/v8/src/objects/js-collator.h | 35 +-
 deps/v8/src/objects/js-collection.h | 3 -
 deps/v8/src/objects/js-date-time-format-inl.h | 34 +
 deps/v8/src/objects/js-date-time-format.cc | 990 +++++++
 deps/v8/src/objects/js-date-time-format.h | 106 +
 deps/v8/src/objects/js-generator.h | 2 +-
 deps/v8/src/objects/js-locale.cc | 29 +-
 deps/v8/src/objects/js-number-format-inl.h | 58 +
 deps/v8/src/objects/js-number-format.cc | 714 +++++
 deps/v8/src/objects/js-number-format.h | 139 +
 deps/v8/src/objects/js-objects-inl.h | 903 ++++++
 deps/v8/src/objects/js-objects.h | 1407 +++++++++
 deps/v8/src/objects/js-plural-rules.cc | 5 +-
 deps/v8/src/objects/js-plural-rules.h | 7 +-
 deps/v8/src/objects/js-promise.h | 2 +-
 deps/v8/src/objects/js-proxy.h | 4 +-
 .../src/objects/js-regexp-string-iterator.h | 2 +-
 .../v8/src/objects/js-relative-time-format.cc | 208 ++
 deps/v8/src/objects/js-relative-time-format.h | 15 +-
 deps/v8/src/objects/map-inl.h | 51 +-
 deps/v8/src/objects/map.h | 36 +-
 deps/v8/src/objects/maybe-object-inl.h | 54 +-
 deps/v8/src/objects/maybe-object.h | 56 +-
 deps/v8/src/objects/microtask-queue-inl.h | 28 +
 deps/v8/src/objects/microtask-queue.cc | 40 +
 deps/v8/src/objects/microtask-queue.h | 55 +
 deps/v8/src/objects/module.cc | 12 +-
 deps/v8/src/objects/module.h | 7 +-
 deps/v8/src/objects/name-inl.h | 26 +-
 deps/v8/src/objects/name.h | 39 +-
 deps/v8/src/objects/object-macros-undef.h | 2 +
 deps/v8/src/objects/object-macros.h | 6 +
 deps/v8/src/objects/ordered-hash-table-inl.h | 8 +-
 deps/v8/src/objects/ordered-hash-table.cc | 2 +-
 deps/v8/src/objects/ordered-hash-table.h | 6 +-
 deps/v8/src/objects/promise.h | 2 +
 deps/v8/src/objects/prototype-info-inl.h | 6 +-
 deps/v8/src/objects/scope-info.cc | 1 -
 deps/v8/src/objects/scope-info.h | 3 +-
 deps/v8/src/objects/script.h | 3 -
 .../v8/src/objects/shared-function-info-inl.h | 69 -
 deps/v8/src/objects/shared-function-info.h | 19 +-
 deps/v8/src/objects/string-inl.h | 26 +-
 deps/v8/src/objects/string.h | 55 +-
 deps/v8/src/objects/templates.h | 3 -
 deps/v8/src/optimized-compilation-info.cc | 86 +-
 deps/v8/src/optimized-compilation-info.h | 37 +-
 deps/v8/src/ostreams.cc | 7 -
 deps/v8/src/ostreams.h | 4 +-
 deps/v8/src/parsing/duplicate-finder.h | 2 +-
 deps/v8/src/parsing/expression-classifier.h | 19 +-
 deps/v8/src/parsing/func-name-inferrer.h | 8 +-
 deps/v8/src/parsing/parse-info.cc | 96 +-
 deps/v8/src/parsing/parse-info.h | 30 +-
 deps/v8/src/parsing/parser-base.h | 561 ++--
 deps/v8/src/parsing/parser.cc | 133 +-
 deps/v8/src/parsing/parser.h | 50 +-
 .../src/parsing/preparsed-scope-data-impl.h | 259 ++
 deps/v8/src/parsing/preparsed-scope-data.cc | 433 +--
 deps/v8/src/parsing/preparsed-scope-data.h | 163 +-
 deps/v8/src/parsing/preparser.cc | 46 +-
 deps/v8/src/parsing/preparser.h | 119 +-
 .../src/parsing/scanner-character-streams.cc | 136 +-
 deps/v8/src/parsing/scanner-inl.h | 530 +++-
 deps/v8/src/parsing/scanner.cc | 617 +---
 deps/v8/src/parsing/scanner.h | 140 +-
 deps/v8/src/parsing/token.cc | 1 -
 deps/v8/src/parsing/token.h | 205 +-
 deps/v8/src/perf-jit.h | 2 +-
 deps/v8/src/ppc/assembler-ppc.cc | 24 +-
 deps/v8/src/ppc/assembler-ppc.h | 8 +-
 deps/v8/src/ppc/code-stubs-ppc.cc | 10 +-
 deps/v8/src/ppc/codegen-ppc.cc | 17 +-
 deps/v8/src/ppc/interface-descriptors-ppc.cc | 14 +-
 deps/v8/src/ppc/macro-assembler-ppc.cc | 16 +-
 deps/v8/src/ppc/macro-assembler-ppc.h | 21 +-
 deps/v8/src/ppc/simulator-ppc.cc | 4 +-
 deps/v8/src/profiler/allocation-tracker.cc | 7 +-
 deps/v8/src/profiler/allocation-tracker.h | 2 +-
 deps/v8/src/profiler/circular-queue-inl.h | 7 +-
 deps/v8/src/profiler/cpu-profiler.h | 6 +-
 deps/v8/src/profiler/heap-profiler.cc | 5 +-
 deps/v8/src/profiler/heap-profiler.h | 2 +-
 .../src/profiler/heap-snapshot-generator.cc | 208 +-
 .../v8/src/profiler/heap-snapshot-generator.h | 82 +-
 deps/v8/src/profiler/profile-generator.cc | 12 +-
 deps/v8/src/profiler/profile-generator.h | 2 +-
 deps/v8/src/profiler/sampling-heap-profiler.h | 2 +-
 deps/v8/src/profiler/tick-sample.cc | 2 +-
 deps/v8/src/profiler/tracing-cpu-profiler.h | 2 +-
 deps/v8/src/profiler/unbound-queue.h | 4 +-
 deps/v8/src/property-details.h | 2 +-
 deps/v8/src/property.cc | 8 +-
 deps/v8/src/property.h | 14 +-
 deps/v8/src/prototype.h | 2 +-
 deps/v8/src/regexp/jsregexp.cc | 5 +-
 deps/v8/src/regexp/jsregexp.h | 179 +-
 deps/v8/src/regexp/property-sequences.cc | 1115 +++++++
 deps/v8/src/regexp/property-sequences.h | 28 +
 deps/v8/src/regexp/regexp-ast.h | 15 +-
 .../regexp/regexp-macro-assembler-tracer.cc | 5 +-
 .../regexp/regexp-macro-assembler-tracer.h | 121 +-
 deps/v8/src/regexp/regexp-macro-assembler.cc | 10 +-
 deps/v8/src/regexp/regexp-macro-assembler.h | 4 +-
 deps/v8/src/regexp/regexp-parser.cc | 149 +-
 deps/v8/src/regexp/regexp-parser.h | 12 +-
 .../regexp/x64/regexp-macro-assembler-x64.h | 117 +-
 deps/v8/src/register-configuration.h | 2 +-
 deps/v8/src/reloc-info.h | 2 +-
 deps/v8/src/roots-inl.h | 81 +-
 deps/v8/src/roots.h | 137 +-
 deps/v8/src/runtime/runtime-array.cc | 12 +-
 deps/v8/src/runtime/runtime-atomics.cc | 14 +-
 deps/v8/src/runtime/runtime-classes.cc | 8 +-
 deps/v8/src/runtime/runtime-collections.cc | 38 -
 deps/v8/src/runtime/runtime-date.cc | 7 -
 deps/v8/src/runtime/runtime-debug.cc | 23 +-
 deps/v8/src/runtime/runtime-function.cc | 80 -
 deps/v8/src/runtime/runtime-futex.cc | 2 +-
 deps/v8/src/runtime/runtime-generator.cc | 6 -
 deps/v8/src/runtime/runtime-internal.cc | 55 +-
 deps/v8/src/runtime/runtime-interpreter.cc | 1 -
 deps/v8/src/runtime/runtime-intl.cc | 398 +--
 deps/v8/src/runtime/runtime-literals.cc | 8 +-
 deps/v8/src/runtime/runtime-numbers.cc | 73 +-
 deps/v8/src/runtime/runtime-object.cc | 298 +-
 deps/v8/src/runtime/runtime-proxy.cc | 4 +-
 deps/v8/src/runtime/runtime-regexp.cc | 4 +-
 deps/v8/src/runtime/runtime-scopes.cc | 10 +-
 deps/v8/src/runtime/runtime-strings.cc | 22 +-
 deps/v8/src/runtime/runtime-test.cc | 63 +-
 deps/v8/src/runtime/runtime-typedarray.cc | 4 +-
 deps/v8/src/runtime/runtime-wasm.cc | 146 +-
 deps/v8/src/runtime/runtime.cc | 1 +
 deps/v8/src/runtime/runtime.h | 210 +-
 deps/v8/src/s390/assembler-s390.cc | 30 +-
 deps/v8/src/s390/assembler-s390.h | 7 +-
 deps/v8/src/s390/code-stubs-s390.cc | 10 +-
 deps/v8/src/s390/codegen-s390.cc | 17 +-
 .../v8/src/s390/interface-descriptors-s390.cc | 14 +-
 deps/v8/src/s390/macro-assembler-s390.cc | 15 +-
 deps/v8/src/s390/macro-assembler-s390.h | 26 +-
 deps/v8/src/safepoint-table.h | 11 +-
 deps/v8/src/setup-isolate-deserialize.cc | 13 -
 deps/v8/src/setup-isolate-full.cc | 10 -
 deps/v8/src/setup-isolate.h | 4 +-
 .../builtin-deserializer-allocator.cc | 137 +-
 .../snapshot/builtin-deserializer-allocator.h | 33 +-
 deps/v8/src/snapshot/builtin-deserializer.cc | 102 +-
 deps/v8/src/snapshot/builtin-deserializer.h | 11 +-
 deps/v8/src/snapshot/builtin-serializer.cc | 51 +-
 deps/v8/src/snapshot/builtin-serializer.h | 16 +-
 .../v8/src/snapshot/builtin-snapshot-utils.cc | 67 -
 deps/v8/src/snapshot/builtin-snapshot-utils.h | 56 -
 .../default-deserializer-allocator.cc | 5 +-
 .../snapshot/default-deserializer-allocator.h | 2 +-
 deps/v8/src/snapshot/deserializer.cc | 36 +-
 deps/v8/src/snapshot/deserializer.h | 5 +
 deps/v8/src/snapshot/mksnapshot.cc | 97 +-
 deps/v8/src/snapshot/object-deserializer.cc | 15 +
 deps/v8/src/snapshot/serializer-common.h | 2 +-
 deps/v8/src/snapshot/serializer.cc | 36 +-
 deps/v8/src/snapshot/serializer.h | 1 +
 deps/v8/src/snapshot/snapshot-common.cc | 57 +-
 deps/v8/src/snapshot/snapshot-source-sink.h | 6 +-
 deps/v8/src/snapshot/snapshot.h | 6 -
 deps/v8/src/snapshot/startup-deserializer.cc | 3 +-
 deps/v8/src/snapshot/startup-serializer.cc | 9 +-
 deps/v8/src/snapshot/startup-serializer.h | 7 +-
 deps/v8/src/splay-tree.h | 10 +-
 deps/v8/src/string-constants.cc | 188 ++
 deps/v8/src/string-constants.h | 112 +
 deps/v8/src/string-stream.h | 7 +-
 deps/v8/src/torque/ast.h | 44 +-
 deps/v8/src/torque/declarable.h | 9 +-
 deps/v8/src/torque/declaration-visitor.cc | 19 +-
 deps/v8/src/torque/declaration-visitor.h | 4 +-
 deps/v8/src/torque/declarations.cc | 10 +-
 deps/v8/src/torque/declarations.h | 7 +-
 deps/v8/src/torque/file-visitor.cc | 2 +-
 deps/v8/src/torque/implementation-visitor.cc | 125 +-
 deps/v8/src/torque/implementation-visitor.h | 18 +-
 deps/v8/src/torque/source-positions.h | 2 +-
 deps/v8/src/torque/torque-parser.cc | 69 +-
 deps/v8/src/torque/torque.cc | 5 +
 deps/v8/src/torque/types.cc | 2 +-
 deps/v8/src/torque/types.h | 4 +-
 deps/v8/src/torque/utils.cc | 80 +
 deps/v8/src/torque/utils.h | 24 +
 deps/v8/src/tracing/trace-event.h | 8 +-
 deps/v8/src/transitions-inl.h | 22 +-
 deps/v8/src/transitions.cc | 50 +-
 deps/v8/src/trap-handler/trap-handler.h | 10 +-
 deps/v8/src/turbo-assembler.cc | 7 +-
 deps/v8/src/turbo-assembler.h | 16 +-
 deps/v8/src/unicode-cache.h | 2 +-
 deps/v8/src/unicode-decoder.h | 2 +-
 deps/v8/src/unicode.h | 4 +-
 deps/v8/src/utils.cc | 26 +-
 deps/v8/src/utils.h | 244 +-
 deps/v8/src/v8threads.h | 2 +-
 deps/v8/src/value-serializer.cc | 8 +-
 deps/v8/src/vector-slot-pair.cc | 14 +-
 deps/v8/src/vector-slot-pair.h | 10 +-
 deps/v8/src/visitors.h | 20 +-
 deps/v8/src/vm-state.h | 5 +-
 .../wasm/baseline/arm/liftoff-assembler-arm.h | 34 +
 .../baseline/arm64/liftoff-assembler-arm64.h | 52 +-
 .../baseline/ia32/liftoff-assembler-ia32.h | 87 +-
 .../v8/src/wasm/baseline/liftoff-assembler.cc | 2 +-
 deps/v8/src/wasm/baseline/liftoff-assembler.h | 31 +-
 deps/v8/src/wasm/baseline/liftoff-compiler.cc | 113 +-
 deps/v8/src/wasm/baseline/liftoff-register.h | 17 +-
 .../baseline/mips/liftoff-assembler-mips.h | 132 +-
 .../mips64/liftoff-assembler-mips64.h | 79 +-
 .../wasm/baseline/ppc/liftoff-assembler-ppc.h | 34 +
 .../baseline/s390/liftoff-assembler-s390.h | 34 +
 .../wasm/baseline/x64/liftoff-assembler-x64.h | 67 +-
 deps/v8/src/wasm/decoder.h | 23 +-
 deps/v8/src/wasm/function-body-decoder-impl.h | 26 +-
 deps/v8/src/wasm/function-body-decoder.cc | 93 +-
 deps/v8/src/wasm/function-compiler.cc | 9 +-
 deps/v8/src/wasm/function-compiler.h | 3 +-
 deps/v8/src/wasm/module-compiler.cc | 258 +-
 deps/v8/src/wasm/module-compiler.h | 3 +-
 deps/v8/src/wasm/module-decoder.cc | 87 +-
 deps/v8/src/wasm/object-access.h | 36 +
 deps/v8/src/wasm/value-type.h | 3 +
 deps/v8/src/wasm/wasm-code-manager.cc | 135 +-
 deps/v8/src/wasm/wasm-code-manager.h | 50 +-
 deps/v8/src/wasm/wasm-constants.h | 11 +-
 deps/v8/src/wasm/wasm-debug.cc | 27 +-
 deps/v8/src/wasm/wasm-engine.cc | 6 +-
 deps/v8/src/wasm/wasm-engine.h | 4 +-
 deps/v8/src/wasm/wasm-interpreter.cc | 95 +-
 deps/v8/src/wasm/wasm-js.cc | 39 +-
 deps/v8/src/wasm/wasm-linkage.h | 41 +-
 deps/v8/src/wasm/wasm-memory.cc | 136 +-
 deps/v8/src/wasm/wasm-memory.h | 7 +-
 deps/v8/src/wasm/wasm-module.cc | 31 +-
 deps/v8/src/wasm/wasm-module.h | 44 +-
 deps/v8/src/wasm/wasm-objects-inl.h | 15 +-
 deps/v8/src/wasm/wasm-objects.cc | 126 +-
 deps/v8/src/wasm/wasm-objects.h | 41 +-
 deps/v8/src/wasm/wasm-opcodes.h | 3 -
 deps/v8/src/wasm/wasm-result.cc | 2 +-
 deps/v8/src/wasm/wasm-serialization.cc | 19 +-
 deps/v8/src/x64/assembler-x64.cc | 38 +-
 deps/v8/src/x64/assembler-x64.h | 12 +-
 deps/v8/src/x64/code-stubs-x64.cc | 20 +-
 deps/v8/src/x64/codegen-x64.cc | 17 +-
 deps/v8/src/x64/disasm-x64.cc | 21 +-
 deps/v8/src/x64/interface-descriptors-x64.cc | 14 +-
 deps/v8/src/x64/macro-assembler-x64.cc | 25 +-
 deps/v8/src/x64/macro-assembler-x64.h | 35 +-
 deps/v8/src/zone/zone-allocator.h | 4 +-
 deps/v8/src/zone/zone-chunk-list.h | 4 +-
 deps/v8/src/zone/zone-containers.h | 4 +-
 deps/v8/src/zone/zone.h | 12 +-
 deps/v8/test/benchmarks/testcfg.py | 2 +-
 deps/v8/test/cctest/BUILD.gn | 2 +
 deps/v8/test/cctest/cctest.cc | 21 +-
 deps/v8/test/cctest/cctest.h | 23 +-
 deps/v8/test/cctest/cctest.status | 31 +-
 deps/v8/test/cctest/compiler/call-tester.h | 4 +-
 deps/v8/test/cctest/compiler/codegen-tester.h | 6 +-
 .../cctest/compiler/graph-builder-tester.h | 2 +-
 .../cctest/compiler/test-branch-combine.cc | 8 +-
 .../cctest/compiler/test-code-assembler.cc | 2 +-
 .../cctest/compiler/test-code-generator.cc | 29 +-
 .../cctest/compiler/test-multiple-return.cc | 13 +-
 .../compiler/test-representation-change.cc | 151 +-
 .../test-run-bytecode-graph-builder.cc | 4 +-
 .../cctest/compiler/test-run-intrinsics.cc | 15 -
 .../test/cctest/compiler/test-run-machops.cc | 24 +-
 .../cctest/compiler/test-run-native-calls.cc | 90 +-
 deps/v8/test/cctest/compiler/value-helper.h | 20 +-
 deps/v8/test/cctest/heap/heap-utils.cc | 7 +-
 deps/v8/test/cctest/heap/test-alloc.cc | 74 -
 .../cctest/heap/test-array-buffer-tracker.cc | 6 +-
 .../cctest/heap/test-concurrent-marking.cc | 15 +-
 .../heap/test-external-string-tracker.cc | 6 +-
 deps/v8/test/cctest/heap/test-heap.cc | 71 +-
 .../cctest/heap/test-incremental-marking.cc | 2 +-
 deps/v8/test/cctest/heap/test-lab.cc | 2 +-
 deps/v8/test/cctest/heap/test-spaces.cc | 100 +-
 deps/v8/test/cctest/heap/test-unmapper.cc | 2 +-
 .../test/cctest/heap/test-weak-references.cc | 90 +-
 .../ArrayLiterals.golden | 150 +-
 .../AsyncGenerators.golden | 2 +-
 .../CallAndSpread.golden | 34 +-
 .../ClassDeclarations.golden | 8 +-
 .../bytecode_expectations/ForAwaitOf.golden | 8 +-
 .../bytecode_expectations/ForOf.golden | 8 +-
 .../bytecode_expectations/ForOfLoop.golden | 16 +-
 .../bytecode_expectations/Generators.golden | 2 +-
 .../IIFEWithOneshotOpt.golden | 360 ++-
 .../IIFEWithoutOneshotOpt.golden | 183 ++
 .../bytecode_expectations/NewAndSpread.golden | 34 +-
 .../ObjectLiterals.golden | 2 +-
 .../PropertyLoadStoreOneShot.golden | 24 +-
 .../PublicClassFields.golden | 10 +-
 .../StaticClassFields.golden | 20 +-
 .../SuperCallAndSpread.golden | 50 +-
 .../cctest/interpreter/interpreter-tester.cc | 2 +-
 .../cctest/interpreter/interpreter-tester.h | 2 +-
 .../interpreter/test-bytecode-generator.cc | 97 +
 .../test-interpreter-intrinsics.cc | 34 +-
 .../cctest/interpreter/test-interpreter.cc | 66 +-
 .../interpreter/test-source-positions.cc | 2 +-
 .../v8/test/cctest/libsampler/test-sampler.cc | 2 +-
 deps/v8/test/cctest/parsing/test-preparser.cc | 90 +-
 .../cctest/parsing/test-scanner-streams.cc | 184 +-
 deps/v8/test/cctest/print-extension.h | 4 +-
 deps/v8/test/cctest/profiler-extension.h | 4 +-
 deps/v8/test/cctest/scope-test-helper.h | 2 +-
 .../v8/test/cctest/setup-isolate-for-tests.cc | 9 -
 deps/v8/test/cctest/setup-isolate-for-tests.h | 4 +-
 deps/v8/test/cctest/test-accessors.cc | 2 +-
 deps/v8/test/cctest/test-allocation.cc | 20 +-
 deps/v8/test/cctest/test-api-accessors.cc | 79 +-
 deps/v8/test/cctest/test-api-interceptors.cc | 81 +-
 deps/v8/test/cctest/test-api.cc | 456 +--
 deps/v8/test/cctest/test-assembler-arm64.cc | 8 +-
 deps/v8/test/cctest/test-assembler-mips64.cc | 20 +-
 deps/v8/test/cctest/test-assembler-x64.cc | 8 +-
 deps/v8/test/cctest/test-circular-queue.cc | 2 +-
 .../test/cctest/test-code-stub-assembler.cc | 52 +-
 deps/v8/test/cctest/test-compiler.cc | 8 +-
 deps/v8/test/cctest/test-date.cc | 4 +-
 deps/v8/test/cctest/test-debug.cc | 37 +-
 deps/v8/test/cctest/test-decls.cc | 26 +-
 deps/v8/test/cctest/test-deoptimization.cc | 39 +-
 deps/v8/test/cctest/test-disasm-mips64.cc | 13 -
 deps/v8/test/cctest/test-disasm-x64.cc | 8 +-
 deps/v8/test/cctest/test-feedback-vector.cc | 70 +-
 .../test/cctest/test-field-type-tracking.cc | 31 +-
 deps/v8/test/cctest/test-heap-profiler.cc | 40 +-
 deps/v8/test/cctest/test-inspector.cc | 2 +-
 deps/v8/test/cctest/test-intl.cc | 5 +-
 deps/v8/test/cctest/test-lockers.cc | 28 +-
 deps/v8/test/cctest/test-log.cc | 25 +-
 .../test/cctest/test-macro-assembler-mips.cc | 2 +-
 .../cctest/test-macro-assembler-mips64.cc | 2 +-
 deps/v8/test/cctest/test-mementos.cc | 4 +-
 deps/v8/test/cctest/test-parsing.cc | 864 ++++--
 deps/v8/test/cctest/test-platform.cc | 2 +-
 deps/v8/test/cctest/test-regexp.cc | 8 +-
 deps/v8/test/cctest/test-roots.cc | 11 +-
 deps/v8/test/cctest/test-sampler-api.cc | 2 +-
 deps/v8/test/cctest/test-serialize.cc | 23 +-
 .../cctest/test-smi-lexicographic-compare.cc | 79 +
 deps/v8/test/cctest/test-strings.cc | 26 +-
 .../v8/test/cctest/test-thread-termination.cc | 4 +-
 deps/v8/test/cctest/test-threads.cc | 2 +-
 deps/v8/test/cctest/test-trace-event.cc | 4 +-
 deps/v8/test/cctest/test-usecounters.cc | 21 +
 deps/v8/test/cctest/test-weakmaps.cc | 6 +-
 deps/v8/test/cctest/test-weaksets.cc | 6 +-
 deps/v8/test/cctest/testcfg.py | 12 +
 deps/v8/test/cctest/torque/test-torque.cc | 12 +
 deps/v8/test/cctest/trace-extension.h | 4 +-
 deps/v8/test/cctest/unicode-helpers.cc | 31 +
 deps/v8/test/cctest/unicode-helpers.h | 27 +-
 deps/v8/test/cctest/wasm/test-c-wasm-entry.cc | 9 +-
 .../test/cctest/wasm/test-run-wasm-atomics.cc | 72 +-
 .../cctest/wasm/test-run-wasm-atomics64.cc | 262 +-
 .../test/cctest/wasm/test-run-wasm-module.cc | 10 +-
 .../test/cctest/wasm/test-wasm-breakpoints.cc | 4 +-
 .../cctest/wasm/test-wasm-shared-engine.cc | 10 +-
 deps/v8/test/cctest/wasm/test-wasm-stack.cc | 5 +-
 .../cctest/wasm/test-wasm-trap-position.cc | 6 +-
 deps/v8/test/cctest/wasm/wasm-atomics-utils.h | 8 +
 deps/v8/test/cctest/wasm/wasm-run-utils.cc | 20 +-
 deps/v8/test/cctest/wasm/wasm-run-utils.h | 4 +-
 deps/v8/test/common/assembler-tester.h | 21 +-
 deps/v8/test/common/wasm/wasm-macro-gen.h | 1 +
 .../debug/debug-break-class-fields.js | 139 +
 ...cursion.js => debug-liveedit-recursion.js} | 0
 .../debug/es6/generators-debug-scopes.js | 3 +-
 ...ebug-evaluate-no-side-effect-builtins-2.js | 2 +
 .../debug-evaluate-no-side-effect-builtins.js | 2 +
 ...g-evaluate-no-side-effect-runtime-check.js | 5 +-
 .../debug-evaluate-no-side-effect.js | 23 +-
 deps/v8/test/debugger/debugger.status | 19 +
 .../debugger/regress/regress-crbug-882664.js | 41 +
 deps/v8/test/debugger/testcfg.py | 2 +-
 deps/v8/test/fuzzer/multi-return.cc | 4 +-
 deps/v8/test/fuzzer/wasm-fuzzer-common.h | 2 +-
 ...-on-exception-compiler-errors-expected.txt | 4 +-
 .../debugger/es6-module-liveedit-expected.txt | 17 +
 .../inspector/debugger/es6-module-liveedit.js | 50 +
 .../es6-module-set-script-source-expected.txt | 8 -
 .../debugger/es6-module-set-script-source.js | 33 -
 .../debugger/eval-scopes-expected.txt | 6 +
 ...ible-breakpoints-class-fields-expected.txt | 206 ++
 .../get-possible-breakpoints-class-fields.js | 37 +
 .../resources/break-locations-class-fields.js | 204 ++
 ...kip-variables-with-empty-name-expected.txt | 6 +
 .../script-on-after-compile-expected.txt | 18 +
 .../debugger/script-on-after-compile.js | 2 +
 .../this-in-arrow-function-expected.txt | 6 +-
 ...wasm-stepping-with-source-map-expected.txt | 308 ++
 .../debugger/wasm-stepping-with-source-map.js | 123 +
 deps/v8/test/inspector/inspector-test.cc | 13 +-
 deps/v8/test/inspector/inspector.status | 19 +-
 deps/v8/test/inspector/isolate-data.cc | 2 +-
 deps/v8/test/inspector/protocol-test.js | 16 +-
 .../runtime/compile-script-expected.txt | 2 +-
 .../inspector/runtime/es6-module-expected.txt | 6 +
 .../runtime/get-properties-expected.txt | 20 +
 .../get-properties-on-proxy-expected.txt | 12 +
 .../test/inspector/runtime/get-properties.js | 5 +-
 .../internal-properties-entries-expected.txt | 108 +
 .../runtime/internal-properties-expected.txt | 82 +-
 .../runtime/stable-object-id-expected.txt | 15 +
 .../inspector/runtime/stable-object-id.js | 87 +
 .../runtime-remote-object-expected.txt | 18 +
 deps/v8/test/inspector/task-runner.h | 4 +-
 deps/v8/test/inspector/testcfg.py | 7 +
 deps/v8/test/intl/break-iterator/options.js | 13 +
 deps/v8/test/intl/break-iterator/subclass.js | 29 +
 .../supported-locales-is-method.js} | 14 +-
 deps/v8/test/intl/collator/de-sort.js | 14 +
 deps/v8/test/intl/collator/options.js | 121 +
 .../intl/date-format/constructor-order.js | 100 +
 .../intl/date-format/date-format-to-parts.js | 10 +-
 .../test/intl/date-format/format-is-bound.js | 7 +
 .../intl/date-format/timezone-conversion.js | 17 +
 deps/v8/test/intl/intl.status | 10 +
 .../test/intl/list-format/supported-locale.js | 19 +
 .../intl/number-format/format-is-bound.js | 6 +
 .../number-format/resolved-options-unwrap.js | 11 +
 .../number-format/wont-crash-by-1-or-false.js | 12 +
 .../test/intl/plural-rules/check-to-number.js | 21 +
 deps/v8/test/intl/regress-7982.js | 36 +
 .../format-to-parts-plural.js | 29 +
 .../relative-time-format/supported-locale.js | 19 +
 deps/v8/test/intl/testcfg.py | 2 +-
 .../run.js | 11 +-
 .../run.js | 13 +-
 .../run.js | 2 +-
 deps/v8/test/js-perf-test/JSTests.json | 101 +-
 .../js-perf-test/Parsing/arrowfunctions.js | 12 +-
 .../js-perf-test/SpreadCallsGeneral/run.js | 2 +-
 .../js-perf-test/TurboFan/typedLowering.js | 4 +-
 .../message/fail/class-fields-computed.js | 9 +
 .../message/fail/class-fields-computed.out | 5 +
 .../message/fail/class-fields-static-throw.js | 11 +
 .../fail/class-fields-static-throw.out | 6 +
 .../test/message/fail/class-fields-throw.js | 11 +
 .../test/message/fail/class-fields-throw.out | 7 +
 .../message/fail/map-arg-non-iterable.out | 4 +-
 .../message/fail/undefined-keyed-property.out | 4 +-
 deps/v8/test/message/message.status | 6 +
 deps/v8/test/message/testcfg.py | 7 +-
 .../message/wasm-trace-memory-interpreted.js | 2 +-
 .../test/message/wasm-trace-memory-liftoff.js | 2 +-
 deps/v8/test/message/wasm-trace-memory.js | 2 +-
 .../mjsunit/array-functions-prototype-misc.js | 40 +-
 deps/v8/test/mjsunit/array-splice.js | 18 +
 deps/v8/test/mjsunit/array-unshift.js | 11 +-
 .../mjsunit/code-coverage-class-fields.js | 199 ++
 .../mjsunit/compiler/abstract-equal-symbol.js | 135 +
 .../mjsunit/compiler/array-buffer-is-view.js | 64 +
 .../test/mjsunit/compiler/array-is-array.js | 105 +
 .../mjsunit/compiler/dataview-constant.js | 173 ++
 .../mjsunit/compiler/dataview-neutered.js | 376 +++
 .../mjsunit/compiler/dataview-nonconstant.js | 173 ++
 deps/v8/test/mjsunit/compiler/instanceof2.js | 233 ++
 deps/v8/test/mjsunit/compiler/instanceof3.js | 233 ++
 deps/v8/test/mjsunit/compiler/int64.js | 91 +
 deps/v8/test/mjsunit/compiler/math-imul.js | 76 +
 deps/v8/test/mjsunit/compiler/math-max.js | 38 +
 deps/v8/test/mjsunit/compiler/math-min.js | 38 +
 deps/v8/test/mjsunit/compiler/number-add.js | 62 +
 .../v8/test/mjsunit/compiler/number-divide.js | 207 ++
 .../mjsunit/compiler/number-issafeinteger.js | 22 +-
 .../test/mjsunit/compiler/number-modulus.js | 125 +
 .../test/mjsunit/compiler/number-subtract.js | 34 +
 .../compiler/redundancy-elimination.js | 134 +
 .../test/mjsunit/compiler/regress-884052.js | 16 +
 .../mjsunit/compiler/strict-equal-symbol.js | 50 +
 .../mjsunit/compiler/string-add-try-catch.js | 57 +
 .../d8-worker-script.js} | 26 +-
 deps/v8/test/mjsunit/d8/d8-worker-script.txt | 8 +
 .../mjsunit/d8/d8-worker-sharedarraybuffer.js | 4 +-
 .../test/mjsunit/d8/d8-worker-spawn-worker.js | 4 +-
 deps/v8/test/mjsunit/d8/d8-worker.js | 24 +-
 .../v8/test/mjsunit/es6/array-spread-holey.js | 52 +
 .../object-spread-ic-dontenum-transition.js | 26 +
 .../object-spread-ic-multiple-transitions.js | 16 +
 .../test/mjsunit/external-backing-store-gc.js | 13 +
 deps/v8/test/mjsunit/for-in-special-cases.js | 72 +-
 .../v8/test/mjsunit/harmony/atomics-notify.js | 8 +
 .../mjsunit/harmony/atomics-value-check.js | 2 +-
 .../mjsunit/harmony/bigint/comparisons.js | 24 -
 deps/v8/test/mjsunit/harmony/futex.js | 8 +-
 deps/v8/test/mjsunit/harmony/global.js | 2 +-
 .../test/mjsunit/harmony/modules-import-13.js | 1 +
 ...-property-emoji-flag-sequence-generated.js | 266 ++
 ...roperty-emoji-keycap-sequence-generated.js | 20 +
 ...perty-emoji-modifier-sequence-generated.js | 538 ++++
 ...p-property-emoji-tag-sequence-generated.js | 11 +
 ...p-property-emoji-zwj-sequence-generated.js | 782 +++++
 .../harmony/regexp-property-invalid.js | 2 +
 .../harmony/regexp-property-sequence.js | 88 +
 deps/v8/test/mjsunit/harmony/to-number.js | 15 -
 deps/v8/test/mjsunit/harmony/to-primitive.js | 54 -
 .../well-formed-json-stringify-checked.js | 2575 +++++++++++++++++
 .../well-formed-json-stringify-unchecked.js | 2575 +++++++++++++++++
 .../test/mjsunit/ignition/regress-616064.js | 2 +-
 deps/v8/test/mjsunit/json.js | 18 +-
 deps/v8/test/mjsunit/lexicographic-compare.js | 62 -
 deps/v8/test/mjsunit/mjsunit.js | 10 +-
 deps/v8/test/mjsunit/mjsunit.status | 53 +-
 deps/v8/test/mjsunit/regress/regress-3255.js | 2 +-
 deps/v8/test/mjsunit/regress/regress-4271.js | 2 +-
 deps/v8/test/mjsunit/regress/regress-4279.js | 2 +-
 .../v8/test/mjsunit/regress/regress-8133-1.js | 16 +
 .../v8/test/mjsunit/regress/regress-8133-2.js | 17 +
 .../v8/test/mjsunit/regress/regress-821368.js | 2 +-
 .../v8/test/mjsunit/regress/regress-883059.js | 7 +
 .../mjsunit/regress/regress-crbug-380671.js | 14 -
 .../mjsunit/regress/regress-crbug-503578.js | 2 +-
 .../mjsunit/regress/regress-crbug-503698.js | 2 +-
 .../mjsunit/regress/regress-crbug-503968.js | 2 +-
 .../mjsunit/regress/regress-crbug-503991.js | 2 +-
 .../mjsunit/regress/regress-crbug-504136.js | 2 +-
 .../mjsunit/regress/regress-crbug-504727.js | 2 +-
 .../mjsunit/regress/regress-crbug-504729.js | 2 +-
 .../mjsunit/regress/regress-crbug-505778.js | 2 +-
 .../mjsunit/regress/regress-crbug-506549.js | 2 +-
 .../mjsunit/regress/regress-crbug-511880.js | 4 +-
 .../mjsunit/regress/regress-crbug-514081.js | 2 +-
 .../mjsunit/regress/regress-crbug-518747.js | 2 +-
 .../mjsunit/regress/regress-crbug-522496.js | 2 +-
 .../mjsunit/regress/regress-crbug-687063.js | 31 +
 .../mjsunit/regress/regress-crbug-722871.js | 2 +-
 .../mjsunit/regress/regress-crbug-876443.js | 9 +
 .../mjsunit/regress/regress-crbug-878845.js | 11 +
 .../mjsunit/regress/regress-crbug-879560.js | 14 +
 .../mjsunit/regress/regress-crbug-879898.js | 12 +
 .../mjsunit/regress/regress-crbug-880207.js | 13 +
 .../mjsunit/regress/regress-crbug-882233-1.js | 17 +
 .../mjsunit/regress/regress-crbug-882233-2.js | 32 +
 .../mjsunit/regress/regress-crbug-884933.js | 85 +
 .../mjsunit/regress/regress-crbug-885404.js | 11 +
 .../mjsunit/regress/regress-crbug-887891.js | 10 +
 .../test/mjsunit/regress/regress-v8-7682.js | 8 +-
 .../mjsunit/regress/wasm/regress-801850.js | 2 +-
 .../mjsunit/regress/wasm/regress-803427.js | 2 +-
 .../test/mjsunit/regress/wasm/regress-8059.js | 2 +-
 .../mjsunit/regress/wasm/regress-808012.js | 2 +-
 .../mjsunit/regress/wasm/regress-808848.js | 2 +-
 .../test/mjsunit/regress/wasm/regress-8094.js | 30 +
 .../test/mjsunit/regress/wasm/regress-8095.js | 25 +
 .../mjsunit/regress/wasm/regress-810973.js | 5 +-
 .../mjsunit/regress/wasm/regress-810973b.js | 1209 ++++++++
 deps/v8/test/mjsunit/samevalue.js | 5 +-
 .../test/mjsunit/stack-traces-class-fields.js | 246 ++
 deps/v8/test/mjsunit/test-async.js | 2 +-
 deps/v8/test/mjsunit/testcfg.py | 66 +-
 deps/v8/test/mjsunit/typeof.js | 65 +-
 deps/v8/test/mjsunit/wasm/anyref.js | 13 +-
 .../test/mjsunit/wasm/bounds-check-64bit.js | 2 +-
 .../mjsunit/wasm/compare-exchange-stress.js | 208 ++
 deps/v8/test/mjsunit/wasm/data-segments.js | 2 -
 .../v8/test/mjsunit/wasm/exceptions-export.js | 81 +
 .../v8/test/mjsunit/wasm/exceptions-import.js | 96 +
 .../v8/test/mjsunit/wasm/exceptions-shared.js | 158 +
 deps/v8/test/mjsunit/wasm/exceptions.js | 930 +++---
 .../mjsunit/wasm/grow-memory-detaching.js | 2 +-
 deps/v8/test/mjsunit/wasm/grow-memory.js | 34 +-
 deps/v8/test/mjsunit/wasm/import-memory.js | 3 +
 .../mjsunit/wasm/import-mutable-global.js | 2 +-
 deps/v8/test/mjsunit/wasm/table.js | 6 +-
 .../mjsunit/wasm/trap-handler-fallback.js | 21 +-
 deps/v8/test/mjsunit/wasm/unicode.js | 2 +-
 deps/v8/test/mjsunit/wasm/wasm-constants.js | 28 +-
 .../test/mjsunit/wasm/wasm-module-builder.js | 66 +-
 .../test/mjsunit/wasm/worker-interpreter.js | 2 +-
 deps/v8/test/mjsunit/wasm/worker-memory.js | 9 +-
 deps/v8/test/mjsunit/wasm/worker-module.js | 2 +-
 deps/v8/test/mkgrokdump/mkgrokdump.cc | 50 +-
 deps/v8/test/mozilla/mozilla.status | 15 +
 deps/v8/test/mozilla/testcfg.py | 2 +-
 deps/v8/test/preparser/testcfg.py | 2 +-
 deps/v8/test/test262/harness-agent.js | 2 +-
 deps/v8/test/test262/test262.status | 248 +-
 deps/v8/test/test262/testcfg.py | 13 +-
 deps/v8/test/torque/test-torque.tq | 140 +-
 deps/v8/test/unittests/BUILD.gn | 6 +
 deps/v8/test/unittests/allocation-unittest.cc | 41 +-
 .../unittests/api/interceptor-unittest.cc | 2 +-
 .../v8/test/unittests/api/isolate-unittest.cc | 69 +
 .../unittests/asmjs/asm-types-unittest.cc | 4 +-
 .../unittests/base/address-region-unittest.cc | 64 +
 .../unittests/base/functional-unittest.cc | 2 +-
 .../platform/condition-variable-unittest.cc | 4 +-
 .../base/platform/platform-unittest.cc | 2 +-
 .../base/region-allocator-unittest.cc | 326 +++
 .../unittests/cancelable-tasks-unittest.cc | 2 +-
 .../unittests/code-stub-assembler-unittest.h | 4 +-
 .../compiler-dispatcher-unittest.cc | 590 ++--
 .../unoptimized-compile-job-unittest.cc | 190 +-
 .../compiler/bytecode-analysis-unittest.cc | 4 +-
 .../checkpoint-elimination-unittest.cc | 2 +-
 .../compiler/code-assembler-unittest.h | 4 +-
 .../common-operator-reducer-unittest.cc | 2 +-
 .../compiler/common-operator-unittest.cc | 2 +-
 .../constant-folding-reducer-unittest.cc | 2 +-
 .../control-flow-optimizer-unittest.cc | 2 +-
 .../dead-code-elimination-unittest.cc | 2 +-
 .../compiler/graph-reducer-unittest.cc | 2 +-
 .../test/unittests/compiler/graph-unittest.cc | 15 +-
 .../test/unittests/compiler/graph-unittest.h | 4 +-
 .../compiler/instruction-selector-unittest.cc | 2 +-
 .../compiler/instruction-unittest.cc | 4 +-
 .../compiler/js-call-reducer-unittest.cc | 19 +-
 .../compiler/js-create-lowering-unittest.cc | 12 +-
 .../js-intrinsic-lowering-unittest.cc | 2 +-
 ...-native-context-specialization-unittest.cc | 48 +
 .../compiler/js-typed-lowering-unittest.cc | 9 +-
 .../compiler/load-elimination-unittest.cc | 2 +-
 .../compiler/loop-peeling-unittest.cc | 2 +-
 .../machine-operator-reducer-unittest.cc | 76 +-
 .../unittests/compiler/node-cache-unittest.cc | 8 +-
 .../compiler/node-matchers-unittest.cc | 468 +--
 .../unittests/compiler/node-test-utils.cc | 48 +-
 .../test/unittests/compiler/node-test-utils.h | 16 +-
 .../redundancy-elimination-unittest.cc | 945 ++++++
 .../compiler/scheduler-rpo-unittest.cc | 2 +-
 .../compiler/simplified-lowering-unittest.cc | 2 +-
 .../simplified-operator-reducer-unittest.cc | 4 +-
 .../compiler/typed-optimization-unittest.cc | 2 +-
 .../test/unittests/compiler/typer-unittest.cc | 1 -
 deps/v8/test/unittests/counters-unittest.cc | 4 +-
 .../v8/test/unittests/heap/bitmap-unittest.cc | 2 +-
 .../heap/embedder-tracing-unittest.cc | 19 -
 .../heap/gc-idle-time-handler-unittest.cc | 4 +-
 .../test/unittests/heap/gc-tracer-unittest.cc | 16 +-
 .../heap/heap-controller-unittest.cc | 60 +-
 deps/v8/test/unittests/heap/heap-unittest.cc | 12 +
 .../heap/item-parallel-job-unittest.cc | 8 +-
 .../v8/test/unittests/heap/spaces-unittest.cc | 4 +-
 .../bytecode-array-builder-unittest.cc | 8 +-
 .../bytecode-array-iterator-unittest.cc | 4 +-
 ...bytecode-array-random-iterator-unittest.cc | 4 +-
 .../bytecode-array-writer-unittest.cc | 2 +-
 .../interpreter/bytecode-decoder-unittest.cc | 4 +-
 .../bytecode-register-allocator-unittest.cc | 2 +-
 .../bytecode-register-optimizer-unittest.cc | 2 +-
 .../constant-array-builder-unittest.cc | 4 +-
 .../interpreter-assembler-unittest.h | 4 +-
 .../libplatform/default-platform-unittest.cc | 10 +-
 .../libplatform/worker-thread-unittest.cc | 3 +-
 deps/v8/test/unittests/object-unittest.cc | 4 +-
 .../objects/microtask-queue-unittest.cc | 55 +
 .../unittests/parser/preparser-unittest.cc | 2 +-
 .../register-configuration-unittest.cc | 4 +-
 deps/v8/test/unittests/run-all-unittests.cc | 6 +-
 .../source-position-table-unittest.cc | 4 +-
 deps/v8/test/unittests/test-helpers.cc | 32 +-
 deps/v8/test/unittests/test-helpers.h | 2 +
 deps/v8/test/unittests/test-utils.cc | 39 +-
 deps/v8/test/unittests/test-utils.h | 24 +-
 deps/v8/test/unittests/unittests.status | 5 +
 deps/v8/test/unittests/utils/threaded-list.cc | 309 ++
 .../unittests/value-serializer-unittest.cc | 2 +-
 .../test/unittests/wasm/decoder-unittest.cc | 2 +-
 .../wasm/function-body-decoder-unittest.cc | 53 +-
 .../unittests/wasm/module-decoder-unittest.cc | 128 +-
 .../unittests/wasm/trap-handler-unittest.cc | 4 +-
 .../wasm/wasm-module-builder-unittest.cc | 2 +-
 deps/v8/test/wasm-spec-tests/testcfg.py | 2 +-
 deps/v8/test/webkit/array-splice.js | 5 -
 .../fast/js/toString-overrides-expected.txt | 4 +-
 deps/v8/test/webkit/testcfg.py | 2 +-
 deps/v8/test/webkit/webkit.status | 4 +-
 .../src/googletest/include/gtest/gtest_prod.h | 3 +-
 deps/v8/third_party/v8/builtins/array-sort.tq | 960 +++---
 deps/v8/tools/BUILD.gn | 4 +-
 deps/v8/tools/blink_tests/TestExpectations | 2 +
 deps/v8/tools/callstats.html | 7 +-
 deps/v8/tools/callstats.py | 37 +-
 deps/v8/tools/gen-postmortem-metadata.py | 11 +-
 .../tools/generate-header-include-checks.py | 1 -
 deps/v8/tools/grokdump.py | 6 +-
 deps/v8/tools/heap-stats/categories.js | 8 +-
 deps/v8/tools/node/fetch_deps.py | 7 +-
 deps/v8/tools/node/update_node.py | 14 +-
 deps/v8/tools/perf-to-html.py | 381 ---
 deps/v8/tools/presubmit.py | 2 +-
 deps/v8/tools/profile.js | 2 +-
 deps/v8/tools/profview/index.html | 6 +-
 deps/v8/tools/profview/profile-utils.js | 62 +-
 deps/v8/tools/profview/profview.css | 53 +
 deps/v8/tools/profview/profview.js | 277 +-
 deps/v8/tools/release/create_release.py | 2 +-
 deps/v8/tools/release/filter_build_files.py | 2 +-
 deps/v8/tools/release/git_recipes.py | 8 +-
 deps/v8/tools/release/test_scripts.py | 2 +-
 deps/v8/tools/run-clang-tidy.py | 400 +++
 deps/v8/tools/run_perf.py | 4 +-
 .../v8/tools/sanitizers/tsan_suppressions.txt | 4 +
 deps/v8/tools/test262-results-parser.js | 41 +
 deps/v8/tools/testrunner/base_runner.py | 9 +-
 deps/v8/tools/testrunner/local/android.py | 7 +-
 deps/v8/tools/testrunner/local/command.py | 2 +-
 .../tools/testrunner/objects/predictable.py | 3 +-
 deps/v8/tools/testrunner/objects/testcase.py | 90 +-
 .../syntaxes/torque.tmLanguage.json | 4 +-
 deps/v8/tools/try_perf.py | 2 +
 deps/v8/tools/turbolizer/README.md | 4 +-
 deps/v8/tools/turbolizer/rollup.config.js | 5 +-
 .../testdata/expected_test_results1.json | 24 +-
 .../testdata/testroot1/test/sweet/testcfg.py | 2 +-
 deps/v8/tools/v8heapconst.py | 452 +-
 deps/v8/tools/whitespace.txt | 2 +-
 1277 files changed, 55674 insertions(+), 30558 deletions(-)
 create mode 100644 deps/v8/.clang-tidy
 create mode 100644 deps/v8/include/v8-internal.h
 create mode 100644 deps/v8/src/allocation-site-scopes-inl.h
 create mode 100644 deps/v8/src/ast/scopes-inl.h
 create mode 100644 deps/v8/src/base/address-region.h
 create mode 100644 deps/v8/src/base/bounded-page-allocator.cc
 create mode 100644 deps/v8/src/base/bounded-page-allocator.h
 create mode 100644 deps/v8/src/base/lsan-page-allocator.cc
 create mode 100644 deps/v8/src/base/lsan-page-allocator.h
 create mode 100644 deps/v8/src/base/region-allocator.cc
 create mode 100644 deps/v8/src/base/region-allocator.h
 create mode 100644 deps/v8/src/builtins/array-lastindexof.tq
 create mode 100644 deps/v8/src/builtins/array-splice.tq
 create mode 100644 deps/v8/src/builtins/array-unshift.tq
 delete mode 100644 deps/v8/src/builtins/builtins-intl.h
 create mode 100644 deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
 delete mode 100644 deps/v8/src/interpreter/setup-interpreter-internal.cc
 delete mode 100644 deps/v8/src/interpreter/setup-interpreter.h
 create mode 100644 deps/v8/src/objects/allocation-site-inl.h
 create mode 100644 deps/v8/src/objects/allocation-site.h
 create mode 100644 deps/v8/src/objects/js-break-iterator-inl.h
 create mode 100644 deps/v8/src/objects/js-break-iterator.cc
 create mode 100644 deps/v8/src/objects/js-break-iterator.h
 create mode 100644 deps/v8/src/objects/js-date-time-format-inl.h
 create mode 100644 deps/v8/src/objects/js-date-time-format.cc
 create mode 100644 deps/v8/src/objects/js-date-time-format.h
 create mode 100644 deps/v8/src/objects/js-number-format-inl.h
 create mode 100644 deps/v8/src/objects/js-number-format.cc
 create mode 100644 deps/v8/src/objects/js-number-format.h
 create mode 100644 deps/v8/src/objects/js-objects-inl.h
 create mode 100644 deps/v8/src/objects/js-objects.h
 create mode 100644 deps/v8/src/objects/microtask-queue-inl.h
deps/v8/src/objects/microtask-queue-inl.h create mode 100644 deps/v8/src/objects/microtask-queue.cc create mode 100644 deps/v8/src/objects/microtask-queue.h create mode 100644 deps/v8/src/parsing/preparsed-scope-data-impl.h create mode 100644 deps/v8/src/regexp/property-sequences.cc create mode 100644 deps/v8/src/regexp/property-sequences.h delete mode 100644 deps/v8/src/snapshot/builtin-snapshot-utils.cc delete mode 100644 deps/v8/src/snapshot/builtin-snapshot-utils.h create mode 100644 deps/v8/src/string-constants.cc create mode 100644 deps/v8/src/string-constants.h create mode 100644 deps/v8/src/wasm/object-access.h create mode 100644 deps/v8/test/cctest/test-smi-lexicographic-compare.cc create mode 100644 deps/v8/test/cctest/unicode-helpers.cc create mode 100644 deps/v8/test/debugger/debug/debug-break-class-fields.js rename deps/v8/test/debugger/debug/{debug-live-edit-recursion.js => debug-liveedit-recursion.js} (100%) create mode 100644 deps/v8/test/debugger/regress/regress-crbug-882664.js create mode 100644 deps/v8/test/inspector/debugger/es6-module-liveedit-expected.txt create mode 100644 deps/v8/test/inspector/debugger/es6-module-liveedit.js delete mode 100644 deps/v8/test/inspector/debugger/es6-module-set-script-source-expected.txt delete mode 100644 deps/v8/test/inspector/debugger/es6-module-set-script-source.js create mode 100644 deps/v8/test/inspector/debugger/get-possible-breakpoints-class-fields-expected.txt create mode 100644 deps/v8/test/inspector/debugger/get-possible-breakpoints-class-fields.js create mode 100644 deps/v8/test/inspector/debugger/resources/break-locations-class-fields.js create mode 100644 deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt create mode 100644 deps/v8/test/inspector/debugger/wasm-stepping-with-source-map.js create mode 100644 deps/v8/test/inspector/runtime/stable-object-id-expected.txt create mode 100644 deps/v8/test/inspector/runtime/stable-object-id.js create mode 100644 deps/v8/test/intl/break-iterator/options.js create mode 100644 deps/v8/test/intl/break-iterator/subclass.js rename deps/v8/test/{mjsunit/regress/regress-2185.js => intl/break-iterator/supported-locales-is-method.js} (86%) create mode 100644 deps/v8/test/intl/collator/options.js create mode 100644 deps/v8/test/intl/date-format/constructor-order.js create mode 100644 deps/v8/test/intl/date-format/timezone-conversion.js create mode 100644 deps/v8/test/intl/list-format/supported-locale.js create mode 100644 deps/v8/test/intl/number-format/resolved-options-unwrap.js create mode 100644 deps/v8/test/intl/number-format/wont-crash-by-1-or-false.js create mode 100644 deps/v8/test/intl/plural-rules/check-to-number.js create mode 100644 deps/v8/test/intl/regress-7982.js create mode 100644 deps/v8/test/intl/relative-time-format/format-to-parts-plural.js create mode 100644 deps/v8/test/intl/relative-time-format/supported-locale.js rename deps/v8/test/js-perf-test/{ArrayLiteralInitialSpreadLarge => ArrayLiteralInitialSpreadLargePacked}/run.js (92%) rename deps/v8/test/js-perf-test/{ArrayLiteralInitialSpreadSmall => ArrayLiteralInitialSpreadSmallPacked}/run.js (98%) create mode 100644 deps/v8/test/message/fail/class-fields-computed.js create mode 100644 deps/v8/test/message/fail/class-fields-computed.out create mode 100644 deps/v8/test/message/fail/class-fields-static-throw.js create mode 100644 deps/v8/test/message/fail/class-fields-static-throw.out create mode 100644 deps/v8/test/message/fail/class-fields-throw.js create mode 100644 
deps/v8/test/message/fail/class-fields-throw.out create mode 100644 deps/v8/test/mjsunit/code-coverage-class-fields.js create mode 100644 deps/v8/test/mjsunit/compiler/abstract-equal-symbol.js create mode 100644 deps/v8/test/mjsunit/compiler/array-buffer-is-view.js create mode 100644 deps/v8/test/mjsunit/compiler/array-is-array.js create mode 100644 deps/v8/test/mjsunit/compiler/dataview-constant.js create mode 100644 deps/v8/test/mjsunit/compiler/dataview-neutered.js create mode 100644 deps/v8/test/mjsunit/compiler/dataview-nonconstant.js create mode 100644 deps/v8/test/mjsunit/compiler/instanceof2.js create mode 100644 deps/v8/test/mjsunit/compiler/instanceof3.js create mode 100644 deps/v8/test/mjsunit/compiler/int64.js create mode 100644 deps/v8/test/mjsunit/compiler/math-imul.js create mode 100644 deps/v8/test/mjsunit/compiler/math-max.js create mode 100644 deps/v8/test/mjsunit/compiler/math-min.js create mode 100644 deps/v8/test/mjsunit/compiler/number-add.js create mode 100644 deps/v8/test/mjsunit/compiler/number-divide.js create mode 100644 deps/v8/test/mjsunit/compiler/number-modulus.js create mode 100644 deps/v8/test/mjsunit/compiler/number-subtract.js create mode 100644 deps/v8/test/mjsunit/compiler/redundancy-elimination.js create mode 100644 deps/v8/test/mjsunit/compiler/regress-884052.js create mode 100644 deps/v8/test/mjsunit/compiler/strict-equal-symbol.js rename deps/v8/test/mjsunit/{regress/regress-splice-large-index.js => d8/d8-worker-script.js} (75%) create mode 100644 deps/v8/test/mjsunit/d8/d8-worker-script.txt create mode 100644 deps/v8/test/mjsunit/es6/array-spread-holey.js create mode 100644 deps/v8/test/mjsunit/es9/object-spread-ic-dontenum-transition.js create mode 100644 deps/v8/test/mjsunit/es9/object-spread-ic-multiple-transitions.js create mode 100644 deps/v8/test/mjsunit/external-backing-store-gc.js create mode 100644 deps/v8/test/mjsunit/harmony/atomics-notify.js create mode 100644 deps/v8/test/mjsunit/harmony/regexp-property-emoji-flag-sequence-generated.js create mode 100644 deps/v8/test/mjsunit/harmony/regexp-property-emoji-keycap-sequence-generated.js create mode 100644 deps/v8/test/mjsunit/harmony/regexp-property-emoji-modifier-sequence-generated.js create mode 100644 deps/v8/test/mjsunit/harmony/regexp-property-emoji-tag-sequence-generated.js create mode 100644 deps/v8/test/mjsunit/harmony/regexp-property-emoji-zwj-sequence-generated.js create mode 100644 deps/v8/test/mjsunit/harmony/regexp-property-sequence.js delete mode 100644 deps/v8/test/mjsunit/harmony/to-primitive.js create mode 100644 deps/v8/test/mjsunit/harmony/well-formed-json-stringify-checked.js create mode 100644 deps/v8/test/mjsunit/harmony/well-formed-json-stringify-unchecked.js delete mode 100644 deps/v8/test/mjsunit/lexicographic-compare.js create mode 100644 deps/v8/test/mjsunit/regress/regress-8133-1.js create mode 100644 deps/v8/test/mjsunit/regress/regress-8133-2.js create mode 100644 deps/v8/test/mjsunit/regress/regress-883059.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-380671.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-687063.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-876443.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-878845.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-879560.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-879898.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-880207.js create mode 100644 
deps/v8/test/mjsunit/regress/regress-crbug-882233-1.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-882233-2.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-884933.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-885404.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-887891.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-8094.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-8095.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-810973b.js create mode 100644 deps/v8/test/mjsunit/stack-traces-class-fields.js create mode 100644 deps/v8/test/mjsunit/wasm/compare-exchange-stress.js create mode 100644 deps/v8/test/mjsunit/wasm/exceptions-export.js create mode 100644 deps/v8/test/mjsunit/wasm/exceptions-import.js create mode 100644 deps/v8/test/mjsunit/wasm/exceptions-shared.js create mode 100644 deps/v8/test/unittests/base/address-region-unittest.cc create mode 100644 deps/v8/test/unittests/base/region-allocator-unittest.cc create mode 100644 deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc create mode 100644 deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc create mode 100644 deps/v8/test/unittests/objects/microtask-queue-unittest.cc create mode 100644 deps/v8/test/unittests/utils/threaded-list.cc delete mode 100755 deps/v8/tools/perf-to-html.py create mode 100755 deps/v8/tools/run-clang-tidy.py create mode 100644 deps/v8/tools/test262-results-parser.js diff --git a/deps/v8/.clang-tidy b/deps/v8/.clang-tidy new file mode 100644 index 00000000000000..31d7ddc750011f --- /dev/null +++ b/deps/v8/.clang-tidy @@ -0,0 +1,20 @@ +--- +--- + Checks: '-*, + modernize-redundant-void-arg, + modernize-replace-random-shuffle, + modernize-shrink-to-fit, + modernize-use-auto, + modernize-use-bool-literals, + modernize-use-equals-default, + modernize-use-equals-delete, + modernize-use-nullptr, + modernize-use-override, + google-build-explicit-make-pair, + google-explicit-constructor, + google-readability-casting' +WarningsAsErrors: '' +HeaderFilterRegex: '' +AnalyzeTemporaryDtors: false +... + diff --git a/deps/v8/.gitattributes b/deps/v8/.gitattributes index 7ef1e1b74b01e0..b3e9762a9392a1 100644 --- a/deps/v8/.gitattributes +++ b/deps/v8/.gitattributes @@ -3,3 +3,5 @@ # Do not modify line endings for binary files (which are sometimes auto # detected as text files by git). *.png binary +# Don't include minified JS in git grep/diff output +test/mjsunit/asm/sqlite3/*.js -diff diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index 511e24d90c740f..7f09c89e360f8e 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -73,7 +73,6 @@ /tools/clang /tools/gcmole/gcmole-tools /tools/gcmole/gcmole-tools.tar.gz -/tools/gyp /tools/jsfunfuzz/jsfunfuzz /tools/jsfunfuzz/jsfunfuzz.tar.gz /tools/luci-go diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 898bc8feaecd08..b935565945dad7 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -132,6 +132,7 @@ Peter Rybin Peter Varga Peter Wong Paul Lind +PhistucK Qingyan Li Qiuyi Zhang Rafal Krypa @@ -162,6 +163,7 @@ Vladimir Krivosheev Vladimir Shutoff Wiktor Garbacz Xiaoyin Liu +Yannic Bonenberger Yong Wang Yu Yin Zac Hansen diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index c6a58776cd6a81..46ebd73fba9f99 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -63,7 +63,7 @@ declare_args() { v8_enable_vtunejit = false # Sets -dENABLE_HANDLE_ZAPPING. 
- v8_enable_handle_zapping = is_debug + v8_enable_handle_zapping = true # Enable slow dchecks. v8_enable_slow_dchecks = false @@ -76,9 +76,6 @@ declare_args() { v8_enable_embedded_builtins = v8_use_snapshot && v8_current_cpu != "x86" && !is_aix && (!is_win || is_clang) - # Enable embedded bytecode handlers. - v8_enable_embedded_bytecode_handlers = false - # Enable code-generation-time checking of types in the CodeStubAssembler. v8_enable_verify_csa = false @@ -164,6 +161,17 @@ declare_args() { # setting the "check_v8_header_includes" gclient variable to run a # specific hook). v8_check_header_includes = false + + # We reuse the snapshot toolchain for building torque and other generators to + # avoid building v8_libbase on the host more than once. On mips with big + # endian, the snapshot toolchain is the target toolchain and, hence, can't be + # used. +} + +v8_generator_toolchain = v8_snapshot_toolchain +if (host_cpu == "x64" && + (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) { + v8_generator_toolchain = "//build/toolchain/linux:clang_x64" } # Derived defaults. @@ -197,9 +205,6 @@ assert( !v8_untrusted_code_mitigations, "Embedded builtins on ia32 and untrusted code mitigations are incompatible") -assert(!v8_enable_embedded_bytecode_handlers || v8_enable_embedded_builtins, - "Embedded bytecode handlers only work with embedded builtins") - # Specifies if the target build is a simulator build. Comparing target cpu # with v8 target cpu to not affect simulator builds for making cross-compile # snapshots. @@ -377,10 +382,10 @@ config("features") { defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ] } if (v8_enable_embedded_builtins) { - defines += [ "V8_EMBEDDED_BUILTINS" ] - } - if (v8_enable_embedded_bytecode_handlers) { - defines += [ "V8_EMBEDDED_BYTECODE_HANDLERS" ] + defines += [ + "V8_EMBEDDED_BUILTINS", + "V8_EMBEDDED_BYTECODE_HANDLERS", + ] } if (v8_use_multi_snapshots) { defines += [ "V8_MULTI_SNAPSHOTS" ] @@ -849,6 +854,8 @@ action("postmortem-metadata") { sources = [ "src/objects.h", "src/objects-inl.h", + "src/objects/allocation-site-inl.h", + "src/objects/allocation-site.h", "src/objects/code-inl.h", "src/objects/code.h", "src/objects/data-handler.h", @@ -859,12 +866,16 @@ action("postmortem-metadata") { "src/objects/js-array.h", "src/objects/js-array-buffer-inl.h", "src/objects/js-array-buffer.h", + "src/objects/js-objects-inl.h", + "src/objects/js-objects.h", "src/objects/js-regexp-inl.h", "src/objects/js-regexp.h", "src/objects/js-regexp-string-iterator-inl.h", "src/objects/js-regexp-string-iterator.h", "src/objects/map.h", "src/objects/map-inl.h", + "src/objects/name.h", + "src/objects/name-inl.h", "src/objects/scope-info.h", "src/objects/script.h", "src/objects/script-inl.h", @@ -887,7 +898,10 @@ torque_files = [ "src/builtins/array.tq", "src/builtins/array-copywithin.tq", "src/builtins/array-foreach.tq", + "src/builtins/array-lastindexof.tq", "src/builtins/array-reverse.tq", + "src/builtins/array-splice.tq", + "src/builtins/array-unshift.tq", "src/builtins/typed-array.tq", "src/builtins/data-view.tq", "test/torque/test-torque.tq", @@ -909,17 +923,8 @@ action("run_torque") { "test/cctest/:*", ] - # We reuse the snapshot toolchain for building torque to not build v8_libbase - # on the host more than once. On mips with big endian, the snapshot toolchain - # is the target toolchain and, hence, can't be used. 
- v8_torque_toolchain = v8_snapshot_toolchain - if (host_cpu == "x64" && - (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) { - v8_torque_toolchain = "//build/toolchain/linux:clang_x64" - } - deps = [ - ":torque($v8_torque_toolchain)", + ":torque($v8_generator_toolchain)", ] script = "tools/run.py" @@ -937,7 +942,7 @@ action("run_torque") { } args = [ - "./" + rebase_path(get_label_info(":torque($v8_torque_toolchain)", + "./" + rebase_path(get_label_info(":torque($v8_generator_toolchain)", "root_out_dir") + "/torque", root_build_dir), "-o", @@ -967,6 +972,7 @@ v8_source_set("torque_generated_initializers") { visibility = [ ":*" ] # Only targets in this file can depend on this. deps = [ + ":generate_bytecode_builtins_list", ":run_torque", ] @@ -987,6 +993,24 @@ v8_source_set("torque_generated_initializers") { configs = [ ":internal_config" ] } +action("generate_bytecode_builtins_list") { + script = "tools/run.py" + outputs = [ + "$target_gen_dir/builtins-generated/bytecodes-builtins-list.h", + ] + deps = [ + ":bytecode_builtins_list_generator($v8_generator_toolchain)", + ] + args = [ + "./" + rebase_path( + get_label_info( + ":bytecode_builtins_list_generator($v8_generator_toolchain)", + "root_out_dir") + "/bytecode_builtins_list_generator", + root_build_dir), + rebase_path("$target_gen_dir/builtins-generated/bytecodes-builtins-list.h"), + ] +} + # Template to generate different V8 snapshots based on different runtime flags. # Can be invoked with run_mksnapshot(). The target will resolve to # run_mksnapshot_. If is "default", no file suffixes will be used. @@ -1380,8 +1404,6 @@ v8_source_set("v8_initializers") { "src/interpreter/interpreter-generator.h", "src/interpreter/interpreter-intrinsics-generator.cc", "src/interpreter/interpreter-intrinsics-generator.h", - "src/interpreter/setup-interpreter-internal.cc", - "src/interpreter/setup-interpreter.h", ] if (use_jumbo_build == true) { @@ -1483,6 +1505,7 @@ v8_header_set("v8_headers") { configs = [ ":internal_config" ] sources = [ + "include/v8-internal.h", "include/v8.h", "include/v8config.h", ] @@ -1502,8 +1525,10 @@ v8_source_set("v8_base") { "//base/trace_event/common/trace_event_common.h", ### gcmole(all) ### + "$target_gen_dir/builtins-generated/bytecodes-builtins-list.h", "include/v8-inspector-protocol.h", "include/v8-inspector.h", + "include/v8-internal.h", "include/v8-platform.h", "include/v8-profiler.h", "include/v8-testing.h", @@ -1514,6 +1539,7 @@ v8_source_set("v8_base") { "src/accessors.h", "src/address-map.cc", "src/address-map.h", + "src/allocation-site-scopes-inl.h", "src/allocation-site-scopes.h", "src/allocation.cc", "src/allocation.h", @@ -1560,6 +1586,7 @@ v8_source_set("v8_base") { "src/ast/modules.h", "src/ast/prettyprinter.cc", "src/ast/prettyprinter.h", + "src/ast/scopes-inl.h", "src/ast/scopes.cc", "src/ast/scopes.h", "src/ast/variables.cc", @@ -1597,7 +1624,6 @@ v8_source_set("v8_base") { "src/builtins/builtins-internal.cc", "src/builtins/builtins-interpreter.cc", "src/builtins/builtins-intl.cc", - "src/builtins/builtins-intl.h", "src/builtins/builtins-json.cc", "src/builtins/builtins-math.cc", "src/builtins/builtins-number.cc", @@ -2179,11 +2205,17 @@ v8_source_set("v8_base") { "src/objects/js-array-buffer.h", "src/objects/js-array-inl.h", "src/objects/js-array.h", + "src/objects/js-break-iterator-inl.h", + "src/objects/js-break-iterator.cc", + "src/objects/js-break-iterator.h", "src/objects/js-collator-inl.h", "src/objects/js-collator.cc", "src/objects/js-collator.h", "src/objects/js-collection-inl.h", 
"src/objects/js-collection.h", + "src/objects/js-date-time-format-inl.h", + "src/objects/js-date-time-format.cc", + "src/objects/js-date-time-format.h", "src/objects/js-generator-inl.h", "src/objects/js-generator.h", "src/objects/js-list-format-inl.h", @@ -2192,6 +2224,11 @@ v8_source_set("v8_base") { "src/objects/js-locale-inl.h", "src/objects/js-locale.cc", "src/objects/js-locale.h", + "src/objects/js-number-format-inl.h", + "src/objects/js-number-format.cc", + "src/objects/js-number-format.h", + "src/objects/js-objects-inl.h", + "src/objects/js-objects.h", "src/objects/js-plural-rules-inl.h", "src/objects/js-plural-rules.cc", "src/objects/js-plural-rules.h", @@ -2216,6 +2253,9 @@ v8_source_set("v8_base") { "src/objects/maybe-object-inl.h", "src/objects/maybe-object.h", "src/objects/microtask-inl.h", + "src/objects/microtask-queue-inl.h", + "src/objects/microtask-queue.cc", + "src/objects/microtask-queue.h", "src/objects/microtask.h", "src/objects/module-inl.h", "src/objects/module.cc", @@ -2265,6 +2305,7 @@ v8_source_set("v8_base") { "src/parsing/parsing.cc", "src/parsing/parsing.h", "src/parsing/pattern-rewriter.cc", + "src/parsing/preparsed-scope-data-impl.h", "src/parsing/preparsed-scope-data.cc", "src/parsing/preparsed-scope-data.h", "src/parsing/preparser-logger.h", @@ -2321,6 +2362,8 @@ v8_source_set("v8_base") { "src/regexp/jsregexp-inl.h", "src/regexp/jsregexp.cc", "src/regexp/jsregexp.h", + "src/regexp/property-sequences.cc", + "src/regexp/property-sequences.h", "src/regexp/regexp-ast.cc", "src/regexp/regexp-ast.h", "src/regexp/regexp-macro-assembler-irregexp-inl.h", @@ -2393,8 +2436,6 @@ v8_source_set("v8_base") { "src/snapshot/builtin-serializer-allocator.h", "src/snapshot/builtin-serializer.cc", "src/snapshot/builtin-serializer.h", - "src/snapshot/builtin-snapshot-utils.cc", - "src/snapshot/builtin-snapshot-utils.h", "src/snapshot/code-serializer.cc", "src/snapshot/code-serializer.h", "src/snapshot/default-deserializer-allocator.cc", @@ -2437,6 +2478,8 @@ v8_source_set("v8_base") { "src/string-builder.cc", "src/string-case.cc", "src/string-case.h", + "src/string-constants.cc", + "src/string-constants.h", "src/string-hasher-inl.h", "src/string-hasher.h", "src/string-search.h", @@ -2516,6 +2559,7 @@ v8_source_set("v8_base") { "src/wasm/module-compiler.h", "src/wasm/module-decoder.cc", "src/wasm/module-decoder.h", + "src/wasm/object-access.h", "src/wasm/signature-map.cc", "src/wasm/signature-map.h", "src/wasm/streaming-decoder.cc", @@ -2867,6 +2911,7 @@ v8_source_set("v8_base") { defines = [] deps = [ + ":generate_bytecode_builtins_list", ":torque_generated_core", ":v8_headers", ":v8_libbase", @@ -2884,22 +2929,30 @@ v8_source_set("v8_base") { } else { sources -= [ "src/builtins/builtins-intl.cc", - "src/builtins/builtins-intl.h", "src/char-predicates.cc", "src/intl.cc", "src/intl.h", "src/objects/intl-objects-inl.h", "src/objects/intl-objects.cc", "src/objects/intl-objects.h", + "src/objects/js-break-iterator-inl.h", + "src/objects/js-break-iterator.cc", + "src/objects/js-break-iterator.h", "src/objects/js-collator-inl.h", "src/objects/js-collator.cc", "src/objects/js-collator.h", + "src/objects/js-date-time-format-inl.h", + "src/objects/js-date-time-format.cc", + "src/objects/js-date-time-format.h", "src/objects/js-list-format-inl.h", "src/objects/js-list-format.cc", "src/objects/js-list-format.h", "src/objects/js-locale-inl.h", "src/objects/js-locale.cc", "src/objects/js-locale.h", + "src/objects/js-number-format-inl.h", + "src/objects/js-number-format.cc", + 
"src/objects/js-number-format.h", "src/objects/js-plural-rules-inl.h", "src/objects/js-plural-rules.cc", "src/objects/js-plural-rules.h", @@ -2959,6 +3012,7 @@ v8_source_set("torque_base") { v8_component("v8_libbase") { sources = [ "src/base/adapters.h", + "src/base/address-region.h", "src/base/atomic-utils.h", "src/base/atomicops.h", "src/base/atomicops_internals_atomicword_compat.h", @@ -2967,6 +3021,8 @@ v8_component("v8_libbase") { "src/base/base-export.h", "src/base/bits.cc", "src/base/bits.h", + "src/base/bounded-page-allocator.cc", + "src/base/bounded-page-allocator.h", "src/base/build_config.h", "src/base/compiler-specific.h", "src/base/cpu.cc", @@ -2992,6 +3048,8 @@ v8_component("v8_libbase") { "src/base/list.h", "src/base/logging.cc", "src/base/logging.h", + "src/base/lsan-page-allocator.cc", + "src/base/lsan-page-allocator.h", "src/base/macros.h", "src/base/once.cc", "src/base/once.h", @@ -3008,6 +3066,8 @@ v8_component("v8_libbase") { "src/base/platform/semaphore.h", "src/base/platform/time.cc", "src/base/platform/time.h", + "src/base/region-allocator.cc", + "src/base/region-allocator.h", "src/base/ring-buffer.h", "src/base/safe_conversions.h", "src/base/safe_conversions_impl.h", @@ -3235,6 +3295,29 @@ if (v8_monolithic) { # Executables # +if (current_toolchain == v8_generator_toolchain) { + v8_executable("bytecode_builtins_list_generator") { + visibility = [ ":*" ] # Only targets in this file can depend on this. + + include_dirs = [ "." ] + + sources = [ + "src/builtins/generate-bytecodes-builtins-list.cc", + "src/interpreter/bytecode-operands.cc", + "src/interpreter/bytecode-operands.h", + "src/interpreter/bytecodes.cc", + "src/interpreter/bytecodes.h", + ] + + configs = [ ":internal_config" ] + + deps = [ + ":v8_libbase", + "//build/win:default_exe_manifest", + ] + } +} + if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) { v8_executable("mksnapshot") { visibility = [ ":*" ] # Only targets in this file can depend on this. @@ -3634,6 +3717,7 @@ v8_source_set("wasm_module_runner") { ] deps = [ + ":generate_bytecode_builtins_list", ":torque_generated_core", ] @@ -3717,6 +3801,7 @@ v8_source_set("lib_wasm_fuzzer_common") { ] deps = [ + ":generate_bytecode_builtins_list", ":torque_generated_core", ] diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 428325ad58c974..5efaf190556ec3 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,948 @@ +2018-09-21: Version 7.1.163 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.162 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.161 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.160 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.159 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.158 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.157 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.156 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.155 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.154 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.153 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.152 + + Performance and stability improvements on all platforms. 
+ + +2018-09-21: Version 7.1.151 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.150 + + Performance and stability improvements on all platforms. + + +2018-09-21: Version 7.1.149 + + Performance and stability improvements on all platforms. + + +2018-09-20: Version 7.1.148 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.147 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.146 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.145 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.144 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.143 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.142 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.141 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.140 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.139 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.138 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.137 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.136 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.135 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.134 + + Performance and stability improvements on all platforms. + + +2018-09-19: Version 7.1.133 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.132 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.131 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.130 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.129 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.128 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.127 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.126 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.125 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.124 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.123 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.122 + + Performance and stability improvements on all platforms. + + +2018-09-18: Version 7.1.121 + + Performance and stability improvements on all platforms. + + +2018-09-17: Version 7.1.120 + + Performance and stability improvements on all platforms. + + +2018-09-17: Version 7.1.119 + + Performance and stability improvements on all platforms. + + +2018-09-17: Version 7.1.118 + + Performance and stability improvements on all platforms. + + +2018-09-17: Version 7.1.117 + + Performance and stability improvements on all platforms. + + +2018-09-17: Version 7.1.116 + + Performance and stability improvements on all platforms. + + +2018-09-17: Version 7.1.115 + + Performance and stability improvements on all platforms. 
+ + +2018-09-17: Version 7.1.114 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.113 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.112 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.111 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.110 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.109 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.108 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.107 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.106 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.105 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.104 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.103 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.102 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.101 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.100 + + Performance and stability improvements on all platforms. + + +2018-09-14: Version 7.1.99 + + Performance and stability improvements on all platforms. + + +2018-09-13: Version 7.1.98 + + Performance and stability improvements on all platforms. + + +2018-09-13: Version 7.1.97 + + Performance and stability improvements on all platforms. + + +2018-09-13: Version 7.1.96 + + Performance and stability improvements on all platforms. + + +2018-09-13: Version 7.1.95 + + Performance and stability improvements on all platforms. + + +2018-09-13: Version 7.1.94 + + Performance and stability improvements on all platforms. + + +2018-09-13: Version 7.1.93 + + Performance and stability improvements on all platforms. + + +2018-09-13: Version 7.1.92 + + Performance and stability improvements on all platforms. + + +2018-09-12: Version 7.1.91 + + Performance and stability improvements on all platforms. + + +2018-09-12: Version 7.1.90 + + Performance and stability improvements on all platforms. + + +2018-09-12: Version 7.1.89 + + Performance and stability improvements on all platforms. + + +2018-09-12: Version 7.1.88 + + Performance and stability improvements on all platforms. + + +2018-09-12: Version 7.1.87 + + Performance and stability improvements on all platforms. + + +2018-09-12: Version 7.1.86 + + Performance and stability improvements on all platforms. + + +2018-09-11: Version 7.1.85 + + Performance and stability improvements on all platforms. + + +2018-09-11: Version 7.1.84 + + Performance and stability improvements on all platforms. + + +2018-09-11: Version 7.1.83 + + Performance and stability improvements on all platforms. + + +2018-09-11: Version 7.1.82 + + Performance and stability improvements on all platforms. + + +2018-09-11: Version 7.1.81 + + Performance and stability improvements on all platforms. + + +2018-09-11: Version 7.1.80 + + Performance and stability improvements on all platforms. + + +2018-09-10: Version 7.1.79 + + Performance and stability improvements on all platforms. + + +2018-09-10: Version 7.1.78 + + Performance and stability improvements on all platforms. + + +2018-09-10: Version 7.1.77 + + Performance and stability improvements on all platforms. 
+ + +2018-09-10: Version 7.1.76 + + Performance and stability improvements on all platforms. + + +2018-09-10: Version 7.1.75 + + Performance and stability improvements on all platforms. + + +2018-09-10: Version 7.1.74 + + Performance and stability improvements on all platforms. + + +2018-09-10: Version 7.1.73 + + Performance and stability improvements on all platforms. + + +2018-09-10: Version 7.1.72 + + Performance and stability improvements on all platforms. + + +2018-09-10: Version 7.1.71 + + Performance and stability improvements on all platforms. + + +2018-09-09: Version 7.1.70 + + Performance and stability improvements on all platforms. + + +2018-09-08: Version 7.1.69 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.68 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.67 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.66 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.65 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.64 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.63 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.62 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.61 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.60 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.59 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.58 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.57 + + Performance and stability improvements on all platforms. + + +2018-09-07: Version 7.1.56 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.55 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.54 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.53 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.52 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.51 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.50 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.49 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.48 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.47 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.46 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.45 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.44 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.43 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.42 + + Performance and stability improvements on all platforms. + + +2018-09-06: Version 7.1.41 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.40 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.39 + + Performance and stability improvements on all platforms. 
+ + +2018-09-05: Version 7.1.38 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.37 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.36 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.35 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.34 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.33 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.32 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.31 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.30 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.29 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.28 + + Performance and stability improvements on all platforms. + + +2018-09-05: Version 7.1.27 + + Performance and stability improvements on all platforms. + + +2018-09-04: Version 7.1.26 + + Performance and stability improvements on all platforms. + + +2018-09-04: Version 7.1.25 + + Performance and stability improvements on all platforms. + + +2018-09-04: Version 7.1.24 + + Performance and stability improvements on all platforms. + + +2018-09-04: Version 7.1.23 + + Performance and stability improvements on all platforms. + + +2018-09-04: Version 7.1.22 + + Performance and stability improvements on all platforms. + + +2018-09-04: Version 7.1.21 + + Performance and stability improvements on all platforms. + + +2018-09-04: Version 7.1.20 + + Performance and stability improvements on all platforms. + + +2018-09-04: Version 7.1.19 + + Performance and stability improvements on all platforms. + + +2018-09-04: Version 7.1.18 + + Performance and stability improvements on all platforms. + + +2018-09-04: Version 7.1.17 + + Performance and stability improvements on all platforms. + + +2018-09-04: Version 7.1.16 + + Performance and stability improvements on all platforms. + + +2018-09-03: Version 7.1.15 + + Performance and stability improvements on all platforms. + + +2018-09-03: Version 7.1.14 + + Performance and stability improvements on all platforms. + + +2018-09-03: Version 7.1.13 + + Performance and stability improvements on all platforms. + + +2018-09-03: Version 7.1.12 + + Performance and stability improvements on all platforms. + + +2018-09-03: Version 7.1.11 + + Performance and stability improvements on all platforms. + + +2018-09-03: Version 7.1.10 + + Performance and stability improvements on all platforms. + + +2018-09-02: Version 7.1.9 + + Performance and stability improvements on all platforms. + + +2018-09-02: Version 7.1.8 + + Performance and stability improvements on all platforms. + + +2018-09-02: Version 7.1.7 + + Performance and stability improvements on all platforms. + + +2018-09-01: Version 7.1.6 + + Performance and stability improvements on all platforms. + + +2018-08-31: Version 7.1.5 + + Performance and stability improvements on all platforms. + + +2018-08-31: Version 7.1.4 + + Performance and stability improvements on all platforms. + + +2018-08-31: Version 7.1.3 + + Performance and stability improvements on all platforms. + + +2018-08-31: Version 7.1.2 + + Performance and stability improvements on all platforms. + + +2018-08-31: Version 7.1.1 + + Performance and stability improvements on all platforms. 
+ + +2018-08-29: Version 7.0.302 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.301 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.300 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.299 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.298 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.297 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.296 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.295 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.294 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.293 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.292 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.291 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.290 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.289 + + Performance and stability improvements on all platforms. + + +2018-08-29: Version 7.0.288 + + Performance and stability improvements on all platforms. + + +2018-08-28: Version 7.0.287 + + Performance and stability improvements on all platforms. + + +2018-08-28: Version 7.0.286 + + Performance and stability improvements on all platforms. + + +2018-08-28: Version 7.0.285 + + Performance and stability improvements on all platforms. + + +2018-08-28: Version 7.0.284 + + Performance and stability improvements on all platforms. + + +2018-08-28: Version 7.0.283 + + Performance and stability improvements on all platforms. + + +2018-08-28: Version 7.0.282 + + Performance and stability improvements on all platforms. + + +2018-08-28: Version 7.0.281 + + Performance and stability improvements on all platforms. + + +2018-08-28: Version 7.0.280 + + Performance and stability improvements on all platforms. + + +2018-08-28: Version 7.0.279 + + Performance and stability improvements on all platforms. + + +2018-08-28: Version 7.0.278 + + Performance and stability improvements on all platforms. + + +2018-08-28: Version 7.0.277 + + Performance and stability improvements on all platforms. + + 2018-08-27: Version 7.0.276 Performance and stability improvements on all platforms. 
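[Editorial sketch, not part of the patch.] All of the ChangeLog entries above accompany the version bump this patch records in include/v8-version.h further down (7.0.276.22 to 7.1.163.0). A minimal, hedged sketch of how embedding code can gate on those version macros; the V8_AT_LEAST helper is illustrative, not a V8 API:

#include "v8-version.h"  // defines V8_MAJOR_VERSION, V8_MINOR_VERSION, ...

// Illustrative helper (not provided by V8): true when the V8 headers in
// use are at least the given major.minor version.
#define V8_AT_LEAST(major, minor)                                  \
  (V8_MAJOR_VERSION > (major) ||                                   \
   (V8_MAJOR_VERSION == (major) && V8_MINOR_VERSION >= (minor)))

#if V8_AT_LEAST(7, 1)
// Code paths that depend on behavior introduced on the 7.1 branch.
#endif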
diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 42e7a40baa5980..217ff95019f552 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -13,15 +13,13 @@ vars = { deps = { 'v8/build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'dd6b994b32b498e9e766ce60c44da0aec3a2a188', - 'v8/tools/gyp': - Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '641370b8b44f9bf4714782331cfba1994ccd41a5', 'v8/third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'aaf2cc09c6874e394c6c1e4692360cc400d6b388', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '92df4e63abbe9ffe24ff632927acea5523112aeb', 'v8/third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'a191af9d025859e8368b8b469120d78006e9f5f6', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '7ca3ffa77d635e44b9735e1b54fb9c4da3b6c821', 'v8/third_party/instrumented_libraries': - Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'd8cf40c4592dcec7fb01fcbdf1f6d4958b3fbf11', + Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'a90cbf3b4216430a437991fb53ede8e048dea454', 'v8/buildtools': Var('chromium_url') + '/chromium/buildtools.git' + '@' + '2dff9c9c74e9d732e6fe57c84ef7fd044cc45d96', 'v8/base/trace_event/common': @@ -35,25 +33,22 @@ deps = { 'condition': 'checkout_android', }, 'v8/third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'bc2c0a9307285fa36e03e7cdb6bf8623390ff855', - 'condition': 'checkout_android', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '6f7c60dde99fb258809c0a2b3253fa1d978fdde5', }, 'v8/third_party/colorama/src': { 'url': Var('chromium_url') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8', 'condition': 'checkout_android', }, 'v8/third_party/fuchsia-sdk': { - 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '3ec92c896bcbddc46e2a073ebfdd25aa1194656e', + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + 'dc90d9916a122daa525932acadab5ed578ec30cb', 'condition': 'checkout_fuchsia', }, 'v8/third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'd5266326752f0a1dadbd310932d8f4fd8c3c5e7d', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '2e68926a9d4929e9289373cd49e40ddcb9a628f7', 'v8/third_party/jinja2': Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25', 'v8/third_party/markupsafe': Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783', - 'v8/third_party/proguard': - Var('chromium_url') + '/chromium/src/third_party/proguard.git' + '@' + 'a3729bea473bb5ffc5eaf289f5733bc5e2861c07', 'v8/tools/swarming_client': Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '486c9b53c4d54dd4b95bb6ce0e31160e600dfc11', 'v8/test/benchmarks/data': @@ -61,21 +56,21 @@ deps = { 'v8/test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'v8/test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a6c1d05ac4fed084fa047e4c52ab2a8c9c2a8aef', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f10582ee661c1079f725b27561bdd211cfd953a5', 
'v8/test/test262/harness': Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd', - 'v8/third_party/qemu': { + 'v8/third_party/qemu-linux-x64': { 'packages': [ { 'package': 'fuchsia/qemu/linux-amd64', 'version': '9cc486c5b18a0be515c39a280ca9a309c54cf994' }, ], - 'condition': 'checkout_fuchsia', + 'condition': 'host_os == "linux" and checkout_fuchsia', 'dep_type': 'cipd', }, 'v8/tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'bb4146fb8a9dde405b71914657bb461dc93912ab', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '9ad74fabeb3a6635a7205ccb9abfa1b361fb324e', 'v8/tools/luci-go': Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '445d7c4b6a4f10e188edb395b132e3996b127691', 'v8/test/wasm-js': diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h index e430e7c3502741..250d5fbdb9f07f 100644 --- a/deps/v8/include/libplatform/v8-tracing.h +++ b/deps/v8/include/libplatform/v8-tracing.h @@ -35,7 +35,7 @@ class V8_PLATFORM_EXPORT TraceObject { const char* as_string; }; - TraceObject() {} + TraceObject() = default; ~TraceObject(); void Initialize( char phase, const uint8_t* category_enabled_flag, const char* name, @@ -106,8 +106,8 @@ class V8_PLATFORM_EXPORT TraceObject { class V8_PLATFORM_EXPORT TraceWriter { public: - TraceWriter() {} - virtual ~TraceWriter() {} + TraceWriter() = default; + virtual ~TraceWriter() = default; virtual void AppendTraceEvent(TraceObject* trace_event) = 0; virtual void Flush() = 0; @@ -147,8 +147,8 @@ class V8_PLATFORM_EXPORT TraceBufferChunk { class V8_PLATFORM_EXPORT TraceBuffer { public: - TraceBuffer() {} - virtual ~TraceBuffer() {} + TraceBuffer() = default; + virtual ~TraceBuffer() = default; virtual TraceObject* AddTraceEvent(uint64_t* handle) = 0; virtual TraceObject* GetEventByHandle(uint64_t handle) = 0; diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h index e06963949a5620..f0a8b5f1635adf 100644 --- a/deps/v8/include/v8-inspector.h +++ b/deps/v8/include/v8-inspector.h @@ -62,7 +62,7 @@ class V8_EXPORT StringView { class V8_EXPORT StringBuffer { public: - virtual ~StringBuffer() {} + virtual ~StringBuffer() = default; virtual const StringView& string() = 0; // This method copies contents. static std::unique_ptr create(const StringView&); @@ -107,7 +107,7 @@ class V8_EXPORT V8StackTrace { virtual StringView topScriptId() const = 0; virtual StringView topFunctionName() const = 0; - virtual ~V8StackTrace() {} + virtual ~V8StackTrace() = default; virtual std::unique_ptr buildInspectorObject() const = 0; virtual std::unique_ptr toString() const = 0; @@ -118,13 +118,13 @@ class V8_EXPORT V8StackTrace { class V8_EXPORT V8InspectorSession { public: - virtual ~V8InspectorSession() {} + virtual ~V8InspectorSession() = default; // Cross-context inspectable values (DOM nodes in different worlds, etc.). 
class V8_EXPORT Inspectable { public: virtual v8::Local get(v8::Local) = 0; - virtual ~Inspectable() {} + virtual ~Inspectable() = default; }; virtual void addInspectedObject(std::unique_ptr) = 0; @@ -162,7 +162,7 @@ class V8_EXPORT V8InspectorSession { class V8_EXPORT V8InspectorClient { public: - virtual ~V8InspectorClient() {} + virtual ~V8InspectorClient() = default; virtual void runMessageLoopOnPause(int contextGroupId) {} virtual void quitMessageLoopOnPause() {} @@ -239,7 +239,7 @@ struct V8_EXPORT V8StackTraceId { class V8_EXPORT V8Inspector { public: static std::unique_ptr create(v8::Isolate*, V8InspectorClient*); - virtual ~V8Inspector() {} + virtual ~V8Inspector() = default; // Contexts instrumentation. virtual void contextCreated(const V8ContextInfo&) = 0; @@ -277,7 +277,7 @@ class V8_EXPORT V8Inspector { // Connection. class V8_EXPORT Channel { public: - virtual ~Channel() {} + virtual ~Channel() = default; virtual void sendResponse(int callId, std::unique_ptr message) = 0; virtual void sendNotification(std::unique_ptr message) = 0; diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h new file mode 100644 index 00000000000000..80f7367bfee3e6 --- /dev/null +++ b/deps/v8/include/v8-internal.h @@ -0,0 +1,316 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef INCLUDE_V8_INTERNAL_H_ +#define INCLUDE_V8_INTERNAL_H_ + +#include +#include +#include + +#include "v8-version.h" // NOLINT(build/include) +#include "v8config.h" // NOLINT(build/include) + +namespace v8 { + +class Context; +class Data; +class Isolate; + +namespace internal { + +class Object; + +/** + * Configuration of tagging scheme. + */ +const int kApiPointerSize = sizeof(void*); // NOLINT +const int kApiDoubleSize = sizeof(double); // NOLINT +const int kApiIntSize = sizeof(int); // NOLINT +const int kApiInt64Size = sizeof(int64_t); // NOLINT + +// Tag information for HeapObject. +const int kHeapObjectTag = 1; +const int kWeakHeapObjectTag = 3; +const int kHeapObjectTagSize = 2; +const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1; + +// Tag information for Smi. +const int kSmiTag = 0; +const int kSmiTagSize = 1; +const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1; + +template +struct SmiTagging; + +template +V8_INLINE internal::Object* IntToSmi(int value) { + int smi_shift_bits = kSmiTagSize + kSmiShiftSize; + intptr_t tagged_value = + (static_cast(value) << smi_shift_bits) | kSmiTag; + return reinterpret_cast(tagged_value); +} + +// Smi constants for systems where tagged pointer is a 32-bit value. +template <> +struct SmiTagging<4> { + enum { kSmiShiftSize = 0, kSmiValueSize = 31 }; + static int SmiShiftSize() { return kSmiShiftSize; } + static int SmiValueSize() { return kSmiValueSize; } + V8_INLINE static int SmiToInt(const internal::Object* value) { + int shift_bits = kSmiTagSize + kSmiShiftSize; + // Throw away top 32 bits and shift down (requires >> to be sign extending). + return static_cast(reinterpret_cast(value)) >> shift_bits; + } + V8_INLINE static internal::Object* IntToSmi(int value) { + return internal::IntToSmi(value); + } + V8_INLINE static constexpr bool IsValidSmi(intptr_t value) { + // To be representable as an tagged small integer, the two + // most-significant bits of 'value' must be either 00 or 11 due to + // sign-extension. 
To check this we add 01 to the two + // most-significant bits, and check if the most-significant bit is 0 + // + // CAUTION: The original code below: + // bool result = ((value + 0x40000000) & 0x80000000) == 0; + // may lead to incorrect results according to the C language spec, and + // in fact doesn't work correctly with gcc4.1.1 in some cases: The + // compiler may produce undefined results in case of signed integer + // overflow. The computation must be done w/ unsigned ints. + return static_cast(value) + 0x40000000U < 0x80000000U; + } +}; + +// Smi constants for systems where tagged pointer is a 64-bit value. +template <> +struct SmiTagging<8> { + enum { kSmiShiftSize = 31, kSmiValueSize = 32 }; + static int SmiShiftSize() { return kSmiShiftSize; } + static int SmiValueSize() { return kSmiValueSize; } + V8_INLINE static int SmiToInt(const internal::Object* value) { + int shift_bits = kSmiTagSize + kSmiShiftSize; + // Shift down and throw away top 32 bits. + return static_cast(reinterpret_cast(value) >> shift_bits); + } + V8_INLINE static internal::Object* IntToSmi(int value) { + return internal::IntToSmi(value); + } + V8_INLINE static constexpr bool IsValidSmi(intptr_t value) { + // To be representable as a long smi, the value must be a 32-bit integer. + return (value == static_cast(value)); + } +}; + +#if V8_COMPRESS_POINTERS +static_assert( + kApiPointerSize == kApiInt64Size, + "Pointer compression can be enabled only for 64-bit architectures"); +typedef SmiTagging<4> PlatformSmiTagging; +#else +typedef SmiTagging PlatformSmiTagging; +#endif + +const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize; +const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize; +const int kSmiMinValue = (static_cast(-1)) << (kSmiValueSize - 1); +const int kSmiMaxValue = -(kSmiMinValue + 1); +constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; } +constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; } + +/** + * This class exports constants and functionality from within v8 that + * is necessary to implement inline functions in the v8 api. Don't + * depend on functions and constants defined here. + */ +class Internals { + public: + // These values match non-compiler-dependent values defined within + // the implementation of v8. 
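// (Editorial illustration, not part of the upstream patch.) A worked
// example of the SmiTagging<4>::IsValidSmi check defined above, assuming
// the elided cast is static_cast<uintptr_t>(value): for 31-bit Smis,
// kSmiMaxValue = 2^30 - 1 = 0x3FFFFFFF, and 0x3FFFFFFFU + 0x40000000U =
// 0x7FFFFFFFU < 0x80000000U, so it is accepted; value = 0x40000000 sums
// to exactly 0x80000000U and is rejected; kSmiMinValue = -2^30 wraps to
// 0 in unsigned arithmetic and is accepted. Performing the sum on
// unsigned values is what avoids the signed-overflow undefined behavior
// named in the CAUTION comment.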
+  static const int kHeapObjectMapOffset = 0;
+  static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
+  static const int kStringResourceOffset =
+      1 * kApiPointerSize + 2 * kApiIntSize;
+
+  static const int kOddballKindOffset = 4 * kApiPointerSize + kApiDoubleSize;
+  static const int kForeignAddressOffset = kApiPointerSize;
+  static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
+  static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
+  static const int kContextHeaderSize = 2 * kApiPointerSize;
+  static const int kContextEmbedderDataIndex = 5;
+  static const int kFullStringRepresentationMask = 0x0f;
+  static const int kStringEncodingMask = 0x8;
+  static const int kExternalTwoByteRepresentationTag = 0x02;
+  static const int kExternalOneByteRepresentationTag = 0x0a;
+
+  static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
+  static const int kExternalMemoryOffset = 4 * kApiPointerSize;
+  static const int kExternalMemoryLimitOffset =
+      kExternalMemoryOffset + kApiInt64Size;
+  static const int kExternalMemoryAtLastMarkCompactOffset =
+      kExternalMemoryLimitOffset + kApiInt64Size;
+  static const int kIsolateRootsOffset = kExternalMemoryLimitOffset +
+                                         kApiInt64Size + kApiInt64Size +
+                                         kApiPointerSize + kApiPointerSize;
+  static const int kUndefinedValueRootIndex = 4;
+  static const int kTheHoleValueRootIndex = 5;
+  static const int kNullValueRootIndex = 6;
+  static const int kTrueValueRootIndex = 7;
+  static const int kFalseValueRootIndex = 8;
+  static const int kEmptyStringRootIndex = 9;
+
+  static const int kNodeClassIdOffset = 1 * kApiPointerSize;
+  static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
+  static const int kNodeStateMask = 0x7;
+  static const int kNodeStateIsWeakValue = 2;
+  static const int kNodeStateIsPendingValue = 3;
+  static const int kNodeStateIsNearDeathValue = 4;
+  static const int kNodeIsIndependentShift = 3;
+  static const int kNodeIsActiveShift = 4;
+
+  static const int kFirstNonstringType = 0x80;
+  static const int kOddballType = 0x83;
+  static const int kForeignType = 0x87;
+  static const int kJSSpecialApiObjectType = 0x410;
+  static const int kJSApiObjectType = 0x420;
+  static const int kJSObjectType = 0x421;
+
+  static const int kUndefinedOddballKind = 5;
+  static const int kNullOddballKind = 3;
+
+  static const uint32_t kNumIsolateDataSlots = 4;
+
+  // Soft limit for AdjustAmountofExternalAllocatedMemory. Trigger an
+  // incremental GC once the external memory reaches this limit.
+ static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024; + + V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate); + V8_INLINE static void CheckInitialized(v8::Isolate* isolate) { +#ifdef V8_ENABLE_CHECKS + CheckInitializedImpl(isolate); +#endif + } + + V8_INLINE static bool HasHeapObjectTag(const internal::Object* value) { + return ((reinterpret_cast(value) & kHeapObjectTagMask) == + kHeapObjectTag); + } + + V8_INLINE static int SmiValue(const internal::Object* value) { + return PlatformSmiTagging::SmiToInt(value); + } + + V8_INLINE static internal::Object* IntToSmi(int value) { + return PlatformSmiTagging::IntToSmi(value); + } + + V8_INLINE static constexpr bool IsValidSmi(intptr_t value) { + return PlatformSmiTagging::IsValidSmi(value); + } + + V8_INLINE static int GetInstanceType(const internal::Object* obj) { + typedef internal::Object O; + O* map = ReadField(obj, kHeapObjectMapOffset); + return ReadField(map, kMapInstanceTypeOffset); + } + + V8_INLINE static int GetOddballKind(const internal::Object* obj) { + typedef internal::Object O; + return SmiValue(ReadField(obj, kOddballKindOffset)); + } + + V8_INLINE static bool IsExternalTwoByteString(int instance_type) { + int representation = (instance_type & kFullStringRepresentationMask); + return representation == kExternalTwoByteRepresentationTag; + } + + V8_INLINE static uint8_t GetNodeFlag(internal::Object** obj, int shift) { + uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; + return *addr & static_cast(1U << shift); + } + + V8_INLINE static void UpdateNodeFlag(internal::Object** obj, bool value, + int shift) { + uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; + uint8_t mask = static_cast(1U << shift); + *addr = static_cast((*addr & ~mask) | (value << shift)); + } + + V8_INLINE static uint8_t GetNodeState(internal::Object** obj) { + uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; + return *addr & kNodeStateMask; + } + + V8_INLINE static void UpdateNodeState(internal::Object** obj, uint8_t value) { + uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; + *addr = static_cast((*addr & ~kNodeStateMask) | value); + } + + V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot, + void* data) { + uint8_t* addr = reinterpret_cast(isolate) + + kIsolateEmbedderDataOffset + slot * kApiPointerSize; + *reinterpret_cast(addr) = data; + } + + V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate, + uint32_t slot) { + const uint8_t* addr = reinterpret_cast(isolate) + + kIsolateEmbedderDataOffset + slot * kApiPointerSize; + return *reinterpret_cast(addr); + } + + V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate, int index) { + uint8_t* addr = reinterpret_cast(isolate) + kIsolateRootsOffset; + return reinterpret_cast(addr + index * kApiPointerSize); + } + + template + V8_INLINE static T ReadField(const internal::Object* ptr, int offset) { + const uint8_t* addr = + reinterpret_cast(ptr) + offset - kHeapObjectTag; + return *reinterpret_cast(addr); + } + + template + V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) { + typedef internal::Object O; + typedef internal::Internals I; + O* ctx = *reinterpret_cast(context); + int embedder_data_offset = + I::kContextHeaderSize + + (internal::kApiPointerSize * I::kContextEmbedderDataIndex); + O* embedder_data = I::ReadField(ctx, embedder_data_offset); + int value_offset = + I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index); + return I::ReadField(embedder_data, 
+  }
+};
+
+// Only perform cast check for types derived from v8::Data since
+// other types do not implement the Cast method.
+template <bool PerformCheck>
+struct CastCheck {
+  template <class T>
+  static void Perform(T* data);
+};
+
+template <>
+template <class T>
+void CastCheck<true>::Perform(T* data) {
+  T::Cast(data);
+}
+
+template <>
+template <class T>
+void CastCheck<false>::Perform(T* data) {}
+
+template <class T>
+V8_INLINE void PerformCastCheck(T* data) {
+  CastCheck<std::is_base_of<Data, T>::value>::Perform(data);
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // INCLUDE_V8_INTERNAL_H_
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 9981061a44bf06..c034518defdd96 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -445,7 +445,7 @@ class V8_EXPORT OutputStream {  // NOLINT
     kContinue = 0,
     kAbort = 1
   };
-  virtual ~OutputStream() {}
+  virtual ~OutputStream() = default;
   /** Notify about the end of stream. */
   virtual void EndOfStream() = 0;
   /** Get preferred output chunk size. Called only once. */
@@ -539,7 +539,7 @@ class V8_EXPORT ActivityControl {  // NOLINT
     kContinue = 0,
     kAbort = 1
   };
-  virtual ~ActivityControl() {}
+  virtual ~ActivityControl() = default;
   /**
    * Notify about current progress. The activity can be stopped by
    * returning kAbort as the callback result.
   */
@@ -625,7 +625,7 @@ class V8_EXPORT AllocationProfile {
   */
  virtual Node* GetRootNode() = 0;

-  virtual ~AllocationProfile() {}
+  virtual ~AllocationProfile() = default;

  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
@@ -793,15 +793,15 @@ class V8_EXPORT HeapProfiler {
    virtual const char* GetName(Local<Object> object) = 0;

   protected:
-    virtual ~ObjectNameResolver() {}
+    virtual ~ObjectNameResolver() = default;
  };

  /**
   * Takes a heap snapshot and returns it.
   */
  const HeapSnapshot* TakeHeapSnapshot(
-      ActivityControl* control = NULL,
-      ObjectNameResolver* global_object_name_resolver = NULL);
+      ActivityControl* control = nullptr,
+      ObjectNameResolver* global_object_name_resolver = nullptr);

  /**
   * Starts tracking of heap objects population statistics. After calling
@@ -828,7 +828,7 @@ class V8_EXPORT HeapProfiler {
   * method.
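
The CastCheck helper above is a compile-time dispatch: the checked path is only instantiated for types derived from v8::Data, so types without a Cast method still compile. A standalone sketch of the same pattern, using toy stand-in types (Data, Value, and External below are local to the example, not V8's classes):

#include <cassert>
#include <type_traits>

struct Data {};                        // stand-in for v8::Data
struct Value : Data {
  static void Cast(Value* v) { assert(v != nullptr); }  // checked-cast hook
};
struct External {};                    // has no Cast(); check must compile out

template <bool PerformCheck>
struct CastCheck {
  template <class T>
  static void Perform(T* data);
};

template <>
template <class T>
void CastCheck<true>::Perform(T* data) {
  T::Cast(data);                       // only instantiated for Data-derived T
}

template <>
template <class T>
void CastCheck<false>::Perform(T* data) {}  // no-op for everything else

template <class T>
void PerformCastCheck(T* data) {
  CastCheck<std::is_base_of<Data, T>::value>::Perform(data);
}

int main() {
  Value v;
  External e;
  PerformCastCheck(&v);  // calls Value::Cast
  PerformCastCheck(&e);  // fine even though External has no Cast()
}
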
*/ SnapshotObjectId GetHeapStats(OutputStream* stream, - int64_t* timestamp_us = NULL); + int64_t* timestamp_us = nullptr); /** * Stops tracking of heap objects population statistics, cleans up all @@ -985,8 +985,8 @@ class V8_EXPORT RetainedObjectInfo { // NOLINT virtual intptr_t GetSizeInBytes() { return -1; } protected: - RetainedObjectInfo() {} - virtual ~RetainedObjectInfo() {} + RetainedObjectInfo() = default; + virtual ~RetainedObjectInfo() = default; private: RetainedObjectInfo(const RetainedObjectInfo&); diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h index a3069651222ee0..96c9acbbdc2b29 100644 --- a/deps/v8/include/v8-util.h +++ b/deps/v8/include/v8-util.h @@ -94,11 +94,11 @@ class DefaultPersistentValueMapTraits : public StdMapTraits { static WeakCallbackDataType* WeakCallbackParameter( MapType* map, const K& key, Local value) { - return NULL; + return nullptr; } static MapType* MapFromWeakCallbackInfo( const WeakCallbackInfo& data) { - return NULL; + return nullptr; } static K KeyFromWeakCallbackInfo( const WeakCallbackInfo& data) { @@ -302,7 +302,7 @@ class PersistentValueMapBase { static PersistentContainerValue ClearAndLeak(Global* persistent) { V* v = persistent->val_; - persistent->val_ = 0; + persistent->val_ = nullptr; return reinterpret_cast(v); } @@ -633,7 +633,7 @@ class PersistentValueVector { private: static PersistentContainerValue ClearAndLeak(Global* persistent) { V* v = persistent->val_; - persistent->val_ = 0; + persistent->val_ = nullptr; return reinterpret_cast(v); } diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index a93cd9be0c6bed..2340292f6b2f6e 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 7 -#define V8_MINOR_VERSION 0 -#define V8_BUILD_NUMBER 276 -#define V8_PATCH_LEVEL 22 +#define V8_MINOR_VERSION 1 +#define V8_BUILD_NUMBER 163 +#define V8_PATCH_LEVEL 0 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 63edc67edfd95f..3510d8499b063d 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -22,42 +22,13 @@ #include #include -#include "v8-version.h" // NOLINT(build/include) -#include "v8config.h" // NOLINT(build/include) +#include "v8-internal.h" // NOLINT(build/include) +#include "v8-version.h" // NOLINT(build/include) +#include "v8config.h" // NOLINT(build/include) // We reserve the V8_* prefix for macros defined in V8 public API and // assume there are no name conflicts with the embedder's code. -#ifdef V8_OS_WIN - -// Setup for Windows DLL export/import. When building the V8 DLL the -// BUILDING_V8_SHARED needs to be defined. When building a program which uses -// the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8 -// static library or building a program which uses the V8 static library neither -// BUILDING_V8_SHARED nor USING_V8_SHARED should be defined. -#ifdef BUILDING_V8_SHARED -# define V8_EXPORT __declspec(dllexport) -#elif USING_V8_SHARED -# define V8_EXPORT __declspec(dllimport) -#else -# define V8_EXPORT -#endif // BUILDING_V8_SHARED - -#else // V8_OS_WIN - -// Setup for Linux shared library export. 
-#if V8_HAS_ATTRIBUTE_VISIBILITY -# ifdef BUILDING_V8_SHARED -# define V8_EXPORT __attribute__ ((visibility("default"))) -# else -# define V8_EXPORT -# endif -#else -# define V8_EXPORT -#endif - -#endif // V8_OS_WIN - /** * The v8 JavaScript engine. */ @@ -153,108 +124,13 @@ template class CustomArguments; class PropertyCallbackArguments; class FunctionCallbackArguments; class GlobalHandles; +class ScopedExternalStringLock; namespace wasm { class NativeModule; class StreamingDecoder; } // namespace wasm -/** - * Configuration of tagging scheme. - */ -const int kApiPointerSize = sizeof(void*); // NOLINT -const int kApiDoubleSize = sizeof(double); // NOLINT -const int kApiIntSize = sizeof(int); // NOLINT -const int kApiInt64Size = sizeof(int64_t); // NOLINT - -// Tag information for HeapObject. -const int kHeapObjectTag = 1; -const int kWeakHeapObjectTag = 3; -const int kHeapObjectTagSize = 2; -const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1; - -// Tag information for Smi. -const int kSmiTag = 0; -const int kSmiTagSize = 1; -const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1; - -template -struct SmiTagging; - -template -V8_INLINE internal::Object* IntToSmi(int value) { - int smi_shift_bits = kSmiTagSize + kSmiShiftSize; - intptr_t tagged_value = - (static_cast(value) << smi_shift_bits) | kSmiTag; - return reinterpret_cast(tagged_value); -} - -// Smi constants for systems where tagged pointer is a 32-bit value. -template <> -struct SmiTagging<4> { - enum { kSmiShiftSize = 0, kSmiValueSize = 31 }; - static int SmiShiftSize() { return kSmiShiftSize; } - static int SmiValueSize() { return kSmiValueSize; } - V8_INLINE static int SmiToInt(const internal::Object* value) { - int shift_bits = kSmiTagSize + kSmiShiftSize; - // Throw away top 32 bits and shift down (requires >> to be sign extending). - return static_cast(reinterpret_cast(value)) >> shift_bits; - } - V8_INLINE static internal::Object* IntToSmi(int value) { - return internal::IntToSmi(value); - } - V8_INLINE static constexpr bool IsValidSmi(intptr_t value) { - // To be representable as an tagged small integer, the two - // most-significant bits of 'value' must be either 00 or 11 due to - // sign-extension. To check this we add 01 to the two - // most-significant bits, and check if the most-significant bit is 0 - // - // CAUTION: The original code below: - // bool result = ((value + 0x40000000) & 0x80000000) == 0; - // may lead to incorrect results according to the C language spec, and - // in fact doesn't work correctly with gcc4.1.1 in some cases: The - // compiler may produce undefined results in case of signed integer - // overflow. The computation must be done w/ unsigned ints. - return static_cast(value) + 0x40000000U < 0x80000000U; - } -}; - -// Smi constants for systems where tagged pointer is a 64-bit value. -template <> -struct SmiTagging<8> { - enum { kSmiShiftSize = 31, kSmiValueSize = 32 }; - static int SmiShiftSize() { return kSmiShiftSize; } - static int SmiValueSize() { return kSmiValueSize; } - V8_INLINE static int SmiToInt(const internal::Object* value) { - int shift_bits = kSmiTagSize + kSmiShiftSize; - // Shift down and throw away top 32 bits. - return static_cast(reinterpret_cast(value) >> shift_bits); - } - V8_INLINE static internal::Object* IntToSmi(int value) { - return internal::IntToSmi(value); - } - V8_INLINE static constexpr bool IsValidSmi(intptr_t value) { - // To be representable as a long smi, the value must be a 32-bit integer. 
- return (value == static_cast(value)); - } -}; - -#if V8_COMPRESS_POINTERS -static_assert( - kApiPointerSize == kApiInt64Size, - "Pointer compression can be enabled only for 64-bit architectures"); -typedef SmiTagging<4> PlatformSmiTagging; -#else -typedef SmiTagging PlatformSmiTagging; -#endif - -const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize; -const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize; -const int kSmiMinValue = (static_cast(-1)) << (kSmiValueSize - 1); -const int kSmiMaxValue = -(kSmiMinValue + 1); -constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; } -constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; } - } // namespace internal namespace debug { @@ -302,7 +178,7 @@ class ConsoleCallArguments; template class Local { public: - V8_INLINE Local() : val_(0) {} + V8_INLINE Local() : val_(nullptr) {} template V8_INLINE Local(Local that) : val_(reinterpret_cast(*that)) { @@ -317,12 +193,12 @@ class Local { /** * Returns true if the handle is empty. */ - V8_INLINE bool IsEmpty() const { return val_ == 0; } + V8_INLINE bool IsEmpty() const { return val_ == nullptr; } /** * Sets the handle to be empty. IsEmpty() will then return true. */ - V8_INLINE void Clear() { val_ = 0; } + V8_INLINE void Clear() { val_ = nullptr; } V8_INLINE T* operator->() const { return val_; } @@ -338,8 +214,8 @@ class Local { V8_INLINE bool operator==(const Local& that) const { internal::Object** a = reinterpret_cast(this->val_); internal::Object** b = reinterpret_cast(that.val_); - if (a == 0) return b == 0; - if (b == 0) return false; + if (a == nullptr) return b == nullptr; + if (b == nullptr) return false; return *a == *b; } @@ -347,8 +223,8 @@ class Local { const PersistentBase& that) const { internal::Object** a = reinterpret_cast(this->val_); internal::Object** b = reinterpret_cast(that.val_); - if (a == 0) return b == 0; - if (b == 0) return false; + if (a == nullptr) return b == nullptr; + if (b == nullptr) return false; return *a == *b; } @@ -592,7 +468,7 @@ template class PersistentBase { template V8_INLINE void Reset(Isolate* isolate, const PersistentBase& other); - V8_INLINE bool IsEmpty() const { return val_ == NULL; } + V8_INLINE bool IsEmpty() const { return val_ == nullptr; } V8_INLINE void Empty() { val_ = 0; } V8_INLINE Local Get(Isolate* isolate) const { @@ -603,8 +479,8 @@ template class PersistentBase { V8_INLINE bool operator==(const PersistentBase& that) const { internal::Object** a = reinterpret_cast(this->val_); internal::Object** b = reinterpret_cast(that.val_); - if (a == NULL) return b == NULL; - if (b == NULL) return false; + if (a == nullptr) return b == nullptr; + if (b == nullptr) return false; return *a == *b; } @@ -612,8 +488,8 @@ template class PersistentBase { V8_INLINE bool operator==(const Local& that) const { internal::Object** a = reinterpret_cast(this->val_); internal::Object** b = reinterpret_cast(that.val_); - if (a == NULL) return b == NULL; - if (b == NULL) return false; + if (a == nullptr) return b == nullptr; + if (b == nullptr) return false; return *a == *b; } @@ -786,7 +662,7 @@ template class Persistent : public PersistentBase { /** * A Persistent with no storage cell. */ - V8_INLINE Persistent() : PersistentBase(0) { } + V8_INLINE Persistent() : PersistentBase(nullptr) {} /** * Construct a Persistent from a Local. 
* When the Local is non-empty, a new storage cell is created @@ -813,7 +689,7 @@ template class Persistent : public PersistentBase { * traits class is called, allowing the setting of flags based on the * copied Persistent. */ - V8_INLINE Persistent(const Persistent& that) : PersistentBase(0) { + V8_INLINE Persistent(const Persistent& that) : PersistentBase(nullptr) { Copy(that); } template @@ -979,7 +855,7 @@ class V8_EXPORT HandleScope { void operator=(const HandleScope&) = delete; protected: - V8_INLINE HandleScope() {} + V8_INLINE HandleScope() = default; void Initialize(Isolate* isolate); @@ -1019,7 +895,7 @@ class V8_EXPORT HandleScope { class V8_EXPORT EscapableHandleScope : public HandleScope { public: explicit EscapableHandleScope(Isolate* isolate); - V8_INLINE ~EscapableHandleScope() {} + V8_INLINE ~EscapableHandleScope() = default; /** * Pushes the value into the previous scope and returns a handle to it. @@ -1389,7 +1265,7 @@ class V8_EXPORT ScriptCompiler { }; CachedData() - : data(NULL), + : data(nullptr), length(0), rejected(false), buffer_policy(BufferNotOwned) {} @@ -1420,9 +1296,9 @@ class V8_EXPORT ScriptCompiler { public: // Source takes ownership of CachedData. V8_INLINE Source(Local source_string, const ScriptOrigin& origin, - CachedData* cached_data = NULL); + CachedData* cached_data = nullptr); V8_INLINE Source(Local source_string, - CachedData* cached_data = NULL); + CachedData* cached_data = nullptr); V8_INLINE ~Source(); // Ownership of the CachedData or its buffers is *not* transferred to the @@ -1461,7 +1337,7 @@ class V8_EXPORT ScriptCompiler { */ class V8_EXPORT ExternalSourceStream { public: - virtual ~ExternalSourceStream() {} + virtual ~ExternalSourceStream() = default; /** * V8 calls this to request the next chunk of data from the embedder. This @@ -1539,7 +1415,7 @@ class V8_EXPORT ScriptCompiler { */ class ScriptStreamingTask { public: - virtual ~ScriptStreamingTask() {} + virtual ~ScriptStreamingTask() = default; virtual void Run() = 0; }; @@ -1945,6 +1821,11 @@ struct SampleInfo { // executing an external callback. }; +struct MemoryRange { + const void* start; + size_t length_in_bytes; +}; + /** * A JSON Parser and Stringifier. */ @@ -1957,9 +1838,9 @@ class V8_EXPORT JSON { * \param json_string The string to parse. * \return The corresponding value if successfully parsed. */ - static V8_DEPRECATE_SOON("Use the maybe version taking context", - MaybeLocal Parse(Isolate* isolate, - Local json_string)); + static V8_DEPRECATED("Use the maybe version taking context", + MaybeLocal Parse(Isolate* isolate, + Local json_string)); static V8_WARN_UNUSED_RESULT MaybeLocal Parse( Local context, Local json_string); @@ -1987,7 +1868,7 @@ class V8_EXPORT ValueSerializer { public: class V8_EXPORT Delegate { public: - virtual ~Delegate() {} + virtual ~Delegate() = default; /** * Handles the case where a DataCloneError would be thrown in the structured @@ -2059,7 +1940,7 @@ class V8_EXPORT ValueSerializer { * Returns the stored data. This serializer should not be used once the buffer * is released. The contents are undefined if a previous write has failed. */ - V8_DEPRECATE_SOON("Use Release()", std::vector ReleaseBuffer()); + V8_DEPRECATED("Use Release()", std::vector ReleaseBuffer()); /** * Returns the stored data (allocated using the delegate's @@ -2080,10 +1961,10 @@ class V8_EXPORT ValueSerializer { /** * Similar to TransferArrayBuffer, but for SharedArrayBuffer. 
*/ - V8_DEPRECATE_SOON("Use Delegate::GetSharedArrayBufferId", - void TransferSharedArrayBuffer( - uint32_t transfer_id, - Local shared_array_buffer)); + V8_DEPRECATED("Use Delegate::GetSharedArrayBufferId", + void TransferSharedArrayBuffer( + uint32_t transfer_id, + Local shared_array_buffer)); /** * Indicate whether to treat ArrayBufferView objects as host objects, @@ -2124,7 +2005,7 @@ class V8_EXPORT ValueDeserializer { public: class V8_EXPORT Delegate { public: - virtual ~Delegate() {} + virtual ~Delegate() = default; /** * The embedder overrides this method to read some kind of host object, if @@ -2720,7 +2601,7 @@ class V8_EXPORT String : public Name { int length = -1, int options = NO_OPTIONS) const; // UTF-8 encoded characters. int WriteUtf8(Isolate* isolate, char* buffer, int length = -1, - int* nchars_ref = NULL, int options = NO_OPTIONS) const; + int* nchars_ref = nullptr, int options = NO_OPTIONS) const; /** * A zero length string. @@ -2739,12 +2620,31 @@ class V8_EXPORT String : public Name { class V8_EXPORT ExternalStringResourceBase { // NOLINT public: - virtual ~ExternalStringResourceBase() {} + virtual ~ExternalStringResourceBase() = default; - virtual bool IsCompressible() const { return false; } + V8_DEPRECATE_SOON("Use IsCacheable().", + virtual bool IsCompressible() const) { + return false; + } + + /** + * If a string is cacheable, the value returned by + * ExternalStringResource::data() may be cached, otherwise it is not + * expected to be stable beyond the current top-level task. + */ + virtual bool IsCacheable() const { +#if __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + return !IsCompressible(); +#if __clang__ +#pragma clang diagnostic pop +#endif + } protected: - ExternalStringResourceBase() {} + ExternalStringResourceBase() = default; /** * Internally V8 will call this Dispose method when the external string @@ -2754,6 +2654,24 @@ class V8_EXPORT String : public Name { */ virtual void Dispose() { delete this; } + /** + * For a non-cacheable string, the value returned by + * |ExternalStringResource::data()| has to be stable between |Lock()| and + * |Unlock()|, that is the string must behave as is |IsCacheable()| returned + * true. + * + * These two functions must be thread-safe, and can be called from anywhere. + * They also must handle lock depth, in the sense that each can be called + * several times, from different threads, and unlocking should only happen + * when the balance of Lock() and Unlock() calls is 0. + */ + virtual void Lock() const {} + + /** + * Unlocks the string. + */ + virtual void Unlock() const {} + // Disallow copying and assigning. ExternalStringResourceBase(const ExternalStringResourceBase&) = delete; void operator=(const ExternalStringResourceBase&) = delete; @@ -2761,6 +2679,7 @@ class V8_EXPORT String : public Name { private: friend class internal::Heap; friend class v8::String; + friend class internal::ScopedExternalStringLock; }; /** @@ -2776,7 +2695,7 @@ class V8_EXPORT String : public Name { * Override the destructor to manage the life cycle of the underlying * buffer. */ - virtual ~ExternalStringResource() {} + ~ExternalStringResource() override = default; /** * The string data from the underlying buffer. 
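
The IsCacheable()/Lock()/Unlock() protocol introduced above lets V8 pin a non-cacheable resource's buffer while the data pointer is in use. A minimal sketch of a conforming resource, assuming only the declarations in this header; the class name and the depth-counter pinning policy are illustrative, not part of the patch:

#include <atomic>
#include <string>
#include <v8.h>

class PinnedResource : public v8::String::ExternalOneByteStringResource {
 public:
  explicit PinnedResource(std::string s) : data_(std::move(s)) {}

  // data() is only guaranteed stable while the lock depth is nonzero.
  bool IsCacheable() const override { return false; }
  const char* data() const override { return data_.c_str(); }
  size_t length() const override { return data_.size(); }

  // Lock()/Unlock() must be thread-safe and must balance; a simple
  // atomic depth counter satisfies both requirements.
  void Lock() const override { ++lock_depth_; }
  void Unlock() const override { --lock_depth_; }  // 0 means unpinned again

 private:
  std::string data_;
  mutable std::atomic<int> lock_depth_{0};
};
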
@@ -2789,7 +2708,7 @@ class V8_EXPORT String : public Name { virtual size_t length() const = 0; protected: - ExternalStringResource() {} + ExternalStringResource() = default; }; /** @@ -2809,13 +2728,13 @@ class V8_EXPORT String : public Name { * Override the destructor to manage the life cycle of the underlying * buffer. */ - virtual ~ExternalOneByteStringResource() {} + ~ExternalOneByteStringResource() override = default; /** The string data from the underlying buffer.*/ virtual const char* data() const = 0; /** The number of Latin-1 characters in the string.*/ virtual size_t length() const = 0; protected: - ExternalOneByteStringResource() {} + ExternalOneByteStringResource() = default; }; /** @@ -3036,6 +2955,7 @@ class V8_EXPORT Symbol : public Name { static Local ForApi(Isolate *isolate, Local name); // Well-known symbols + static Local GetAsyncIterator(Isolate* isolate); static Local GetHasInstance(Isolate* isolate); static Local GetIsConcatSpreadable(Isolate* isolate); static Local GetIterator(Isolate* isolate); @@ -3273,10 +3193,17 @@ enum PropertyFilter { * Options for marking whether callbacks may trigger JS-observable side effects. * Side-effect-free callbacks are whitelisted during debug evaluation with * throwOnSideEffect. It applies when calling a Function, FunctionTemplate, - * or an Accessor's getter callback. For Interceptors, please see + * or an Accessor callback. For Interceptors, please see * PropertyHandlerFlags's kHasNoSideEffect. + * Callbacks that only cause side effects to the receiver are whitelisted if + * invoked on receiver objects that are created within the same debug-evaluate + * call, as these objects are temporary and the side effect does not escape. */ -enum class SideEffectType { kHasSideEffect, kHasNoSideEffect }; +enum class SideEffectType { + kHasSideEffect, + kHasNoSideEffect, + kHasSideEffectToReceiver +}; /** * Keys/Properties filter enums: @@ -3414,10 +3341,12 @@ class V8_EXPORT Object : public Value { */ V8_WARN_UNUSED_RESULT Maybe SetAccessor( Local context, Local name, - AccessorNameGetterCallback getter, AccessorNameSetterCallback setter = 0, + AccessorNameGetterCallback getter, + AccessorNameSetterCallback setter = nullptr, MaybeLocal data = MaybeLocal(), AccessControl settings = DEFAULT, PropertyAttribute attribute = None, - SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, + SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); void SetAccessorProperty(Local name, Local getter, Local setter = Local(), @@ -3433,7 +3362,8 @@ class V8_EXPORT Object : public Value { AccessorNameGetterCallback getter, AccessorNameSetterCallback setter = nullptr, Local data = Local(), PropertyAttribute attributes = None, - SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, + SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); /** * Attempts to create a property with the given name which behaves like a data @@ -3447,7 +3377,8 @@ class V8_EXPORT Object : public Value { Local context, Local name, AccessorNameGetterCallback getter, Local data = Local(), PropertyAttribute attributes = None, - SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, + SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); /** * Functionality 
for private properties. @@ -4323,13 +4254,6 @@ class V8_EXPORT WasmCompiledModule : public Object { public: typedef std::pair, size_t> SerializedModule; -// The COMMA macro allows us to use ',' inside of the V8_DEPRECATED macro. -#define COMMA , - V8_DEPRECATED( - "Use BufferReference.", - typedef std::pair CallerOwnedBuffer); -#undef COMMA - /** * A unowned reference to a byte buffer. */ @@ -4338,12 +4262,6 @@ class V8_EXPORT WasmCompiledModule : public Object { size_t size; BufferReference(const uint8_t* start, size_t size) : start(start), size(size) {} - // Temporarily allow conversion to and from CallerOwnedBuffer. - V8_DEPRECATED( - "Use BufferReference directly.", - inline BufferReference(CallerOwnedBuffer)); // NOLINT(runtime/explicit) - V8_DEPRECATED("Use BufferReference directly.", - inline operator CallerOwnedBuffer()); }; /** @@ -4390,8 +4308,6 @@ class V8_EXPORT WasmCompiledModule : public Object { * Get the wasm-encoded bytes that were used to compile this module. */ BufferReference GetWasmWireBytesRef(); - V8_DEPRECATED("Use GetWasmWireBytesRef version.", - Local GetWasmWireBytes()); /** * Serialize the compiled module. The serialized data does not include the @@ -4424,15 +4340,6 @@ class V8_EXPORT WasmCompiledModule : public Object { static void CheckCast(Value* obj); }; -// TODO(clemensh): Remove after M70 branch. -WasmCompiledModule::BufferReference::BufferReference( - WasmCompiledModule::CallerOwnedBuffer buf) - : BufferReference(buf.first, buf.second) {} -WasmCompiledModule::BufferReference:: -operator WasmCompiledModule::CallerOwnedBuffer() { - return {start, size}; -} - /** * The V8 interface for WebAssembly streaming compilation. When streaming * compilation is initiated, V8 passes a {WasmStreaming} object to the embedder @@ -4497,7 +4404,7 @@ class V8_EXPORT WasmModuleObjectBuilderStreaming final { void Abort(MaybeLocal exception); Local GetPromise(); - ~WasmModuleObjectBuilderStreaming(); + ~WasmModuleObjectBuilderStreaming() = default; private: WasmModuleObjectBuilderStreaming(const WasmModuleObjectBuilderStreaming&) = @@ -4556,7 +4463,7 @@ class V8_EXPORT ArrayBuffer : public Object { */ class V8_EXPORT Allocator { // NOLINT public: - virtual ~Allocator() {} + virtual ~Allocator() = default; /** * Allocate |length| bytes. Return NULL if allocation is not successful. 
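
For context, a minimal ArrayBuffer::Allocator implementation looks like the sketch below. Only the destructor appears in this hunk; the three virtual methods are declared elsewhere in v8.h, so treat their exact signatures here as an assumption:

#include <cstdlib>
#include <v8.h>

// malloc-backed allocator; Allocate() must return zero-initialized memory.
class MallocAllocator : public v8::ArrayBuffer::Allocator {
 public:
  void* Allocate(size_t length) override { return calloc(length, 1); }
  void* AllocateUninitialized(size_t length) override { return malloc(length); }
  void Free(void* data, size_t) override { free(data); }
};
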
@@ -5133,8 +5040,8 @@ class V8_EXPORT SharedArrayBuffer : public Object { */ class V8_EXPORT Date : public Object { public: - static V8_DEPRECATE_SOON("Use maybe version.", - Local New(Isolate* isolate, double time)); + static V8_DEPRECATED("Use maybe version.", + Local New(Isolate* isolate, double time)); static V8_WARN_UNUSED_RESULT MaybeLocal New(Local context, double time); @@ -5377,20 +5284,22 @@ class V8_EXPORT Template : public Data { */ void SetNativeDataProperty( Local name, AccessorGetterCallback getter, - AccessorSetterCallback setter = 0, + AccessorSetterCallback setter = nullptr, // TODO(dcarney): gcc can't handle Local below Local data = Local(), PropertyAttribute attribute = None, Local signature = Local(), AccessControl settings = DEFAULT, - SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, + SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); void SetNativeDataProperty( Local name, AccessorNameGetterCallback getter, - AccessorNameSetterCallback setter = 0, + AccessorNameSetterCallback setter = nullptr, // TODO(dcarney): gcc can't handle Local below Local data = Local(), PropertyAttribute attribute = None, Local signature = Local(), AccessControl settings = DEFAULT, - SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, + SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); /** * Like SetNativeDataProperty, but V8 will replace the native data property @@ -5399,7 +5308,8 @@ class V8_EXPORT Template : public Data { void SetLazyDataProperty( Local name, AccessorNameGetterCallback getter, Local data = Local(), PropertyAttribute attribute = None, - SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, + SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); /** * During template instantiation, sets the value with the intrinsic property @@ -5760,7 +5670,7 @@ class V8_EXPORT FunctionTemplate : public Template { public: /** Creates a function template.*/ static Local New( - Isolate* isolate, FunctionCallback callback = 0, + Isolate* isolate, FunctionCallback callback = nullptr, Local data = Local(), Local signature = Local(), int length = 0, ConstructorBehavior behavior = ConstructorBehavior::kAllow, @@ -5941,11 +5851,11 @@ struct NamedPropertyHandlerConfiguration { NamedPropertyHandlerConfiguration( /** Note: getter is required */ - GenericNamedPropertyGetterCallback getter = 0, - GenericNamedPropertySetterCallback setter = 0, - GenericNamedPropertyQueryCallback query = 0, - GenericNamedPropertyDeleterCallback deleter = 0, - GenericNamedPropertyEnumeratorCallback enumerator = 0, + GenericNamedPropertyGetterCallback getter = nullptr, + GenericNamedPropertySetterCallback setter = nullptr, + GenericNamedPropertyQueryCallback query = nullptr, + GenericNamedPropertyDeleterCallback deleter = nullptr, + GenericNamedPropertyEnumeratorCallback enumerator = nullptr, Local data = Local(), PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) : getter(getter), @@ -5953,8 +5863,8 @@ struct NamedPropertyHandlerConfiguration { query(query), deleter(deleter), enumerator(enumerator), - definer(0), - descriptor(0), + definer(nullptr), + descriptor(nullptr), data(data), flags(flags) {} @@ -5969,7 +5879,7 @@ struct NamedPropertyHandlerConfiguration { 
PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) : getter(getter), setter(setter), - query(0), + query(nullptr), deleter(deleter), enumerator(enumerator), definer(definer), @@ -6011,11 +5921,11 @@ struct IndexedPropertyHandlerConfiguration { IndexedPropertyHandlerConfiguration( /** Note: getter is required */ - IndexedPropertyGetterCallback getter = 0, - IndexedPropertySetterCallback setter = 0, - IndexedPropertyQueryCallback query = 0, - IndexedPropertyDeleterCallback deleter = 0, - IndexedPropertyEnumeratorCallback enumerator = 0, + IndexedPropertyGetterCallback getter = nullptr, + IndexedPropertySetterCallback setter = nullptr, + IndexedPropertyQueryCallback query = nullptr, + IndexedPropertyDeleterCallback deleter = nullptr, + IndexedPropertyEnumeratorCallback enumerator = nullptr, Local data = Local(), PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) : getter(getter), @@ -6023,8 +5933,8 @@ struct IndexedPropertyHandlerConfiguration { query(query), deleter(deleter), enumerator(enumerator), - definer(0), - descriptor(0), + definer(nullptr), + descriptor(nullptr), data(data), flags(flags) {} @@ -6039,7 +5949,7 @@ struct IndexedPropertyHandlerConfiguration { PropertyHandlerFlags flags = PropertyHandlerFlags::kNone) : getter(getter), setter(setter), - query(0), + query(nullptr), deleter(deleter), enumerator(enumerator), definer(definer), @@ -6111,16 +6021,20 @@ class V8_EXPORT ObjectTemplate : public Template { */ void SetAccessor( Local name, AccessorGetterCallback getter, - AccessorSetterCallback setter = 0, Local data = Local(), - AccessControl settings = DEFAULT, PropertyAttribute attribute = None, + AccessorSetterCallback setter = nullptr, + Local data = Local(), AccessControl settings = DEFAULT, + PropertyAttribute attribute = None, Local signature = Local(), - SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, + SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); void SetAccessor( Local name, AccessorNameGetterCallback getter, - AccessorNameSetterCallback setter = 0, Local data = Local(), - AccessControl settings = DEFAULT, PropertyAttribute attribute = None, + AccessorNameSetterCallback setter = nullptr, + Local data = Local(), AccessControl settings = DEFAULT, + PropertyAttribute attribute = None, Local signature = Local(), - SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect); + SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, + SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); /** * Sets a named property handler on the object template. 
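
The effect of the new setter_side_effect_type parameter on ObjectTemplate::SetAccessor can be seen in a registration call such as the sketch below; the property name and callback are illustrative, and the positional defaults follow the signature shown in this hunk:

#include <v8.h>

void GetVersion(v8::Local<v8::String> name,
                const v8::PropertyCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(
      v8::String::NewFromUtf8(info.GetIsolate(), "7.1.163",
                              v8::NewStringType::kNormal)
          .ToLocalChecked());
}

void InstallVersionAccessor(v8::Isolate* isolate,
                            v8::Local<v8::ObjectTemplate> tmpl) {
  tmpl->SetAccessor(
      v8::String::NewFromUtf8(isolate, "version", v8::NewStringType::kNormal)
          .ToLocalChecked(),
      GetVersion, nullptr, v8::Local<v8::Value>(), v8::DEFAULT, v8::None,
      v8::Local<v8::AccessorSignature>(),
      v8::SideEffectType::kHasNoSideEffect,  // getter safe during debug-evaluate
      v8::SideEffectType::kHasSideEffect);   // default for the (absent) setter
}
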
@@ -6154,10 +6068,10 @@ class V8_EXPORT ObjectTemplate : public Template { // TODO(dcarney): deprecate void SetIndexedPropertyHandler( IndexedPropertyGetterCallback getter, - IndexedPropertySetterCallback setter = 0, - IndexedPropertyQueryCallback query = 0, - IndexedPropertyDeleterCallback deleter = 0, - IndexedPropertyEnumeratorCallback enumerator = 0, + IndexedPropertySetterCallback setter = nullptr, + IndexedPropertyQueryCallback query = nullptr, + IndexedPropertyDeleterCallback deleter = nullptr, + IndexedPropertyEnumeratorCallback enumerator = nullptr, Local data = Local()) { SetHandler(IndexedPropertyHandlerConfiguration(getter, setter, query, deleter, enumerator, data)); @@ -6297,11 +6211,11 @@ V8_DEPRECATE_SOON("Implementation detail", class) V8_EXPORT ExternalOneByteStringResourceImpl : public String::ExternalOneByteStringResource { public: - ExternalOneByteStringResourceImpl() : data_(0), length_(0) {} + ExternalOneByteStringResourceImpl() : data_(nullptr), length_(0) {} ExternalOneByteStringResourceImpl(const char* data, size_t length) : data_(data), length_(length) {} - const char* data() const { return data_; } - size_t length() const { return length_; } + const char* data() const override { return data_; } + size_t length() const override { return length_; } private: const char* data_; @@ -6315,11 +6229,8 @@ class V8_EXPORT Extension { // NOLINT public: // Note that the strings passed into this constructor must live as long // as the Extension itself. - Extension(const char* name, - const char* source = 0, - int dep_count = 0, - const char** deps = 0, - int source_length = -1); + Extension(const char* name, const char* source = nullptr, int dep_count = 0, + const char** deps = nullptr, int source_length = -1); virtual ~Extension() { delete source_; } virtual Local GetNativeFunctionTemplate( Isolate* isolate, Local name) { @@ -6391,14 +6302,14 @@ class V8_EXPORT ResourceConstraints { uint64_t virtual_memory_limit); // Returns the max semi-space size in MB. - V8_DEPRECATE_SOON("Use max_semi_space_size_in_kb()", - size_t max_semi_space_size()) { + V8_DEPRECATED("Use max_semi_space_size_in_kb()", + size_t max_semi_space_size()) { return max_semi_space_size_in_kb_ / 1024; } // Sets the max semi-space size in MB. 
- V8_DEPRECATE_SOON("Use set_max_semi_space_size_in_kb(size_t limit_in_kb)", - void set_max_semi_space_size(size_t limit_in_mb)) { + V8_DEPRECATED("Use set_max_semi_space_size_in_kb(size_t limit_in_kb)", + void set_max_semi_space_size(size_t limit_in_mb)) { max_semi_space_size_in_kb_ = limit_in_mb * 1024; } @@ -6416,12 +6327,12 @@ class V8_EXPORT ResourceConstraints { void set_max_old_space_size(size_t limit_in_mb) { max_old_space_size_ = limit_in_mb; } - V8_DEPRECATE_SOON("max_executable_size_ is subsumed by max_old_space_size_", - size_t max_executable_size() const) { + V8_DEPRECATED("max_executable_size_ is subsumed by max_old_space_size_", + size_t max_executable_size() const) { return max_executable_size_; } - V8_DEPRECATE_SOON("max_executable_size_ is subsumed by max_old_space_size_", - void set_max_executable_size(size_t limit_in_mb)) { + V8_DEPRECATED("max_executable_size_ is subsumed by max_old_space_size_", + void set_max_executable_size(size_t limit_in_mb)) { max_executable_size_ = limit_in_mb; } uint32_t* stack_limit() const { return stack_limit_; } @@ -6543,6 +6454,15 @@ typedef void (*HostInitializeImportMetaObjectCallback)(Local context, Local module, Local meta); +/** + * PrepareStackTraceCallback is called when the stack property of an error is + * first accessed. The return value will be used as the stack value. If this + * callback is registed, the |Error.prepareStackTrace| API will be disabled. + */ +typedef MaybeLocal (*PrepareStackTraceCallback)(Local context, + Local error, + Local trace); + /** * PromiseHook with type kInit is called when a new promise is * created. When a new promise is created as part of the chain in the @@ -6971,7 +6891,7 @@ typedef void (*JitCodeEventHandler)(const JitCodeEvent* event); */ class V8_EXPORT ExternalResourceVisitor { // NOLINT public: - virtual ~ExternalResourceVisitor() {} + virtual ~ExternalResourceVisitor() = default; virtual void VisitExternalString(Local string) {} }; @@ -6981,7 +6901,7 @@ class V8_EXPORT ExternalResourceVisitor { // NOLINT */ class V8_EXPORT PersistentHandleVisitor { // NOLINT public: - virtual ~PersistentHandleVisitor() {} + virtual ~PersistentHandleVisitor() = default; virtual void VisitPersistentHandle(Persistent* value, uint16_t class_id) {} }; @@ -7050,9 +6970,9 @@ class V8_EXPORT EmbedderHeapTracer { * Note: Only one of the AdvanceTracing methods needs to be overriden by the * embedder. */ - V8_DEPRECATE_SOON("Use void AdvanceTracing(deadline_in_ms)", - virtual bool AdvanceTracing( - double deadline_in_ms, AdvanceTracingActions actions)) { + V8_DEPRECATED("Use void AdvanceTracing(deadline_in_ms)", + virtual bool AdvanceTracing(double deadline_in_ms, + AdvanceTracingActions actions)) { return false; } @@ -7091,8 +7011,8 @@ class V8_EXPORT EmbedderHeapTracer { * Note: Only one of the EnterFinalPause methods needs to be overriden by the * embedder. */ - V8_DEPRECATE_SOON("Use void EnterFinalPause(EmbedderStackState)", - virtual void EnterFinalPause()) {} + V8_DEPRECATED("Use void EnterFinalPause(EmbedderStackState)", + virtual void EnterFinalPause()) {} virtual void EnterFinalPause(EmbedderStackState stack_state); /** @@ -7101,7 +7021,8 @@ class V8_EXPORT EmbedderHeapTracer { * The embedder is expected to throw away all intermediate data and reset to * the initial state. 
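
A sketch of a hook an embedder might register via the SetPrepareStackTraceCallback declared further down. The Local template arguments in the PrepareStackTraceCallback typedef above were lost in this copy of the patch; the sketch assumes error is a Local<Value> and trace a Local<StackTrace>, which should be treated as an assumption about this V8 version:

#include <v8.h>

v8::MaybeLocal<v8::Value> PrepareStackTrace(v8::Local<v8::Context> context,
                                            v8::Local<v8::Value> error,
                                            v8::Local<v8::StackTrace> trace) {
  // Return a fixed marker; a real embedder would format |trace| frame by
  // frame. Registering this disables the JS Error.prepareStackTrace API.
  return v8::String::NewFromUtf8(context->GetIsolate(), "<redacted stack>",
                                 v8::NewStringType::kNormal)
      .ToLocalChecked();
}

void InstallStackTraceHook(v8::Isolate* isolate) {
  isolate->SetPrepareStackTraceCallback(PrepareStackTrace);
}
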
*/ - virtual void AbortTracing() = 0; + V8_DEPRECATE_SOON("Obsolete as V8 will not abort tracing anymore.", + virtual void AbortTracing()) {} /* * Called by the embedder to request immediate finalization of the currently @@ -7130,8 +7051,7 @@ class V8_EXPORT EmbedderHeapTracer { /** * Returns the number of wrappers that are still to be traced by the embedder. */ - V8_DEPRECATE_SOON("Use IsTracingDone", - virtual size_t NumberOfWrappersToTrace()) { + V8_DEPRECATED("Use IsTracingDone", virtual size_t NumberOfWrappersToTrace()) { return 0; } @@ -7435,6 +7355,8 @@ class V8_EXPORT Isolate { kFunctionTokenOffsetTooLongForToString = 49, kWasmSharedMemory = 50, kWasmThreadOpcodes = 51, + kAtomicsNotify = 52, + kAtomicsWake = 53, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to @@ -7522,6 +7444,12 @@ class V8_EXPORT Isolate { void SetHostInitializeImportMetaObjectCallback( HostInitializeImportMetaObjectCallback callback); + /** + * This specifies the callback called when the stack property of Error + * is accessed. + */ + void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback); + /** * Optional notification that the system is running low on memory. * V8 uses these notifications to guide heuristics. @@ -7759,6 +7687,11 @@ class V8_EXPORT Isolate { */ void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer); + /* + * Gets the currently active heap tracer for the isolate. + */ + EmbedderHeapTracer* GetEmbedderHeapTracer(); + /** * Use for |AtomicsWaitCallback| to indicate the type of event it receives. */ @@ -8157,7 +8090,9 @@ class V8_EXPORT Isolate { void SetStackLimit(uintptr_t stack_limit); /** - * Returns a memory range that can potentially contain jitted code. + * Returns a memory range that can potentially contain jitted code. Code for + * V8's 'builtins' will not be in this range if embedded builtins is enabled. + * Instead, see GetBuiltinsCodeRange. * * On Win64, embedders are advised to install function table callbacks for * these ranges, as default SEH won't be able to unwind through jitted code. @@ -8171,6 +8106,15 @@ class V8_EXPORT Isolate { */ void GetCodeRange(void** start, size_t* length_in_bytes); + /** + * Returns a memory range containing the code for V8's builtin functions + * which are shared across isolates. + * + * If embedded builtins are disabled, then the memory range will be a null + * pointer with 0 length. + */ + MemoryRange GetBuiltinsCodeRange(); + /** Set the callback to invoke in case of fatal errors. */ void SetFatalErrorHandler(FatalErrorCallback that); @@ -8910,7 +8854,7 @@ class V8_EXPORT TryCatch { * of the C++ try catch handler itself. */ static void* JSStackComparableAddress(TryCatch* handler) { - if (handler == NULL) return NULL; + if (handler == nullptr) return nullptr; return handler->js_stack_comparable_address_; } @@ -8950,7 +8894,7 @@ class V8_EXPORT TryCatch { */ class V8_EXPORT ExtensionConfiguration { public: - ExtensionConfiguration() : name_count_(0), names_(NULL) { } + ExtensionConfiguration() : name_count_(0), names_(nullptr) {} ExtensionConfiguration(int name_count, const char* names[]) : name_count_(name_count), names_(names) { } @@ -9007,7 +8951,7 @@ class V8_EXPORT Context { * and only object identify will remain. 
*/ static Local New( - Isolate* isolate, ExtensionConfiguration* extensions = NULL, + Isolate* isolate, ExtensionConfiguration* extensions = nullptr, MaybeLocal global_template = MaybeLocal(), MaybeLocal global_object = MaybeLocal(), DeserializeInternalFieldsCallback internal_fields_deserializer = @@ -9346,201 +9290,6 @@ class V8_EXPORT Locker { // --- Implementation --- - -namespace internal { - -/** - * This class exports constants and functionality from within v8 that - * is necessary to implement inline functions in the v8 api. Don't - * depend on functions and constants defined here. - */ -class Internals { - public: - // These values match non-compiler-dependent values defined within - // the implementation of v8. - static const int kHeapObjectMapOffset = 0; - static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize; - static const int kStringResourceOffset = 3 * kApiPointerSize; - - static const int kOddballKindOffset = 4 * kApiPointerSize + kApiDoubleSize; - static const int kForeignAddressOffset = kApiPointerSize; - static const int kJSObjectHeaderSize = 3 * kApiPointerSize; - static const int kFixedArrayHeaderSize = 2 * kApiPointerSize; - static const int kContextHeaderSize = 2 * kApiPointerSize; - static const int kContextEmbedderDataIndex = 5; - static const int kFullStringRepresentationMask = 0x0f; - static const int kStringEncodingMask = 0x8; - static const int kExternalTwoByteRepresentationTag = 0x02; - static const int kExternalOneByteRepresentationTag = 0x0a; - - static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize; - static const int kExternalMemoryOffset = 4 * kApiPointerSize; - static const int kExternalMemoryLimitOffset = - kExternalMemoryOffset + kApiInt64Size; - static const int kExternalMemoryAtLastMarkCompactOffset = - kExternalMemoryLimitOffset + kApiInt64Size; - static const int kIsolateRootsOffset = kExternalMemoryLimitOffset + - kApiInt64Size + kApiInt64Size + - kApiPointerSize + kApiPointerSize; - static const int kUndefinedValueRootIndex = 4; - static const int kTheHoleValueRootIndex = 5; - static const int kNullValueRootIndex = 6; - static const int kTrueValueRootIndex = 7; - static const int kFalseValueRootIndex = 8; - static const int kEmptyStringRootIndex = 9; - - static const int kNodeClassIdOffset = 1 * kApiPointerSize; - static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3; - static const int kNodeStateMask = 0x7; - static const int kNodeStateIsWeakValue = 2; - static const int kNodeStateIsPendingValue = 3; - static const int kNodeStateIsNearDeathValue = 4; - static const int kNodeIsIndependentShift = 3; - static const int kNodeIsActiveShift = 4; - - static const int kFirstNonstringType = 0x80; - static const int kOddballType = 0x83; - static const int kForeignType = 0x87; - static const int kJSSpecialApiObjectType = 0x410; - static const int kJSApiObjectType = 0x420; - static const int kJSObjectType = 0x421; - - static const int kUndefinedOddballKind = 5; - static const int kNullOddballKind = 3; - - static const uint32_t kNumIsolateDataSlots = 4; - - V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate); - V8_INLINE static void CheckInitialized(v8::Isolate* isolate) { -#ifdef V8_ENABLE_CHECKS - CheckInitializedImpl(isolate); -#endif - } - - V8_INLINE static bool HasHeapObjectTag(const internal::Object* value) { - return ((reinterpret_cast(value) & kHeapObjectTagMask) == - kHeapObjectTag); - } - - V8_INLINE static int SmiValue(const internal::Object* value) { - return 
PlatformSmiTagging::SmiToInt(value); - } - - V8_INLINE static internal::Object* IntToSmi(int value) { - return PlatformSmiTagging::IntToSmi(value); - } - - V8_INLINE static constexpr bool IsValidSmi(intptr_t value) { - return PlatformSmiTagging::IsValidSmi(value); - } - - V8_INLINE static int GetInstanceType(const internal::Object* obj) { - typedef internal::Object O; - O* map = ReadField(obj, kHeapObjectMapOffset); - return ReadField(map, kMapInstanceTypeOffset); - } - - V8_INLINE static int GetOddballKind(const internal::Object* obj) { - typedef internal::Object O; - return SmiValue(ReadField(obj, kOddballKindOffset)); - } - - V8_INLINE static bool IsExternalTwoByteString(int instance_type) { - int representation = (instance_type & kFullStringRepresentationMask); - return representation == kExternalTwoByteRepresentationTag; - } - - V8_INLINE static uint8_t GetNodeFlag(internal::Object** obj, int shift) { - uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; - return *addr & static_cast(1U << shift); - } - - V8_INLINE static void UpdateNodeFlag(internal::Object** obj, - bool value, int shift) { - uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; - uint8_t mask = static_cast(1U << shift); - *addr = static_cast((*addr & ~mask) | (value << shift)); - } - - V8_INLINE static uint8_t GetNodeState(internal::Object** obj) { - uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; - return *addr & kNodeStateMask; - } - - V8_INLINE static void UpdateNodeState(internal::Object** obj, - uint8_t value) { - uint8_t* addr = reinterpret_cast(obj) + kNodeFlagsOffset; - *addr = static_cast((*addr & ~kNodeStateMask) | value); - } - - V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, - uint32_t slot, - void* data) { - uint8_t* addr = reinterpret_cast(isolate) + - kIsolateEmbedderDataOffset + slot * kApiPointerSize; - *reinterpret_cast(addr) = data; - } - - V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate, - uint32_t slot) { - const uint8_t* addr = reinterpret_cast(isolate) + - kIsolateEmbedderDataOffset + slot * kApiPointerSize; - return *reinterpret_cast(addr); - } - - V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate, - int index) { - uint8_t* addr = reinterpret_cast(isolate) + kIsolateRootsOffset; - return reinterpret_cast(addr + index * kApiPointerSize); - } - - template - V8_INLINE static T ReadField(const internal::Object* ptr, int offset) { - const uint8_t* addr = - reinterpret_cast(ptr) + offset - kHeapObjectTag; - return *reinterpret_cast(addr); - } - - template - V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) { - typedef internal::Object O; - typedef internal::Internals I; - O* ctx = *reinterpret_cast(context); - int embedder_data_offset = I::kContextHeaderSize + - (internal::kApiPointerSize * I::kContextEmbedderDataIndex); - O* embedder_data = I::ReadField(ctx, embedder_data_offset); - int value_offset = - I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index); - return I::ReadField(embedder_data, value_offset); - } -}; - -// Only perform cast check for types derived from v8::Data since -// other types do not implement the Cast method. 
-template -struct CastCheck { - template - static void Perform(T* data); -}; - -template <> -template -void CastCheck::Perform(T* data) { - T::Cast(data); -} - -template <> -template -void CastCheck::Perform(T* data) {} - -template -V8_INLINE void PerformCastCheck(T* data) { - CastCheck::value>::Perform(data); -} - -} // namespace internal - - template Local Local::New(Isolate* isolate, Local that) { return New(isolate, that.val_); @@ -9554,7 +9303,7 @@ Local Local::New(Isolate* isolate, const PersistentBase& that) { template Local Local::New(Isolate* isolate, T* that) { - if (that == NULL) return Local(); + if (that == nullptr) return Local(); T* that_ptr = that; internal::Object** p = reinterpret_cast(that_ptr); return Local(reinterpret_cast(HandleScope::CreateHandle( @@ -9598,7 +9347,7 @@ void* WeakCallbackInfo::GetInternalField(int index) const { template T* PersistentBase::New(Isolate* isolate, T* that) { - if (that == NULL) return NULL; + if (that == nullptr) return nullptr; internal::Object** p = reinterpret_cast(that); return reinterpret_cast( V8::GlobalizeReference(reinterpret_cast(isolate), @@ -9649,7 +9398,7 @@ template void PersistentBase::Reset() { if (this->IsEmpty()) return; V8::DisposeGlobal(reinterpret_cast(this->val_)); - val_ = 0; + val_ = nullptr; } @@ -10664,10 +10413,10 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory( } if (change_in_bytes < 0) { - *external_memory_limit += change_in_bytes; - } - - if (change_in_bytes > 0 && amount > *external_memory_limit) { + const int64_t lower_limit = *external_memory_limit + change_in_bytes; + if (lower_limit > I::kExternalAllocationSoftLimit) + *external_memory_limit = lower_limit; + } else if (change_in_bytes > 0 && amount > *external_memory_limit) { ReportExternalAllocationLimitReached(); } return *external_memory; diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index 75fd5aa7e743f7..93c4629825b255 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -420,6 +420,36 @@ namespace v8 { template class AlignOfHelper { char c; T t; }; } #define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */ #endif +#ifdef V8_OS_WIN + +// Setup for Windows DLL export/import. When building the V8 DLL the +// BUILDING_V8_SHARED needs to be defined. When building a program which uses +// the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8 +// static library or building a program which uses the V8 static library neither +// BUILDING_V8_SHARED nor USING_V8_SHARED should be defined. +#ifdef BUILDING_V8_SHARED +# define V8_EXPORT __declspec(dllexport) +#elif USING_V8_SHARED +# define V8_EXPORT __declspec(dllimport) +#else +# define V8_EXPORT +#endif // BUILDING_V8_SHARED + +#else // V8_OS_WIN + +// Setup for Linux shared library export. +#if V8_HAS_ATTRIBUTE_VISIBILITY +# ifdef BUILDING_V8_SHARED +# define V8_EXPORT __attribute__ ((visibility("default"))) +# else +# define V8_EXPORT +# endif +#else +# define V8_EXPORT +#endif + +#endif // V8_OS_WIN + // clang-format on #endif // V8CONFIG_H_ diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg index a0ede58b911a79..53ea0cdd44182d 100644 --- a/deps/v8/infra/config/cq.cfg +++ b/deps/v8/infra/config/cq.cfg @@ -108,6 +108,23 @@ verifiers { triggered_by: "v8_win_rel_ng" } } + # TODO(machenbach): Remove after testing in practice and migrate to + # PRESUBMIT.py scripts. 
+ buckets { + name: "luci.chromium.try" + builders { + name: "cast_shell_android" + experiment_percentage: 20 + } + builders { + name: "cast_shell_linux" + experiment_percentage: 20 + } + builders { + name: "linux-chromeos-rel" + experiment_percentage: 20 + } + } } } diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index 095aeefc5c4631..f798a76a61cb4f 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -54,6 +54,7 @@ 'x64.debug': 'default_debug_x64', 'x64.optdebug': 'default_optdebug_x64', 'x64.release': 'default_release_x64', + 'x64.release.sample': 'release_x64_sample', }, 'client.dynamorio': { 'linux-v8-dr': 'release_x64', @@ -208,6 +209,7 @@ 'v8_android_arm64_n5x_rel_ng': 'release_android_arm64', 'v8_fuchsia_rel_ng': 'release_x64_fuchsia_trybot', 'v8_linux_rel_ng': 'release_x86_gcmole_trybot', + 'v8_linux_optional_rel_ng': 'release_x86_trybot', 'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa', 'v8_linux_nodcheck_rel_ng': 'release_x86_minimal_symbols', 'v8_linux_dbg_ng': 'debug_x86_trybot', @@ -235,6 +237,7 @@ 'v8_linux64_tsan_isolates_rel_ng': 'release_x64_tsan_minimal_symbols', 'v8_linux64_ubsan_rel_ng': 'release_x64_ubsan_vptr_minimal_symbols', + 'v8_odroid_arm_rel_ng': 'release_arm', # TODO(machenbach): Remove after switching to x64 on infra side. 'v8_win_dbg': 'debug_x86_trybot', 'v8_win_compile_dbg': 'debug_x86_trybot', @@ -280,7 +283,7 @@ 'default_optdebug_android_arm': [ 'debug', 'arm', 'android', 'v8_enable_slow_dchecks' ], 'default_release_android_arm': [ - 'release', 'arm', 'android'], + 'release', 'arm', 'android', 'android_strip_outputs'], 'default_debug_arm64': [ 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks', 'v8_full_debug'], 'default_optdebug_arm64': [ @@ -353,6 +356,8 @@ 'debug', 'x64', 'v8_enable_slow_dchecks'], 'default_release_x64': [ 'release', 'x64'], + 'release_x64_sample': [ + 'release', 'x64', 'sample'], 'default_debug_x86': [ 'debug', 'x86', 'v8_enable_slow_dchecks', 'v8_full_debug'], 'default_optdebug_x86': [ @@ -416,9 +421,11 @@ 'release_arm': [ 'release_bot', 'arm', 'hard_float'], 'release_android_arm': [ - 'release_bot', 'arm', 'android', 'minimal_symbols'], + 'release_bot', 'arm', 'android', 'minimal_symbols', + 'android_strip_outputs'], 'release_android_arm64': [ - 'release_bot', 'arm64', 'android', 'minimal_symbols'], + 'release_bot', 'arm64', 'android', 'minimal_symbols', + 'android_strip_outputs'], # Release configs for x64. 
'release_x64': [ @@ -580,6 +587,10 @@ 'gn_args': 'target_os="android" v8_android_log_stdout=true', }, + 'android_strip_outputs': { + 'gn_args': 'android_unstripped_runtime_outputs=false', + }, + 'arm': { 'gn_args': 'target_cpu="arm"', }, @@ -625,14 +636,10 @@ 'gn_args': 'is_debug=true v8_enable_backtrace=true', }, - 'v8_use_multi_snapshots': { - 'gn_args': 'v8_use_multi_snapshots=true', - }, - 'debug_bot': { 'mixins': [ 'debug', 'shared', 'goma', 'v8_enable_slow_dchecks', - 'v8_use_multi_snapshots', 'v8_optimized_debug'], + 'v8_optimized_debug'], }, 'debug_trybot': { @@ -715,11 +722,11 @@ }, 'release': { - 'gn_args': 'is_debug=false android_unstripped_runtime_outputs=false', + 'gn_args': 'is_debug=false', }, 'release_bot': { - 'mixins': ['release', 'static', 'goma', 'v8_use_multi_snapshots'], + 'mixins': ['release', 'static', 'goma'], }, 'release_trybot': { @@ -885,5 +892,9 @@ 'gn_args': 'target_cpu="x86"', }, + 'sample': { + 'gn_args': 'v8_monolithic=true is_component_build=false ' + 'v8_use_external_startup_data=false use_custom_libcxx=false', + }, }, } diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index b2cd7618170c39..2c7380b3dd3643 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -39,7 +39,10 @@ 'os': 'Android', }, 'tests': [ - {'name': 'mjsunit', 'variant': 'default', 'shards': 2}, + {'name': 'benchmarks', 'variant': 'default'}, + {'name': 'v8testing', 'variant': 'default', 'shards': 4}, + {'name': 'mozilla', 'variant': 'default'}, + {'name': 'test262_variants', 'variant': 'default', 'shards': 6}, ], }, ############################################################################## @@ -82,7 +85,7 @@ {'name': 'mozilla', 'variant': 'extra'}, {'name': 'test262_variants', 'shards': 2}, {'name': 'test262_variants', 'variant': 'extra', 'shards': 2}, - {'name': 'v8testing'}, + {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra'}, ], }, @@ -120,14 +123,69 @@ {'name': 'optimize_for_size'}, {'name': 'test262_variants', 'shards': 4}, {'name': 'test262_variants', 'variant': 'extra', 'shards': 2}, - {'name': 'v8testing'}, + {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'suffix': 'isolates', 'test_args': ['--isolates'], 'shards': 2}, {'name': 'v8testing', 'variant': 'extra'}, ], }, + 'v8_linux_optional_rel_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64-avx2', + }, + 'tests': [ + # Code serializer. + {'name': 'benchmarks', 'variant': 'code_serializer', 'shards': 1}, + {'name': 'd8testing', 'variant': 'code_serializer', 'shards': 1}, + {'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1}, + {'name': 'test262_variants', 'variant': 'code_serializer', 'shards': 1}, + # No SSE3. + { + 'name': 'mozilla', + 'suffix': 'nosse3', + 'test_args': [ + '--extra-flags', + '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx', + ], + }, + { + 'name': 'test262', + 'suffix': 'nosse3', + 'test_args': [ + '--extra-flags', + '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx', + ], + }, + { + 'name': 'v8testing', + 'suffix': 'nosse3', + 'test_args': [ + '--extra-flags', + '--noenable-sse3 --noenable-ssse3 --noenable-sse4-1 --noenable-avx', + ], + 'shards': 3, + }, + # No SSE4. 
+ { + 'name': 'mozilla', + 'suffix': 'nosse4', + 'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'], + }, + { + 'name': 'test262', + 'suffix': 'nosse4', + 'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'], + }, + { + 'name': 'v8testing', + 'suffix': 'nosse4', + 'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'], + 'shards': 3, + }, + ], + }, 'v8_linux_verify_csa_rel_ng_triggered': { 'tests': [ - {'name': 'v8testing'}, + {'name': 'v8testing', 'shards': 2}, ], }, ############################################################################## @@ -144,9 +202,9 @@ 'v8_linux_arm_rel_ng_triggered': { 'tests': [ {'name': 'mjsunit_sp_frame_access'}, - {'name': 'mozilla'}, - {'name': 'test262'}, - {'name': 'v8testing', 'shards': 7}, + {'name': 'mozilla', 'shards': 2}, + {'name': 'test262', 'shards': 2}, + {'name': 'v8testing', 'shards': 8}, {'name': 'v8testing', 'variant': 'extra', 'shards': 3}, ], }, @@ -214,7 +272,7 @@ {'name': 'test262_variants', 'shards': 4}, {'name': 'test262_variants', 'variant': 'extra', 'shards': 2}, {'name': 'v8initializers'}, - {'name': 'v8testing'}, + {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra'}, {'name': 'v8testing', 'variant': 'minor_mc'}, {'name': 'v8testing', 'variant': 'slow_path'}, @@ -242,7 +300,7 @@ }, 'v8_linux64_verify_csa_rel_ng_triggered': { 'tests': [ - {'name': 'v8testing'}, + {'name': 'v8testing', 'shards': 2}, ], }, ############################################################################## @@ -264,13 +322,31 @@ 'v8_linux_arm64_rel_ng_triggered': { 'tests': [ {'name': 'mjsunit_sp_frame_access'}, - {'name': 'mozilla'}, - {'name': 'test262'}, + {'name': 'mozilla', 'shards': 2}, + {'name': 'test262', 'shards': 2}, {'name': 'v8testing', 'shards': 9}, {'name': 'v8testing', 'variant': 'extra', 'shards': 4}, ], }, ############################################################################## + # Odroids with native arm + 'v8_odroid_arm_rel_ng_triggered': { + 'swarming_dimensions' : { + 'cores': '8', + 'cpu': 'armv7l-32-ODROID-XU4', + 'os': 'Ubuntu-16.04', + }, + 'swarming_task_attrs': { + # Use same prio as CI due to limited resources. 
+ 'priority': 25, + }, + 'tests': [ + {'name': 'benchmarks'}, + {'name': 'optimize_for_size'}, + {'name': 'v8testing', 'shards': 2}, + ], + }, + ############################################################################## # Win32 'v8_win_dbg': { 'swarming_dimensions' : { @@ -298,7 +374,7 @@ }, 'tests': [ {'name': 'test262'}, - {'name': 'v8testing'}, + {'name': 'v8testing', 'shards': 2}, ], }, ############################################################################## @@ -331,7 +407,7 @@ 'tests': [ {'name': 'mozilla'}, {'name': 'test262'}, - {'name': 'v8testing'}, + {'name': 'v8testing', 'shards': 2}, ], }, 'v8_win64_rel_ng_triggered': { @@ -341,7 +417,7 @@ }, 'tests': [ {'name': 'test262'}, - {'name': 'v8testing'}, + {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra'}, ], }, @@ -385,7 +461,7 @@ 'tests': [ {'name': 'mozilla'}, {'name': 'test262'}, - {'name': 'v8testing'}, + {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra'}, ], }, diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS index 90cfd737f21bd6..99873803c99fc1 100644 --- a/deps/v8/src/DEPS +++ b/deps/v8/src/DEPS @@ -13,6 +13,7 @@ include_rules = [ "+src/heap/heap.h", "+src/heap/heap-inl.h", "+src/heap/heap-write-barrier-inl.h", + "+src/heap/heap-write-barrier.h", "-src/inspector", "-src/interpreter", "+src/interpreter/bytecode-array-accessor.h", @@ -30,6 +31,7 @@ include_rules = [ "+testing/gtest/include/gtest/gtest_prod.h", "-src/libplatform", "-include/libplatform", + "+builtins-generated", "+torque-generated" ] diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index da935f3652812c..226178394db1bf 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -31,7 +31,8 @@ Handle Accessors::MakeAccessor( info->set_is_special_data_property(true); info->set_is_sloppy(false); info->set_replace_on_access(false); - info->set_has_no_side_effect(false); + info->set_getter_side_effect_type(SideEffectType::kHasSideEffect); + info->set_setter_side_effect_type(SideEffectType::kHasSideEffect); name = factory->InternalizeName(name); info->set_name(*name); Handle get = v8::FromCData(isolate, getter); @@ -70,7 +71,7 @@ bool Accessors::IsJSObjectFieldAccessor(Isolate* isolate, Handle map, default: if (map->instance_type() < FIRST_NONSTRING_TYPE) { return CheckForName(isolate, name, isolate->factory()->length_string(), - String::kLengthOffset, FieldIndex::kTagged, index); + String::kLengthOffset, FieldIndex::kWord32, index); } return false; diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h index 69fdbbb74e329a..a8ae7da689cb83 100644 --- a/deps/v8/src/accessors.h +++ b/deps/v8/src/accessors.h @@ -22,27 +22,27 @@ class JavaScriptFrame; // The list of accessor descriptors. This is a second-order macro // taking a macro to be applied to all accessor descriptor names. 
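The hunk below widens each entry of this second-order macro from two columns to four, so every accessor also records a getter and a setter SideEffectType; client macros that only need the leading columns absorb the extras through a variadic parameter. A minimal sketch of that pattern, using hypothetical names rather than the real list:

    // Hypothetical four-column descriptor list mirroring the shape used below.
    #define MY_ACCESSOR_LIST(V)                                   \
      V(foo_length, FooLength, kHasNoSideEffect, kHasSideEffect)  \
      V(foo_name, FooName, kHasNoSideEffect, kHasSideEffect)

    // A client that only needs the CamelCase column swallows the extra
    // columns with `...`, as ACCESSOR_GETTER_DECLARATION does below.
    #define DECLARE_GETTER(accessor_name, AccessorName, ...) \
      void AccessorName##Getter();
    MY_ACCESSOR_LIST(DECLARE_GETTER)  // declares FooLengthGetter, FooNameGetter
    #undef DECLARE_GETTER
    #undef MY_ACCESSOR_LIST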
-#define ACCESSOR_INFO_LIST(V) \ - V(arguments_iterator, ArgumentsIterator) \ - V(array_length, ArrayLength) \ - V(bound_function_length, BoundFunctionLength) \ - V(bound_function_name, BoundFunctionName) \ - V(error_stack, ErrorStack) \ - V(function_arguments, FunctionArguments) \ - V(function_caller, FunctionCaller) \ - V(function_name, FunctionName) \ - V(function_length, FunctionLength) \ - V(function_prototype, FunctionPrototype) \ - V(string_length, StringLength) - -#define SIDE_EFFECT_FREE_ACCESSOR_INFO_LIST(V) \ - V(ArrayLength) \ - V(BoundFunctionLength) \ - V(BoundFunctionName) \ - V(FunctionName) \ - V(FunctionLength) \ - V(FunctionPrototype) \ - V(StringLength) +// V(accessor_name, AccessorName, GetterSideEffectType, SetterSideEffectType) +#define ACCESSOR_INFO_LIST(V) \ + V(arguments_iterator, ArgumentsIterator, kHasNoSideEffect, \ + kHasSideEffectToReceiver) \ + V(array_length, ArrayLength, kHasNoSideEffect, kHasSideEffectToReceiver) \ + V(bound_function_length, BoundFunctionLength, kHasNoSideEffect, \ + kHasSideEffectToReceiver) \ + V(bound_function_name, BoundFunctionName, kHasNoSideEffect, \ + kHasSideEffectToReceiver) \ + V(error_stack, ErrorStack, kHasSideEffectToReceiver, \ + kHasSideEffectToReceiver) \ + V(function_arguments, FunctionArguments, kHasNoSideEffect, \ + kHasSideEffectToReceiver) \ + V(function_caller, FunctionCaller, kHasNoSideEffect, \ + kHasSideEffectToReceiver) \ + V(function_name, FunctionName, kHasNoSideEffect, kHasSideEffectToReceiver) \ + V(function_length, FunctionLength, kHasNoSideEffect, \ + kHasSideEffectToReceiver) \ + V(function_prototype, FunctionPrototype, kHasNoSideEffect, \ + kHasSideEffectToReceiver) \ + V(string_length, StringLength, kHasNoSideEffect, kHasSideEffectToReceiver) #define ACCESSOR_SETTER_LIST(V) \ V(ArrayLengthSetter) \ @@ -55,9 +55,9 @@ class JavaScriptFrame; class Accessors : public AllStatic { public: -#define ACCESSOR_GETTER_DECLARATION(accessor_name, AccessorName) \ - static void AccessorName##Getter( \ - v8::Local name, \ +#define ACCESSOR_GETTER_DECLARATION(accessor_name, AccessorName, ...) \ + static void AccessorName##Getter( \ + v8::Local name, \ const v8::PropertyCallbackInfo& info); ACCESSOR_INFO_LIST(ACCESSOR_GETTER_DECLARATION) #undef ACCESSOR_GETTER_DECLARATION @@ -118,7 +118,7 @@ class Accessors : public AllStatic { AccessorNameBooleanSetterCallback setter); private: -#define ACCESSOR_INFO_DECLARATION(accessor_name, AccessorName) \ +#define ACCESSOR_INFO_DECLARATION(accessor_name, AccessorName, ...) \ static Handle Make##AccessorName##Info(Isolate* isolate); ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION) #undef ACCESSOR_INFO_DECLARATION diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/address-map.cc index 2b0bf727e5b31a..bf63e255cca1ab 100644 --- a/deps/v8/src/address-map.cc +++ b/deps/v8/src/address-map.cc @@ -14,8 +14,9 @@ RootIndexMap::RootIndexMap(Isolate* isolate) { map_ = isolate->root_index_map(); if (map_ != nullptr) return; map_ = new HeapObjectToIndexHashMap(); - for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) { - Heap::RootListIndex root_index = static_cast(i); + for (uint32_t i = 0; i < static_cast(RootIndex::kStrongRootListLength); + i++) { + RootIndex root_index = static_cast(i); Object* root = isolate->heap()->root(root_index); if (!root->IsHeapObject()) continue; // Omit root entries that can be written after initialization. 
They must
diff --git a/deps/v8/src/allocation-site-scopes-inl.h b/deps/v8/src/allocation-site-scopes-inl.h
new file mode 100644
index 00000000000000..e114bb3885c202
--- /dev/null
+++ b/deps/v8/src/allocation-site-scopes-inl.h
@@ -0,0 +1,52 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ALLOCATION_SITE_SCOPES_INL_H_
+#define V8_ALLOCATION_SITE_SCOPES_INL_H_
+
+#include "src/allocation-site-scopes.h"
+
+#include "src/objects/allocation-site-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() {
+ if (top().is_null()) {
+ InitializeTraversal(top_site_);
+ } else {
+ // Advance current site
+ Object* nested_site = current()->nested_site();
+ // Something is wrong if we advance to the end of the list here.
+ update_current_site(AllocationSite::cast(nested_site));
+ }
+ return Handle<AllocationSite>(*current(), isolate());
+}
+
+void AllocationSiteUsageContext::ExitScope(Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) {
+ // This assert ensures that we are pointing at the right sub-object in a
+ // recursive walk of a nested literal.
+ DCHECK(object.is_null() || *object == scope_site->boilerplate());
+}
+
+bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
+ if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) {
+ if (FLAG_allocation_site_pretenuring ||
+ AllocationSite::ShouldTrack(object->GetElementsKind())) {
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating Memento for %s %p\n",
+ object->IsJSArray() ? "JSArray" : "JSObject",
+ static_cast<void*>(*object));
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ALLOCATION_SITE_SCOPES_INL_H_
diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h
index 60614c5e01cd12..0a729948db2f59 100644
--- a/deps/v8/src/allocation-site-scopes.h
+++ b/deps/v8/src/allocation-site-scopes.h
@@ -56,40 +56,12 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
top_site_(site),
activated_(activated) { }

- inline Handle<AllocationSite> EnterNewScope() {
- if (top().is_null()) {
- InitializeTraversal(top_site_);
- } else {
- // Advance current site
- Object* nested_site = current()->nested_site();
- // Something is wrong if we advance to the end of the list here.
- update_current_site(AllocationSite::cast(nested_site));
- }
- return Handle<AllocationSite>(*current(), isolate());
- }
+ inline Handle<AllocationSite> EnterNewScope();

inline void ExitScope(Handle<AllocationSite> scope_site,
- Handle<JSObject> object) {
- // This assert ensures that we are pointing at the right sub-object in a
- // recursive walk of a nested literal.
- DCHECK(object.is_null() || *object == scope_site->boilerplate());
- }
+ Handle<JSObject> object);

- bool ShouldCreateMemento(Handle<JSObject> object) {
- if (activated_ &&
- AllocationSite::CanTrack(object->map()->instance_type())) {
- if (FLAG_allocation_site_pretenuring ||
- AllocationSite::ShouldTrack(object->GetElementsKind())) {
- if (FLAG_trace_creation_allocation_sites) {
- PrintF("*** Creating Memento for %s %p\n",
- object->IsJSArray() ?
"JSArray" : "JSObject", - static_cast(*object)); - } - return true; - } - } - return false; - } + inline bool ShouldCreateMemento(Handle object); static const bool kCopying = true; diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc index 55c68dea89482e..6327a9c965abe7 100644 --- a/deps/v8/src/allocation.cc +++ b/deps/v8/src/allocation.cc @@ -8,6 +8,7 @@ #include "src/base/bits.h" #include "src/base/lazy-instance.h" #include "src/base/logging.h" +#include "src/base/lsan-page-allocator.h" #include "src/base/page-allocator.h" #include "src/base/platform/platform.h" #include "src/utils.h" @@ -17,10 +18,6 @@ #include // NOLINT #endif -#if defined(LEAK_SANITIZER) -#include -#endif - namespace v8 { namespace internal { @@ -51,21 +48,29 @@ struct InitializePageAllocator { static v8::base::PageAllocator default_allocator; page_allocator = &default_allocator; } +#if defined(LEAK_SANITIZER) + { + static v8::base::LsanPageAllocator lsan_allocator(page_allocator); + page_allocator = &lsan_allocator; + } +#endif *page_allocator_ptr = page_allocator; } }; static base::LazyInstance::type page_allocator = LAZY_INSTANCE_INITIALIZER; - -v8::PageAllocator* GetPageAllocator() { return page_allocator.Get(); } - // We will attempt allocation this many times. After each failure, we call // OnCriticalMemoryPressure to try to free some memory. const int kAllocationTries = 2; } // namespace +v8::PageAllocator* GetPlatformPageAllocator() { + DCHECK_NOT_NULL(page_allocator.Get()); + return page_allocator.Get(); +} + void* Malloced::New(size_t size) { void* result = AllocWithRetry(size); if (result == nullptr) { @@ -131,68 +136,62 @@ void AlignedFree(void *ptr) { #endif } -size_t AllocatePageSize() { return GetPageAllocator()->AllocatePageSize(); } +size_t AllocatePageSize() { + return GetPlatformPageAllocator()->AllocatePageSize(); +} -size_t CommitPageSize() { return GetPageAllocator()->CommitPageSize(); } +size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); } void SetRandomMmapSeed(int64_t seed) { - GetPageAllocator()->SetRandomMmapSeed(seed); + GetPlatformPageAllocator()->SetRandomMmapSeed(seed); } -void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); } +void* GetRandomMmapAddr() { + return GetPlatformPageAllocator()->GetRandomMmapAddr(); +} -void* AllocatePages(void* address, size_t size, size_t alignment, +void* AllocatePages(v8::PageAllocator* page_allocator, void* address, + size_t size, size_t alignment, PageAllocator::Permission access) { + DCHECK_NOT_NULL(page_allocator); DCHECK_EQ(address, AlignedAddress(address, alignment)); - DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1)); + DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1)); void* result = nullptr; for (int i = 0; i < kAllocationTries; ++i) { - result = - GetPageAllocator()->AllocatePages(address, size, alignment, access); + result = page_allocator->AllocatePages(address, size, alignment, access); if (result != nullptr) break; - size_t request_size = size + alignment - AllocatePageSize(); + size_t request_size = size + alignment - page_allocator->AllocatePageSize(); if (!OnCriticalMemoryPressure(request_size)) break; } -#if defined(LEAK_SANITIZER) - if (result != nullptr) { - __lsan_register_root_region(result, size); - } -#endif return result; } -bool FreePages(void* address, const size_t size) { - DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1)); - bool result = GetPageAllocator()->FreePages(address, size); -#if defined(LEAK_SANITIZER) 
- if (result) { - __lsan_unregister_root_region(address, size); - } -#endif - return result; +bool FreePages(v8::PageAllocator* page_allocator, void* address, + const size_t size) { + DCHECK_NOT_NULL(page_allocator); + DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1)); + return page_allocator->FreePages(address, size); } -bool ReleasePages(void* address, size_t size, size_t new_size) { +bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size, + size_t new_size) { + DCHECK_NOT_NULL(page_allocator); DCHECK_LT(new_size, size); - bool result = GetPageAllocator()->ReleasePages(address, size, new_size); -#if defined(LEAK_SANITIZER) - if (result) { - __lsan_unregister_root_region(address, size); - __lsan_register_root_region(address, new_size); - } -#endif - return result; + return page_allocator->ReleasePages(address, size, new_size); } -bool SetPermissions(void* address, size_t size, - PageAllocator::Permission access) { - return GetPageAllocator()->SetPermissions(address, size, access); +bool SetPermissions(v8::PageAllocator* page_allocator, void* address, + size_t size, PageAllocator::Permission access) { + DCHECK_NOT_NULL(page_allocator); + return page_allocator->SetPermissions(address, size, access); } -byte* AllocatePage(void* address, size_t* allocated) { - size_t page_size = AllocatePageSize(); - void* result = - AllocatePages(address, page_size, page_size, PageAllocator::kReadWrite); +byte* AllocatePage(v8::PageAllocator* page_allocator, void* address, + size_t* allocated) { + DCHECK_NOT_NULL(page_allocator); + size_t page_size = page_allocator->AllocatePageSize(); + void* result = AllocatePages(page_allocator, address, page_size, page_size, + PageAllocator::kReadWrite); if (result != nullptr) *allocated = page_size; return static_cast(result); } @@ -206,16 +205,17 @@ bool OnCriticalMemoryPressure(size_t length) { return true; } -VirtualMemory::VirtualMemory() : address_(kNullAddress), size_(0) {} - -VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment) - : address_(kNullAddress), size_(0) { - size_t page_size = AllocatePageSize(); - size_t alloc_size = RoundUp(size, page_size); - address_ = reinterpret_cast
<Address>(
- AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess));
- if (address_ != kNullAddress) {
- size_ = alloc_size;
+VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
+ void* hint, size_t alignment)
+ : page_allocator_(page_allocator) {
+ DCHECK_NOT_NULL(page_allocator);
+ size_t page_size = page_allocator_->AllocatePageSize();
+ alignment = RoundUp(alignment, page_size);
+ size = RoundUp(size, page_size);
+ Address address = reinterpret_cast<Address>(AllocatePages(
+ page_allocator_, hint, size, alignment, PageAllocator::kNoAccess));
+ if (address != kNullAddress) {
+ region_ = base::AddressRegion(address, size);
+ }
}
@@ -226,30 +226,31 @@ VirtualMemory::~VirtualMemory() {
}

void VirtualMemory::Reset() {
- address_ = kNullAddress;
- size_ = 0;
+ page_allocator_ = nullptr;
+ region_ = base::AddressRegion();
}

bool VirtualMemory::SetPermissions(Address address, size_t size,
PageAllocator::Permission access) {
CHECK(InVM(address, size));
- bool result = v8::internal::SetPermissions(address, size, access);
+ bool result =
+ v8::internal::SetPermissions(page_allocator_, address, size, access);
DCHECK(result);
return result;
}

size_t VirtualMemory::Release(Address free_start) {
DCHECK(IsReserved());
- DCHECK(IsAddressAligned(free_start, CommitPageSize()));
+ DCHECK(IsAddressAligned(free_start, page_allocator_->CommitPageSize()));
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
- const size_t free_size = size_ - (free_start - address_);
+
+ const size_t old_size = region_.size();
+ const size_t free_size = old_size - (free_start - region_.begin());
CHECK(InVM(free_start, free_size));
- DCHECK_LT(address_, free_start);
- DCHECK_LT(free_start, address_ + size_);
- CHECK(ReleasePages(reinterpret_cast<void*>(address_), size_,
- size_ - free_size));
- size_ -= free_size;
+ region_.set_size(old_size - free_size);
+ CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
+ old_size, region_.size()));
return free_size;
}
@@ -257,41 +258,21 @@ void VirtualMemory::Free() {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
- Address address = address_;
- size_t size = size_;
- CHECK(InVM(address, size));
+ v8::PageAllocator* page_allocator = page_allocator_;
+ base::AddressRegion region = region_;
Reset();
- // FreePages expects size to be aligned to allocation granularity. Trimming
- // may leave size at only commit granularity. Align it here.
- CHECK(FreePages(reinterpret_cast<void*>(address),
- RoundUp(size, AllocatePageSize())));
+ // FreePages expects size to be aligned to allocation granularity; however,
+ // ReleasePages may leave size at only commit granularity. Align it here.
+ CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ RoundUp(region.size(), page_allocator->AllocatePageSize())));
}

void VirtualMemory::TakeControl(VirtualMemory* from) {
DCHECK(!IsReserved());
- address_ = from->address_;
- size_ = from->size_;
+ page_allocator_ = from->page_allocator_;
+ region_ = from->region_;
from->Reset();
}

-bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
- VirtualMemory vm(size, hint);
- if (vm.IsReserved()) {
- result->TakeControl(&vm);
- return true;
- }
- return false;
-}
-
-bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
- VirtualMemory* result) {
- VirtualMemory vm(size, hint, alignment);
- if (vm.IsReserved()) {
- result->TakeControl(&vm);
- return true;
- }
- return false;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 8e17a35514e4bf..3a21310af8e698 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -6,6 +6,7 @@
#define V8_ALLOCATION_H_

#include "include/v8-platform.h"
+#include "src/base/address-region.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"
@@ -82,6 +83,9 @@ void* AllocWithRetry(size_t size);
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);

+// Returns platform page allocator instance. Guaranteed to be a valid pointer.
+V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
+
// Gets the page granularity for AllocatePages and FreePages. Addresses returned
// by AllocatePages and AllocatePage are aligned to this size.
V8_EXPORT_PRIVATE size_t AllocatePageSize();
@@ -101,14 +105,16 @@ V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
// AllocatePageSize(). Returns the address of the allocated memory, with the
// specified size and alignment, or nullptr on failure.
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT void* AllocatePages(void* address, size_t size,
+V8_WARN_UNUSED_RESULT void* AllocatePages(v8::PageAllocator* page_allocator,
+ void* address, size_t size,
size_t alignment,
PageAllocator::Permission access);

// Frees memory allocated by a call to AllocatePages. |address| and |size| must
// be multiples of AllocatePageSize(). Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT bool FreePages(void* address, const size_t size);
+V8_WARN_UNUSED_RESULT bool FreePages(v8::PageAllocator* page_allocator,
+ void* address, const size_t size);

// Releases memory that is no longer needed. The range specified by |address|
// and |size| must be an allocated memory region. |size| and |new_size| must be
@@ -116,7 +122,8 @@ V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
// Released memory is left in an undefined state, so it should not be accessed.
// Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
+V8_WARN_UNUSED_RESULT bool ReleasePages(v8::PageAllocator* page_allocator,
+ void* address, size_t size,
size_t new_size);

// Sets permissions according to |access|. |address| and |size| must be
@@ -124,18 +131,21 @@ V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
// cause the memory contents to be lost. Returns true on success, otherwise
// false.
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
+V8_WARN_UNUSED_RESULT bool SetPermissions(v8::PageAllocator* page_allocator,
+ void* address, size_t size,
PageAllocator::Permission access);

-inline bool SetPermissions(Address address, size_t size,
- PageAllocator::Permission access) {
- return SetPermissions(reinterpret_cast<void*>(address), size, access);
+inline bool SetPermissions(v8::PageAllocator* page_allocator, Address address,
+ size_t size, PageAllocator::Permission access) {
+ return SetPermissions(page_allocator, reinterpret_cast<void*>(address), size,
+ access);
}

// Convenience function that allocates a single system page with read and write
// permissions. |address| is a hint. Returns the base address of the memory and
// the page size via |allocated| on success. Returns nullptr on failure.
V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);
+V8_WARN_UNUSED_RESULT byte* AllocatePage(v8::PageAllocator* page_allocator,
+ void* address, size_t* allocated);

// Function that may release reserved memory regions to allow failed allocations
// to succeed. |length| is the amount of memory needed. Returns |true| if memory
@@ -143,50 +153,67 @@ V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);
V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);

// Represents and controls an area of reserved memory.
-class V8_EXPORT_PRIVATE VirtualMemory {
+class V8_EXPORT_PRIVATE VirtualMemory final {
public:
// Empty VirtualMemory object, controlling no reserved memory.
- VirtualMemory();
+ VirtualMemory() = default;

// Reserves virtual memory containing an area of the given size that is
- // aligned per alignment. This may not be at the position returned by
- // address().
- VirtualMemory(size_t size, void* hint, size_t alignment = AllocatePageSize());
+ // aligned per |alignment| rounded up to the |page_allocator|'s allocate page
+ // size.
+ // This may not be at the position returned by address().
+ VirtualMemory(v8::PageAllocator* page_allocator, size_t size, void* hint,
+ size_t alignment = 1);

// Construct a virtual memory by assigning it some already mapped address
// and size.
- VirtualMemory(Address address, size_t size)
- : address_(address), size_(size) {}
+ VirtualMemory(v8::PageAllocator* page_allocator, Address address, size_t size)
+ : page_allocator_(page_allocator), region_(address, size) {
+ DCHECK_NOT_NULL(page_allocator);
+ }

// Releases the reserved memory, if any, controlled by this VirtualMemory
// object.
~VirtualMemory();

+ // Move constructor.
+ VirtualMemory(VirtualMemory&& other) V8_NOEXCEPT { TakeControl(&other); }
+
+ // Move assignment operator.
+ VirtualMemory& operator=(VirtualMemory&& other) V8_NOEXCEPT {
+ TakeControl(&other);
+ return *this;
+ }
+
// Returns whether the memory has been reserved.
- bool IsReserved() const { return address_ != kNullAddress; }
+ bool IsReserved() const { return region_.begin() != kNullAddress; }

// Initializes or resets an embedded VirtualMemory object.
void Reset();

+ v8::PageAllocator* page_allocator() { return page_allocator_; }
+
+ const base::AddressRegion& region() const { return region_; }
+
// Returns the start address of the reserved memory.
// If the memory was reserved with an alignment, this address is not
// necessarily aligned. The user might need to round it up to a multiple of
// the alignment to get the start of the aligned block.
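Taken together, the allocation.cc and allocation.h hunks above thread an explicit v8::PageAllocator through every page-level operation that previously went to a process-wide default allocator. A minimal usage sketch under the new signatures (illustrative only, not code from this patch; the sizes are arbitrary):

    // Reserve a few pages, commit one, then return the whole region.
    v8::PageAllocator* allocator = GetPlatformPageAllocator();
    VirtualMemory vm(allocator, 4 * allocator->AllocatePageSize(),
                     /*hint=*/nullptr);
    if (vm.IsReserved()) {
      // Permission changes operate on CommitPageSize() multiples.
      CHECK(vm.SetPermissions(vm.address(), allocator->CommitPageSize(),
                              PageAllocator::kReadWrite));
      vm.Free();  // hands the entire reservation back to |allocator|
    }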
Address address() const { DCHECK(IsReserved()); - return address_; + return region_.begin(); } Address end() const { DCHECK(IsReserved()); - return address_ + size_; + return region_.end(); } // Returns the size of the reserved memory. The returned value is only // meaningful when IsReserved() returns true. // If the memory was reserved with an alignment, this size may be larger // than the requested size. - size_t size() const { return size_; } + size_t size() const { return region_.size(); } // Sets permissions according to the access argument. address and size must be // multiples of CommitPageSize(). Returns true on success, otherwise false. @@ -204,17 +231,16 @@ class V8_EXPORT_PRIVATE VirtualMemory { void TakeControl(VirtualMemory* from); bool InVM(Address address, size_t size) { - return (address_ <= address) && ((address_ + size_) >= (address + size)); + return region_.contains(address, size); } private: - Address address_; // Start address of the virtual memory. - size_t size_; // Size of the virtual memory. -}; + // Page allocator that controls the virtual memory. + v8::PageAllocator* page_allocator_ = nullptr; + base::AddressRegion region_; -bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result); -bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint, - VirtualMemory* result); + DISALLOW_COPY_AND_ASSIGN(VirtualMemory); +}; } // namespace internal } // namespace v8 diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h index 89f606ed410a51..1e5d6b2aaa7ceb 100644 --- a/deps/v8/src/api-arguments-inl.h +++ b/deps/v8/src/api-arguments-inl.h @@ -8,6 +8,7 @@ #include "src/api-arguments.h" #include "src/api-inl.h" +#include "src/debug/debug.h" #include "src/objects/api-callbacks.h" #include "src/tracing/trace-event.h" #include "src/vm-state-inl.h" @@ -34,6 +35,10 @@ inline JSObject* PropertyCallbackArguments::holder() { return JSObject::cast(this->begin()[T::kHolderIndex]); } +inline Object* PropertyCallbackArguments::receiver() { + return Object::cast(this->begin()[T::kThisIndex]); +} + inline JSObject* FunctionCallbackArguments::holder() { return JSObject::cast(this->begin()[T::kHolderIndex]); } @@ -47,14 +52,24 @@ inline JSObject* FunctionCallbackArguments::holder() { DCHECK(!name->IsPrivate()); \ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols()); -#define PREPARE_CALLBACK_INFO(ISOLATE, F, RETURN_VALUE, API_RETURN_TYPE, \ - CALLBACK_INFO) \ - if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects && \ - !ISOLATE->debug()->PerformSideEffectCheckForCallback(CALLBACK_INFO)) { \ - return RETURN_VALUE(); \ - } \ - VMState state(ISOLATE); \ - ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \ +#define PREPARE_CALLBACK_INFO(ISOLATE, F, RETURN_VALUE, API_RETURN_TYPE, \ + CALLBACK_INFO, RECEIVER, ACCESSOR_KIND) \ + if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects && \ + !ISOLATE->debug()->PerformSideEffectCheckForCallback( \ + CALLBACK_INFO, RECEIVER, Debug::k##ACCESSOR_KIND)) { \ + return RETURN_VALUE(); \ + } \ + VMState state(ISOLATE); \ + ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \ + PropertyCallbackInfo callback_info(begin()); + +#define PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(ISOLATE, F, RETURN_VALUE, \ + API_RETURN_TYPE) \ + if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects) { \ + return RETURN_VALUE(); \ + } \ + VMState state(ISOLATE); \ + ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \ PropertyCallbackInfo 
callback_info(begin()); #define CREATE_NAMED_CALLBACK(FUNCTION, TYPE, RETURN_TYPE, API_RETURN_TYPE, \ @@ -65,11 +80,13 @@ inline JSObject* FunctionCallbackArguments::holder() { Isolate* isolate = this->isolate(); \ RuntimeCallTimerScope timer( \ isolate, RuntimeCallCounterId::kNamed##FUNCTION##Callback); \ + Handle receiver_check_unsupported; \ GenericNamedProperty##FUNCTION##Callback f = \ ToCData( \ interceptor->TYPE()); \ PREPARE_CALLBACK_INFO(isolate, f, Handle, API_RETURN_TYPE, \ - INFO_FOR_SIDE_EFFECT); \ + INFO_FOR_SIDE_EFFECT, receiver_check_unsupported, \ + NotAccessor); \ LOG(isolate, \ ApiNamedPropertyAccess("interceptor-named-" #TYPE, holder(), *name)); \ f(v8::Utils::ToLocal(name), callback_info); \ @@ -87,10 +104,12 @@ FOR_EACH_CALLBACK(CREATE_NAMED_CALLBACK) Isolate* isolate = this->isolate(); \ RuntimeCallTimerScope timer( \ isolate, RuntimeCallCounterId::kIndexed##FUNCTION##Callback); \ + Handle receiver_check_unsupported; \ IndexedProperty##FUNCTION##Callback f = \ ToCData(interceptor->TYPE()); \ PREPARE_CALLBACK_INFO(isolate, f, Handle, API_RETURN_TYPE, \ - INFO_FOR_SIDE_EFFECT); \ + INFO_FOR_SIDE_EFFECT, receiver_check_unsupported, \ + NotAccessor); \ LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" #TYPE, \ holder(), index)); \ f(index, callback_info); \ @@ -108,9 +127,11 @@ Handle FunctionCallbackArguments::Call(CallHandlerInfo* handler) { RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback); v8::FunctionCallback f = v8::ToCData(handler->callback()); + Handle receiver_check_unsupported; if (isolate->debug_execution_mode() == DebugInfo::kSideEffects && !isolate->debug()->PerformSideEffectCheckForCallback( - handle(handler, isolate))) { + handle(handler, isolate), receiver_check_unsupported, + Debug::kNotAccessor)) { return Handle(); } VMState state(isolate); @@ -167,10 +188,11 @@ Handle PropertyCallbackArguments::CallNamedDescriptor( Handle PropertyCallbackArguments::BasicCallNamedGetterCallback( GenericNamedPropertyGetterCallback f, Handle name, - Handle info) { + Handle info, Handle receiver) { DCHECK(!name->IsPrivate()); Isolate* isolate = this->isolate(); - PREPARE_CALLBACK_INFO(isolate, f, Handle, v8::Value, info); + PREPARE_CALLBACK_INFO(isolate, f, Handle, v8::Value, info, receiver, + Getter); f(v8::Utils::ToLocal(name), callback_info); return GetReturnValue(isolate); } @@ -184,9 +206,8 @@ Handle PropertyCallbackArguments::CallNamedSetter( Isolate* isolate = this->isolate(); RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kNamedSetterCallback); - Handle side_effect_check_not_supported; - PREPARE_CALLBACK_INFO(isolate, f, Handle, v8::Value, - side_effect_check_not_supported); + PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle, + v8::Value); LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", holder(), *name)); f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info); @@ -202,9 +223,8 @@ Handle PropertyCallbackArguments::CallNamedDefiner( RuntimeCallCounterId::kNamedDefinerCallback); GenericNamedPropertyDefinerCallback f = ToCData(interceptor->definer()); - Handle side_effect_check_not_supported; - PREPARE_CALLBACK_INFO(isolate, f, Handle, v8::Value, - side_effect_check_not_supported); + PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle, + v8::Value); LOG(isolate, ApiNamedPropertyAccess("interceptor-named-define", holder(), *name)); f(v8::Utils::ToLocal(name), desc, callback_info); @@ -219,9 +239,8 @@ Handle PropertyCallbackArguments::CallIndexedSetter( 
RuntimeCallCounterId::kIndexedSetterCallback); IndexedPropertySetterCallback f = ToCData(interceptor->setter()); - Handle side_effect_check_not_supported; - PREPARE_CALLBACK_INFO(isolate, f, Handle, v8::Value, - side_effect_check_not_supported); + PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle, + v8::Value); LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index)); f(index, v8::Utils::ToLocal(value), callback_info); @@ -237,9 +256,8 @@ Handle PropertyCallbackArguments::CallIndexedDefiner( RuntimeCallCounterId::kIndexedDefinerCallback); IndexedPropertyDefinerCallback f = ToCData(interceptor->definer()); - Handle side_effect_check_not_supported; - PREPARE_CALLBACK_INFO(isolate, f, Handle, v8::Value, - side_effect_check_not_supported); + PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle, + v8::Value); LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-define", holder(), index)); f(index, desc, callback_info); @@ -275,7 +293,9 @@ Handle PropertyCallbackArguments::CallIndexedDescriptor( Handle PropertyCallbackArguments::BasicCallIndexedGetterCallback( IndexedPropertyGetterCallback f, uint32_t index, Handle info) { Isolate* isolate = this->isolate(); - PREPARE_CALLBACK_INFO(isolate, f, Handle, v8::Value, info); + Handle receiver_check_unsupported; + PREPARE_CALLBACK_INFO(isolate, f, Handle, v8::Value, info, + receiver_check_unsupported, Getter); f(index, callback_info); return GetReturnValue(isolate); } @@ -287,7 +307,9 @@ Handle PropertyCallbackArguments::CallPropertyEnumerator( v8::ToCData(interceptor->enumerator()); // TODO(cbruni): assert same type for indexed and named callback. Isolate* isolate = this->isolate(); - PREPARE_CALLBACK_INFO(isolate, f, Handle, v8::Array, interceptor); + Handle receiver_check_unsupported; + PREPARE_CALLBACK_INFO(isolate, f, Handle, v8::Array, interceptor, + receiver_check_unsupported, NotAccessor); f(callback_info); return GetReturnValue(isolate); } @@ -303,7 +325,8 @@ Handle PropertyCallbackArguments::CallAccessorGetter( LOG(isolate, ApiNamedPropertyAccess("accessor-getter", holder(), *name)); AccessorNameGetterCallback f = ToCData(info->getter()); - return BasicCallNamedGetterCallback(f, name, info); + return BasicCallNamedGetterCallback(f, name, info, + handle(receiver(), isolate)); } Handle PropertyCallbackArguments::CallAccessorSetter( @@ -314,15 +337,15 @@ Handle PropertyCallbackArguments::CallAccessorSetter( RuntimeCallCounterId::kAccessorSetterCallback); AccessorNameSetterCallback f = ToCData(accessor_info->setter()); - Handle side_effect_check_not_supported; - PREPARE_CALLBACK_INFO(isolate, f, Handle, void, - side_effect_check_not_supported); + PREPARE_CALLBACK_INFO(isolate, f, Handle, void, accessor_info, + handle(receiver(), isolate), Setter); LOG(isolate, ApiNamedPropertyAccess("accessor-setter", holder(), *name)); f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info); return GetReturnValue(isolate); } #undef PREPARE_CALLBACK_INFO +#undef PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK } // namespace internal } // namespace v8 diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h index 0a0a7362c7c687..d8fc2b49ab2824 100644 --- a/deps/v8/src/api-arguments.h +++ b/deps/v8/src/api-arguments.h @@ -26,12 +26,12 @@ class CustomArguments : public CustomArgumentsBase { public: static const int kReturnValueOffset = T::kReturnValueIndex; - ~CustomArguments() { + ~CustomArguments() override { this->begin()[kReturnValueOffset] = reinterpret_cast(kHandleZapValue); } - 
virtual inline void IterateInstance(RootVisitor* v) { + inline void IterateInstance(RootVisitor* v) override { v->VisitRootPointers(Root::kRelocatable, nullptr, values_, values_ + T::kArgsLength); } @@ -133,9 +133,10 @@ class PropertyCallbackArguments IndexedPropertyGetterCallback f, uint32_t index, Handle info); inline Handle BasicCallNamedGetterCallback( GenericNamedPropertyGetterCallback f, Handle name, - Handle info); + Handle info, Handle receiver = Handle()); inline JSObject* holder(); + inline Object* receiver(); // Don't copy PropertyCallbackArguments, because they would both have the // same prev_ pointer. diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc index 977d6cdafc95fc..11b63d56d8da2c 100644 --- a/deps/v8/src/api-natives.cc +++ b/deps/v8/src/api-natives.cc @@ -114,9 +114,8 @@ MaybeHandle DefineDataProperty(Isolate* isolate, } #endif - MAYBE_RETURN_NULL( - Object::AddDataProperty(&it, value, attributes, kThrowOnError, - Object::CERTAINLY_NOT_STORE_FROM_KEYED)); + MAYBE_RETURN_NULL(Object::AddDataProperty( + &it, value, attributes, kThrowOnError, StoreOrigin::kNamed)); return value; } @@ -403,8 +402,10 @@ MaybeHandle InstantiateObject(Isolate* isolate, } Handle object; - ASSIGN_RETURN_ON_EXCEPTION(isolate, object, - JSObject::New(constructor, new_target), JSObject); + ASSIGN_RETURN_ON_EXCEPTION( + isolate, object, + JSObject::New(constructor, new_target, Handle::null()), + JSObject); if (is_prototype) JSObject::OptimizeAsPrototype(object); @@ -495,8 +496,15 @@ MaybeHandle InstantiateFunction(Isolate* isolate, parent_prototype); } } + InstanceType function_type = + (!data->needs_access_check() && + data->named_property_handler()->IsUndefined(isolate) && + data->indexed_property_handler()->IsUndefined(isolate)) + ? JS_API_OBJECT_TYPE + : JS_SPECIAL_API_OBJECT_TYPE; + Handle function = ApiNatives::CreateApiFunction( - isolate, data, prototype, ApiNatives::JavaScriptObjectType, maybe_name); + isolate, data, prototype, function_type, maybe_name); if (serial_number) { // Cache the function. CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited, @@ -625,8 +633,7 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate, Handle ApiNatives::CreateApiFunction( Isolate* isolate, Handle obj, - Handle prototype, ApiInstanceType instance_type, - MaybeHandle maybe_name) { + Handle prototype, InstanceType type, MaybeHandle maybe_name) { Handle shared = FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj, maybe_name); @@ -670,33 +677,10 @@ Handle ApiNatives::CreateApiFunction( immutable_proto = instance_template->immutable_proto(); } - // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing - // JSObject::GetHeaderSize. - int instance_size = kPointerSize * embedder_field_count; - InstanceType type; - switch (instance_type) { - case JavaScriptObjectType: - if (!obj->needs_access_check() && - obj->named_property_handler()->IsUndefined(isolate) && - obj->indexed_property_handler()->IsUndefined(isolate)) { - type = JS_API_OBJECT_TYPE; - } else { - type = JS_SPECIAL_API_OBJECT_TYPE; - } - instance_size += JSObject::kHeaderSize; - break; - case GlobalObjectType: - type = JS_GLOBAL_OBJECT_TYPE; - instance_size += JSGlobalObject::kSize; - break; - case GlobalProxyType: - type = JS_GLOBAL_PROXY_TYPE; - instance_size += JSGlobalProxy::kSize; - break; - default: - UNREACHABLE(); - break; - } + // JS_FUNCTION_TYPE requires information about the prototype slot. 
+ DCHECK_NE(JS_FUNCTION_TYPE, type); + int instance_size = + JSObject::GetHeaderSize(type) + kPointerSize * embedder_field_count; Handle map = isolate->factory()->NewMap(type, instance_size, TERMINAL_FAST_ELEMENTS_KIND); diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h index e8bb32d40a02fb..ff6cdc6c864537 100644 --- a/deps/v8/src/api-natives.h +++ b/deps/v8/src/api-natives.h @@ -9,6 +9,7 @@ #include "src/base/macros.h" #include "src/handles.h" #include "src/maybe-handles.h" +#include "src/objects.h" #include "src/property-details.h" namespace v8 { @@ -33,15 +34,9 @@ class ApiNatives { V8_WARN_UNUSED_RESULT static MaybeHandle InstantiateRemoteObject( Handle data); - enum ApiInstanceType { - JavaScriptObjectType, - GlobalObjectType, - GlobalProxyType - }; - static Handle CreateApiFunction( Isolate* isolate, Handle obj, - Handle prototype, ApiInstanceType instance_type, + Handle prototype, InstanceType type, MaybeHandle name = MaybeHandle()); static void AddDataProperty(Isolate* isolate, Handle info, diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index d141496c5749cc..d174ef36148429 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -876,12 +876,12 @@ void RegisteredExtension::UnregisterAll() { namespace { class ExtensionResource : public String::ExternalOneByteStringResource { public: - ExtensionResource() : data_(0), length_(0) {} + ExtensionResource() : data_(nullptr), length_(0) {} ExtensionResource(const char* data, size_t length) : data_(data), length_(length) {} - const char* data() const { return data_; } - size_t length() const { return length_; } - virtual void Dispose() {} + const char* data() const override { return data_; } + size_t length() const override { return length_; } + void Dispose() override {} private: const char* data_; @@ -1391,7 +1391,7 @@ static Local FunctionTemplateNew( next_serial_number = isolate->heap()->GetNextTemplateSerialNumber(); } obj->set_serial_number(i::Smi::FromInt(next_serial_number)); - if (callback != 0) { + if (callback != nullptr) { Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type); } obj->set_length(length); @@ -1676,7 +1676,8 @@ static void TemplateSetAccessor( Template* template_obj, v8::Local name, Getter getter, Setter setter, Data data, AccessControl settings, PropertyAttribute attribute, v8::Local signature, bool is_special_data_property, - bool replace_on_access, SideEffectType getter_side_effect_type) { + bool replace_on_access, SideEffectType getter_side_effect_type, + SideEffectType setter_side_effect_type) { auto info = Utils::OpenHandle(template_obj); auto isolate = info->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); @@ -1686,8 +1687,8 @@ static void TemplateSetAccessor( is_special_data_property, replace_on_access); accessor_info->set_initial_property_attributes( static_cast(attribute)); - accessor_info->set_has_no_side_effect(getter_side_effect_type == - SideEffectType::kHasNoSideEffect); + accessor_info->set_getter_side_effect_type(getter_side_effect_type); + accessor_info->set_setter_side_effect_type(setter_side_effect_type); i::ApiNatives::AddNativeDataProperty(isolate, info, accessor_info); } @@ -1695,29 +1696,34 @@ void Template::SetNativeDataProperty( v8::Local name, AccessorGetterCallback getter, AccessorSetterCallback setter, v8::Local data, PropertyAttribute attribute, v8::Local signature, - AccessControl settings, SideEffectType getter_side_effect_type) { + AccessControl settings, SideEffectType getter_side_effect_type, + SideEffectType 
setter_side_effect_type) { TemplateSetAccessor(this, name, getter, setter, data, settings, attribute, - signature, true, false, getter_side_effect_type); + signature, true, false, getter_side_effect_type, + setter_side_effect_type); } void Template::SetNativeDataProperty( v8::Local name, AccessorNameGetterCallback getter, AccessorNameSetterCallback setter, v8::Local data, PropertyAttribute attribute, v8::Local signature, - AccessControl settings, SideEffectType getter_side_effect_type) { + AccessControl settings, SideEffectType getter_side_effect_type, + SideEffectType setter_side_effect_type) { TemplateSetAccessor(this, name, getter, setter, data, settings, attribute, - signature, true, false, getter_side_effect_type); + signature, true, false, getter_side_effect_type, + setter_side_effect_type); } void Template::SetLazyDataProperty(v8::Local name, AccessorNameGetterCallback getter, v8::Local data, PropertyAttribute attribute, - SideEffectType getter_side_effect_type) { + SideEffectType getter_side_effect_type, + SideEffectType setter_side_effect_type) { TemplateSetAccessor(this, name, getter, static_cast(nullptr), data, DEFAULT, attribute, Local(), true, - true, getter_side_effect_type); + true, getter_side_effect_type, setter_side_effect_type); } void Template::SetIntrinsicDataProperty(Local name, Intrinsic intrinsic, @@ -1737,10 +1743,11 @@ void ObjectTemplate::SetAccessor(v8::Local name, v8::Local data, AccessControl settings, PropertyAttribute attribute, v8::Local signature, - SideEffectType getter_side_effect_type) { + SideEffectType getter_side_effect_type, + SideEffectType setter_side_effect_type) { TemplateSetAccessor(this, name, getter, setter, data, settings, attribute, signature, i::FLAG_disable_old_api_accessors, false, - getter_side_effect_type); + getter_side_effect_type, setter_side_effect_type); } void ObjectTemplate::SetAccessor(v8::Local name, @@ -1749,10 +1756,11 @@ void ObjectTemplate::SetAccessor(v8::Local name, v8::Local data, AccessControl settings, PropertyAttribute attribute, v8::Local signature, - SideEffectType getter_side_effect_type) { + SideEffectType getter_side_effect_type, + SideEffectType setter_side_effect_type) { TemplateSetAccessor(this, name, getter, setter, data, settings, attribute, signature, i::FLAG_disable_old_api_accessors, false, - getter_side_effect_type); + getter_side_effect_type, setter_side_effect_type); } template CreateInterceptorInfo( isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE, i::TENURED)); obj->set_flags(0); - if (getter != 0) SET_FIELD_WRAPPED(isolate, obj, set_getter, getter); - if (setter != 0) SET_FIELD_WRAPPED(isolate, obj, set_setter, setter); - if (query != 0) SET_FIELD_WRAPPED(isolate, obj, set_query, query); - if (descriptor != 0) + if (getter != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_getter, getter); + if (setter != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_setter, setter); + if (query != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_query, query); + if (descriptor != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_descriptor, descriptor); - if (remover != 0) SET_FIELD_WRAPPED(isolate, obj, set_deleter, remover); - if (enumerator != 0) + if (remover != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_deleter, remover); + if (enumerator != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_enumerator, enumerator); - if (definer != 0) SET_FIELD_WRAPPED(isolate, obj, set_definer, definer); + if (definer != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_definer, definer); obj->set_can_intercept_symbols( 
!(static_cast(flags) & static_cast(PropertyHandlerFlags::kOnlyInterceptStrings))); @@ -4015,7 +4023,8 @@ Maybe v8::Object::Set(v8::Local context, auto value_obj = Utils::OpenHandle(*value); has_pending_exception = i::Runtime::SetObjectProperty(isolate, self, key_obj, value_obj, - i::LanguageMode::kSloppy) + i::LanguageMode::kSloppy, + i::StoreOrigin::kMaybeKeyed) .is_null(); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); return Just(true); @@ -4569,8 +4578,8 @@ static Maybe ObjectSetAccessor( Local context, Object* self, Local name, Getter getter, Setter setter, Data data, AccessControl settings, PropertyAttribute attributes, bool is_special_data_property, - bool replace_on_access, - SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect) { + bool replace_on_access, SideEffectType getter_side_effect_type, + SideEffectType setter_side_effect_type) { auto isolate = reinterpret_cast(context->GetIsolate()); ENTER_V8_NO_SCRIPT(isolate, context, Object, SetAccessor, Nothing(), i::HandleScope); @@ -4581,8 +4590,8 @@ static Maybe ObjectSetAccessor( i::Handle info = MakeAccessorInfo(isolate, name, getter, setter, data, settings, signature, is_special_data_property, replace_on_access); - info->set_has_no_side_effect(getter_side_effect_type == - SideEffectType::kHasNoSideEffect); + info->set_getter_side_effect_type(getter_side_effect_type); + info->set_setter_side_effect_type(setter_side_effect_type); if (info.is_null()) return Nothing(); bool fast = obj->HasFastProperties(); i::Handle result; @@ -4605,11 +4614,12 @@ Maybe Object::SetAccessor(Local context, Local name, AccessorNameSetterCallback setter, MaybeLocal data, AccessControl settings, PropertyAttribute attribute, - SideEffectType getter_side_effect_type) { + SideEffectType getter_side_effect_type, + SideEffectType setter_side_effect_type) { return ObjectSetAccessor(context, this, name, getter, setter, data.FromMaybe(Local()), settings, attribute, i::FLAG_disable_old_api_accessors, false, - getter_side_effect_type); + getter_side_effect_type, setter_side_effect_type); } @@ -4636,19 +4646,22 @@ Maybe Object::SetNativeDataProperty( v8::Local context, v8::Local name, AccessorNameGetterCallback getter, AccessorNameSetterCallback setter, v8::Local data, PropertyAttribute attributes, - SideEffectType getter_side_effect_type) { + SideEffectType getter_side_effect_type, + SideEffectType setter_side_effect_type) { return ObjectSetAccessor(context, this, name, getter, setter, data, DEFAULT, - attributes, true, false, getter_side_effect_type); + attributes, true, false, getter_side_effect_type, + setter_side_effect_type); } Maybe Object::SetLazyDataProperty( v8::Local context, v8::Local name, AccessorNameGetterCallback getter, v8::Local data, - PropertyAttribute attributes, SideEffectType getter_side_effect_type) { + PropertyAttribute attributes, SideEffectType getter_side_effect_type, + SideEffectType setter_side_effect_type) { return ObjectSetAccessor(context, this, name, getter, static_cast(nullptr), data, DEFAULT, attributes, true, true, - getter_side_effect_type); + getter_side_effect_type, setter_side_effect_type); } Maybe v8::Object::HasOwnProperty(Local context, @@ -5938,16 +5951,16 @@ HeapStatistics::HeapStatistics() malloced_memory_(0), external_memory_(0), peak_malloced_memory_(0), - does_zap_garbage_(0), + does_zap_garbage_(false), number_of_native_contexts_(0), number_of_detached_contexts_(0) {} -HeapSpaceStatistics::HeapSpaceStatistics(): space_name_(0), - space_size_(0), - space_used_size_(0), - space_available_size_(0), - 
physical_space_size_(0) { } - +HeapSpaceStatistics::HeapSpaceStatistics() + : space_name_(nullptr), + space_size_(0), + space_used_size_(0), + space_available_size_(0), + physical_space_size_(0) {} HeapObjectStatistics::HeapObjectStatistics() : object_type_(nullptr), @@ -7300,14 +7313,6 @@ WasmCompiledModule::BufferReference WasmCompiledModule::GetWasmWireBytesRef() { return {bytes_vec.start(), bytes_vec.size()}; } -Local WasmCompiledModule::GetWasmWireBytes() { - BufferReference ref = GetWasmWireBytesRef(); - CHECK_LE(ref.size, String::kMaxLength); - return String::NewFromOneByte(GetIsolate(), ref.start, NewStringType::kNormal, - static_cast(ref.size)) - .ToLocalChecked(); -} - WasmCompiledModule::TransferrableModule WasmCompiledModule::GetTransferrableModule() { if (i::FLAG_wasm_shared_code) { @@ -7408,7 +7413,7 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver { reinterpret_cast(isolate)->global_handles()->Create( *Utils::OpenHandle(*promise))) {} - ~AsyncCompilationResolver() { + ~AsyncCompilationResolver() override { i::GlobalHandles::Destroy(i::Handle::cast(promise_).location()); } @@ -7447,9 +7452,6 @@ void WasmModuleObjectBuilderStreaming::Finish() { void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal exception) { } -WasmModuleObjectBuilderStreaming::~WasmModuleObjectBuilderStreaming() { -} - // static v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() { return new ArrayBufferAllocator(); @@ -7509,9 +7511,8 @@ void ArrayBufferDeleter(void* buffer, size_t length, void* info) { v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() { i::Handle self = Utils::OpenHandle(this); - size_t byte_length = static_cast(self->byte_length()->Number()); Contents contents( - self->backing_store(), byte_length, self->allocation_base(), + self->backing_store(), self->byte_length(), self->allocation_base(), self->allocation_length(), self->is_wasm_memory() ? Allocator::AllocationMode::kReservation : Allocator::AllocationMode::kNormal, @@ -7539,7 +7540,7 @@ void v8::ArrayBuffer::Neuter() { size_t v8::ArrayBuffer::ByteLength() const { i::Handle obj = Utils::OpenHandle(this); - return static_cast(obj->byte_length()->Number()); + return obj->byte_length(); } @@ -7563,6 +7564,7 @@ Local v8::ArrayBuffer::New(Isolate* isolate, void* data, ArrayBufferCreationMode mode) { // Embedders must guarantee that the external backing store is valid. CHECK(byte_length == 0 || data != nullptr); + CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength); i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, ArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); @@ -7594,9 +7596,8 @@ Local v8::ArrayBufferView::Buffer() { size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) { i::Handle self = Utils::OpenHandle(this); - size_t byte_offset = i::NumberToSize(self->byte_offset()); - size_t bytes_to_copy = - i::Min(byte_length, i::NumberToSize(self->byte_length())); + size_t byte_offset = self->byte_offset(); + size_t bytes_to_copy = i::Min(byte_length, self->byte_length()); if (bytes_to_copy) { i::DisallowHeapAllocation no_gc; i::Isolate* isolate = self->GetIsolate(); @@ -7627,13 +7628,13 @@ bool v8::ArrayBufferView::HasBuffer() const { size_t v8::ArrayBufferView::ByteOffset() { i::Handle obj = Utils::OpenHandle(this); - return static_cast(obj->byte_offset()->Number()); + return obj->WasNeutered() ? 
0 : obj->byte_offset(); } size_t v8::ArrayBufferView::ByteLength() { i::Handle obj = Utils::OpenHandle(this); - return static_cast(obj->byte_length()->Number()); + return obj->WasNeutered() ? 0 : obj->byte_length(); } @@ -7747,9 +7748,8 @@ v8::SharedArrayBuffer::Contents::Contents( v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() { i::Handle self = Utils::OpenHandle(this); - size_t byte_length = static_cast(self->byte_length()->Number()); Contents contents( - self->backing_store(), byte_length, self->allocation_base(), + self->backing_store(), self->byte_length(), self->allocation_base(), self->allocation_length(), self->is_wasm_memory() ? ArrayBuffer::Allocator::AllocationMode::kReservation @@ -7765,7 +7765,7 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() { size_t v8::SharedArrayBuffer::ByteLength() const { i::Handle obj = Utils::OpenHandle(this); - return static_cast(obj->byte_length()->Number()); + return obj->byte_length(); } Local v8::SharedArrayBuffer::New(Isolate* isolate, @@ -7819,8 +7819,8 @@ Local v8::Symbol::New(Isolate* isolate, Local name) { Local v8::Symbol::For(Isolate* isolate, Local name) { i::Isolate* i_isolate = reinterpret_cast(isolate); i::Handle i_name = Utils::OpenHandle(*name); - return Utils::ToLocal(i_isolate->SymbolFor( - i::Heap::kPublicSymbolTableRootIndex, i_name, false)); + return Utils::ToLocal( + i_isolate->SymbolFor(i::RootIndex::kPublicSymbolTable, i_name, false)); } @@ -7828,10 +7828,11 @@ Local v8::Symbol::ForApi(Isolate* isolate, Local name) { i::Isolate* i_isolate = reinterpret_cast(isolate); i::Handle i_name = Utils::OpenHandle(*name); return Utils::ToLocal( - i_isolate->SymbolFor(i::Heap::kApiSymbolTableRootIndex, i_name, false)); + i_isolate->SymbolFor(i::RootIndex::kApiSymbolTable, i_name, false)); } #define WELL_KNOWN_SYMBOLS(V) \ + V(AsyncIterator, async_iterator) \ V(HasInstance, has_instance) \ V(IsConcatSpreadable, is_concat_spreadable) \ V(Iterator, iterator) \ @@ -7868,8 +7869,8 @@ Local v8::Private::New(Isolate* isolate, Local name) { Local v8::Private::ForApi(Isolate* isolate, Local name) { i::Isolate* i_isolate = reinterpret_cast(isolate); i::Handle i_name = Utils::OpenHandle(*name); - Local result = Utils::ToLocal(i_isolate->SymbolFor( - i::Heap::kApiPrivateSymbolTableRootIndex, i_name, true)); + Local result = Utils::ToLocal( + i_isolate->SymbolFor(i::RootIndex::kApiPrivateSymbolTable, i_name, true)); return v8::Local(reinterpret_cast(*result)); } @@ -8093,6 +8094,11 @@ void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) { isolate->heap()->SetEmbedderHeapTracer(tracer); } +EmbedderHeapTracer* Isolate::GetEmbedderHeapTracer() { + i::Isolate* isolate = reinterpret_cast(this); + return isolate->heap()->GetEmbedderHeapTracer(); +} + void Isolate::SetGetExternallyAllocatedMemoryInBytesCallback( GetExternallyAllocatedMemoryInBytesCallback callback) { i::Isolate* isolate = reinterpret_cast(this); @@ -8132,9 +8138,9 @@ void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) { kGCCallbackFlagForced); } else { DCHECK_EQ(kFullGarbageCollection, type); - reinterpret_cast(this)->heap()->CollectAllGarbage( - i::Heap::kAbortIncrementalMarkingMask, - i::GarbageCollectionReason::kTesting, kGCCallbackFlagForced); + reinterpret_cast(this)->heap()->PreciseCollectAllGarbage( + i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting, + kGCCallbackFlagForced); } } @@ -8273,6 +8279,11 @@ void Isolate::SetHostInitializeImportMetaObjectCallback( 
isolate->SetHostInitializeImportMetaObjectCallback(callback); } +void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) { + i::Isolate* isolate = reinterpret_cast(this); + isolate->SetPrepareStackTraceCallback(callback); +} + Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope( Isolate* isolate, Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure) @@ -8698,17 +8709,17 @@ void Isolate::SetStackLimit(uintptr_t stack_limit) { void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) { i::Isolate* isolate = reinterpret_cast(this); - if (isolate->heap()->memory_allocator()->code_range()->valid()) { - *start = reinterpret_cast( - isolate->heap()->memory_allocator()->code_range()->start()); - *length_in_bytes = - isolate->heap()->memory_allocator()->code_range()->size(); - } else { - *start = nullptr; - *length_in_bytes = 0; - } + const base::AddressRegion& code_range = + isolate->heap()->memory_allocator()->code_range(); + *start = reinterpret_cast(code_range.begin()); + *length_in_bytes = code_range.size(); } +MemoryRange Isolate::GetBuiltinsCodeRange() { + i::Isolate* isolate = reinterpret_cast(this); + return {reinterpret_cast(isolate->embedded_blob()), + isolate->embedded_blob_size()}; +} #define CALLBACK_SETTER(ExternalName, Type, InternalName) \ void Isolate::Set##ExternalName(Type callback) { \ @@ -9093,7 +9104,10 @@ int debug::Script::ColumnOffset() const { std::vector debug::Script::LineEnds() const { i::Handle script = Utils::OpenHandle(this); - if (script->type() == i::Script::TYPE_WASM) return std::vector(); + if (script->type() == i::Script::TYPE_WASM && + this->SourceMappingURL().IsEmpty()) { + return std::vector(); + } i::Isolate* isolate = script->GetIsolate(); i::HandleScope scope(isolate); i::Script::InitLineEnds(script); @@ -9182,7 +9196,8 @@ bool debug::Script::GetPossibleBreakpoints( std::vector* locations) const { CHECK(!start.IsEmpty()); i::Handle script = Utils::OpenHandle(this); - if (script->type() == i::Script::TYPE_WASM) { + if (script->type() == i::Script::TYPE_WASM && + this->SourceMappingURL().IsEmpty()) { i::WasmModuleObject* module_object = i::WasmModuleObject::cast(script->wasm_module_object()); return module_object->GetPossibleBreakpoints(start, end, locations); @@ -9233,9 +9248,13 @@ bool debug::Script::GetPossibleBreakpoints( int debug::Script::GetSourceOffset(const debug::Location& location) const { i::Handle script = Utils::OpenHandle(this); if (script->type() == i::Script::TYPE_WASM) { - return i::WasmModuleObject::cast(script->wasm_module_object()) - ->GetFunctionOffset(location.GetLineNumber()) + - location.GetColumnNumber(); + if (this->SourceMappingURL().IsEmpty()) { + return i::WasmModuleObject::cast(script->wasm_module_object()) + ->GetFunctionOffset(location.GetLineNumber()) + + location.GetColumnNumber(); + } + DCHECK_EQ(0, location.GetLineNumber()); + return location.GetColumnNumber(); } int line = std::max(location.GetLineNumber() - script->line_offset(), 0); @@ -9678,7 +9697,7 @@ int debug::GetNativeAccessorDescriptor(v8::Local context, } auto isolate = reinterpret_cast(context->GetIsolate()); int result = 0; -#define IS_BUILTIN_ACESSOR(name, _) \ +#define IS_BUILTIN_ACESSOR(name, ...) 
\ if (*structure == *isolate->factory()->name##_accessor()) \ result |= static_cast(debug::NativeAccessorType::IsBuiltin); ACCESSOR_INFO_LIST(IS_BUILTIN_ACESSOR) @@ -9727,7 +9746,7 @@ debug::PostponeInterruptsScope::PostponeInterruptsScope(v8::Isolate* isolate) new i::PostponeInterruptsScope(reinterpret_cast(isolate), i::StackGuard::API_INTERRUPT)) {} -debug::PostponeInterruptsScope::~PostponeInterruptsScope() {} +debug::PostponeInterruptsScope::~PostponeInterruptsScope() = default; Local CpuProfileNode::GetFunctionName() const { const i::ProfileNode* node = reinterpret_cast(this); @@ -9851,6 +9870,47 @@ debug::TypeProfile::ScriptData debug::TypeProfile::GetScriptData( return ScriptData(i, type_profile_); } +v8::MaybeLocal debug::WeakMap::Get(v8::Local context, + v8::Local key) { + PREPARE_FOR_EXECUTION(context, WeakMap, Get, Value); + auto self = Utils::OpenHandle(this); + Local result; + i::Handle argv[] = {Utils::OpenHandle(*key)}; + has_pending_exception = + !ToLocal(i::Execution::Call(isolate, isolate->weakmap_get(), self, + arraysize(argv), argv), + &result); + RETURN_ON_FAILED_EXECUTION(Value); + RETURN_ESCAPED(result); +} + +v8::MaybeLocal debug::WeakMap::Set( + v8::Local context, v8::Local key, + v8::Local value) { + PREPARE_FOR_EXECUTION(context, WeakMap, Set, WeakMap); + auto self = Utils::OpenHandle(this); + i::Handle result; + i::Handle argv[] = {Utils::OpenHandle(*key), + Utils::OpenHandle(*value)}; + has_pending_exception = !i::Execution::Call(isolate, isolate->weakmap_set(), + self, arraysize(argv), argv) + .ToHandle(&result); + RETURN_ON_FAILED_EXECUTION(WeakMap); + RETURN_ESCAPED(Local::Cast(Utils::ToLocal(result))); +} + +Local debug::WeakMap::New(v8::Isolate* isolate) { + i::Isolate* i_isolate = reinterpret_cast(isolate); + LOG_API(i_isolate, WeakMap, New); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + i::Handle obj = i_isolate->factory()->NewJSWeakMap(); + return ToApiHandle(obj); +} + +debug::WeakMap* debug::WeakMap::Cast(v8::Value* value) { + return static_cast(value); +} + const char* CpuProfileNode::GetFunctionNameStr() const { const i::ProfileNode* node = reinterpret_cast(this); return node->entry()->name(); @@ -10442,9 +10502,9 @@ void EmbedderHeapTracer::GarbageCollectionForTesting( CHECK(i::FLAG_expose_gc); i::Heap* const heap = reinterpret_cast(isolate_)->heap(); heap->SetEmbedderStackStateForNextFinalizaton(stack_state); - heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask, - i::GarbageCollectionReason::kTesting, - kGCCallbackFlagForced); + heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags, + i::GarbageCollectionReason::kTesting, + kGCCallbackFlagForced); } bool EmbedderHeapTracer::AdvanceTracing(double deadline_in_ms) { diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index ae0ce350a4aaac..e5f5c7da70f9e0 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -116,6 +116,7 @@ class RegisteredExtension { V(Proxy, JSProxy) \ V(debug::GeneratorObject, JSGeneratorObject) \ V(debug::Script, Script) \ + V(debug::WeakMap, JSWeakMap) \ V(Promise, JSPromise) \ V(Primitive, Object) \ V(PrimitiveArray, FixedArray) \ diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h index 0bfdd770f58351..db1ee5467c99c8 100644 --- a/deps/v8/src/arguments.h +++ b/deps/v8/src/arguments.h @@ -27,7 +27,7 @@ namespace internal { // Note that length_ (whose value is in the integer range) is defined // as intptr_t to provide endian-neutrality on 64-bit archs. 
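// A minimal sketch of the debug::WeakMap wrappers added in the api.cc hunk
// above. This is an internal debug API (declared in debug-interface.h), so
// the helper and its error handling here are illustrative only.
v8::MaybeLocal<v8::Value> StashAndFetch(v8::Isolate* isolate,
                                        v8::Local<v8::Context> context,
                                        v8::Local<v8::Value> key,
                                        v8::Local<v8::Value> value) {
  v8::Local<v8::debug::WeakMap> map = v8::debug::WeakMap::New(isolate);
  // Set() invokes the original built-in WeakMap set via Execution::Call and
  // echoes the map back; an empty result means an exception is pending.
  if (map->Set(context, key, value).IsEmpty()) return {};
  return map->Get(context, key);  // undefined if the key is absent
}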
-class Arguments BASE_EMBEDDED { +class Arguments { public: Arguments(int length, Object** arguments) : length_(length), arguments_(arguments) { diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 163fa4c2192f8a..c253b3033c4c32 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -46,6 +46,7 @@ #include "src/deoptimizer.h" #include "src/macro-assembler.h" #include "src/objects-inl.h" +#include "src/string-constants.h" namespace v8 { namespace internal { @@ -417,6 +418,13 @@ Operand Operand::EmbeddedCode(CodeStub* stub) { return result; } +Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) { + Operand result(0, RelocInfo::EMBEDDED_OBJECT); + result.is_heap_object_request_ = true; + result.value_.heap_object_request = HeapObjectRequest(str); + return result; +} + MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) : rn_(rn), rm_(no_reg), offset_(offset), am_(am) { // Accesses below the stack pointer are not safe, and are prohibited by the @@ -472,6 +480,7 @@ void NeonMemOperand::SetAlignment(int align) { } void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { + DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty()); for (auto& request : heap_object_requests_) { Handle object; switch (request.kind()) { @@ -483,6 +492,12 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { request.code_stub()->set_isolate(isolate); object = request.code_stub()->GetCode(); break; + case HeapObjectRequest::kStringConstant: { + const StringConstantBase* str = request.string(); + CHECK_NOT_NULL(str); + object = str->AllocateStringConstant(isolate); + break; + } } Address pc = reinterpret_cast
<Address>(buffer_) + request.offset(); Memory<Address>
(constant_pool_entry_address(pc, 0 /* unused */)) = diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index fb367028825fa5..1bfa58b8534466 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -393,7 +393,7 @@ enum Coprocessor { // Machine instruction Operands // Class Operand represents a shifter operand in data processing instructions -class Operand BASE_EMBEDDED { +class Operand { public: // immediate V8_INLINE explicit Operand(int32_t immediate, @@ -425,6 +425,7 @@ class Operand BASE_EMBEDDED { static Operand EmbeddedNumber(double number); // Smi or HeapNumber. static Operand EmbeddedCode(CodeStub* stub); + static Operand EmbeddedStringConstant(const StringConstantBase* str); // Return true if this is a register operand. bool IsRegister() const { @@ -498,7 +499,7 @@ class Operand BASE_EMBEDDED { // Class MemOperand represents a memory operand in load and store instructions -class MemOperand BASE_EMBEDDED { +class MemOperand { public: // [rn +/- offset] Offset/NegOffset // [rn +/- offset]! PreIndex/NegPreIndex @@ -557,7 +558,7 @@ class MemOperand BASE_EMBEDDED { // Class NeonMemOperand represents a memory operand in load and // store NEON instructions -class NeonMemOperand BASE_EMBEDDED { +class NeonMemOperand { public: // [rn {:align}] Offset // [rn {:align}]! PostIndex @@ -580,7 +581,7 @@ class NeonMemOperand BASE_EMBEDDED { // Class NeonListOperand represents a list of NEON registers -class NeonListOperand BASE_EMBEDDED { +class NeonListOperand { public: explicit NeonListOperand(DoubleRegister base, int register_count = 1) : base_(base), register_count_(register_count) {} @@ -1693,7 +1694,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { friend class UseScratchRegisterScope; }; -class EnsureSpace BASE_EMBEDDED { +class EnsureSpace { public: V8_INLINE explicit EnsureSpace(Assembler* assembler); }; diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index bb5becefb854a0..c7eaef1325aa8e 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -131,7 +131,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { IsolateAddressId::kPendingExceptionAddress, isolate()))); } __ str(r0, MemOperand(scratch)); - __ LoadRoot(r0, Heap::kExceptionRootIndex); + __ LoadRoot(r0, RootIndex::kException); __ b(&exit); // Invoke: Link this frame into the handler chain. @@ -418,7 +418,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ LeaveExitFrame(false, r4, stack_space_operand != nullptr); // Check if the function scheduled an exception. - __ LoadRoot(r4, Heap::kTheHoleValueRootIndex); + __ LoadRoot(r4, RootIndex::kTheHoleValue); __ Move(r6, ExternalReference::scheduled_exception_address(isolate)); __ ldr(r5, MemOperand(r6)); __ cmp(r4, r5); @@ -469,14 +469,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(FCA::kHolderIndex == 0); // new target - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); // call data __ push(call_data); Register scratch0 = call_data; Register scratch1 = r5; - __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex); + __ LoadRoot(scratch0, RootIndex::kUndefinedValue); // return value __ push(scratch0); // return value default @@ -549,7 +549,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) { // Push data from AccessorInfo. 
__ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset)); __ push(scratch); - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ Push(scratch, scratch); __ Move(scratch, ExternalReference::isolate_address(isolate())); __ Push(scratch, holder); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 39f756d152c4a3..7dc4ced32129d7 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -9,7 +9,6 @@ #include "src/arm/assembler-arm-inl.h" #include "src/arm/simulator-arm.h" #include "src/codegen.h" -#include "src/isolate.h" #include "src/macro-assembler.h" namespace v8 { @@ -19,17 +18,17 @@ namespace internal { #if defined(V8_HOST_ARCH_ARM) -MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate, - MemCopyUint8Function stub) { +MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) { #if defined(USE_SIMULATOR) return stub; #else + v8::PageAllocator* page_allocator = GetPlatformPageAllocator(); size_t allocated = 0; - byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated); + byte* buffer = AllocatePage(page_allocator, + page_allocator->GetRandomMmapAddr(), &allocated); if (buffer == nullptr) return stub; - MacroAssembler masm(isolate, buffer, static_cast(allocated), - CodeObjectRequired::kNo); + MacroAssembler masm(AssemblerOptions{}, buffer, static_cast(allocated)); Register dest = r0; Register src = r1; @@ -166,11 +165,12 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate, __ Ret(); CodeDesc desc; - masm.GetCode(isolate, &desc); + masm.GetCode(nullptr, &desc); DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc)); Assembler::FlushICache(buffer, allocated); - CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute)); + CHECK(SetPermissions(page_allocator, buffer, allocated, + PageAllocator::kReadExecute)); return FUNCTION_CAST(buffer); #endif } @@ -178,16 +178,17 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate, // Convert 8 to 16. The number of character to copy must be at least 8. 
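// The recurring pattern in these codegen-arm.cc hunks, distilled into one
// hypothetical helper: stubs are now generated without an Isolate, using the
// process-wide page allocator. Only calls visible in the hunks above are
// used; EmitBody stands in for the stub-specific assembly.
template <typename EmitBody>
byte* GenerateIsolateFreeStub(EmitBody emit_body) {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  size_t allocated = 0;
  byte* buffer = AllocatePage(
      page_allocator, page_allocator->GetRandomMmapAddr(), &allocated);
  if (buffer == nullptr) return nullptr;  // caller falls back to C++ code
  MacroAssembler masm(AssemblerOptions{}, buffer,
                      static_cast<int>(allocated));
  emit_body(&masm);
  CodeDesc desc;
  masm.GetCode(nullptr, &desc);  // nullptr Isolate: no heap object requests
  Assembler::FlushICache(buffer, allocated);
  CHECK(SetPermissions(page_allocator, buffer, allocated,
                       PageAllocator::kReadExecute));
  return buffer;
}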
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( - Isolate* isolate, MemCopyUint16Uint8Function stub) { + MemCopyUint16Uint8Function stub) { #if defined(USE_SIMULATOR) return stub; #else + v8::PageAllocator* page_allocator = GetPlatformPageAllocator(); size_t allocated = 0; - byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated); + byte* buffer = AllocatePage(page_allocator, + page_allocator->GetRandomMmapAddr(), &allocated); if (buffer == nullptr) return stub; - MacroAssembler masm(isolate, buffer, static_cast(allocated), - CodeObjectRequired::kNo); + MacroAssembler masm(AssemblerOptions{}, buffer, static_cast(allocated)); Register dest = r0; Register src = r1; @@ -256,25 +257,27 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( } CodeDesc desc; - masm.GetCode(isolate, &desc); + masm.GetCode(nullptr, &desc); Assembler::FlushICache(buffer, allocated); - CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute)); + CHECK(SetPermissions(page_allocator, buffer, allocated, + PageAllocator::kReadExecute)); return FUNCTION_CAST(buffer); #endif } #endif -UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) { +UnaryMathFunction CreateSqrtFunction() { #if defined(USE_SIMULATOR) return nullptr; #else + v8::PageAllocator* page_allocator = GetPlatformPageAllocator(); size_t allocated = 0; - byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated); + byte* buffer = AllocatePage(page_allocator, + page_allocator->GetRandomMmapAddr(), &allocated); if (buffer == nullptr) return nullptr; - MacroAssembler masm(isolate, buffer, static_cast(allocated), - CodeObjectRequired::kNo); + MacroAssembler masm(AssemblerOptions{}, buffer, static_cast(allocated)); __ MovFromFloatParameter(d0); __ vsqrt(d0, d0); @@ -282,12 +285,13 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) { __ Ret(); CodeDesc desc; - masm.GetCode(isolate, &desc); + masm.GetCode(nullptr, &desc); DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc)); Assembler::FlushICache(buffer, allocated); - CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute)); - return FUNCTION_CAST(buffer); + CHECK(SetPermissions(page_allocator, buffer, allocated, + PageAllocator::kReadExecute)); + return FUNCTION_CAST(buffer); #endif } diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc index 8af455fc6e4d3d..f3be7a7c4ae5c3 100644 --- a/deps/v8/src/arm/interface-descriptors-arm.cc +++ b/deps/v8/src/arm/interface-descriptors-arm.cc @@ -88,9 +88,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // r0 : number of arguments (on the stack, not including receiver) // r1 : the target to call - // r2 : arguments list (FixedArray) // r4 : arguments list length (untagged) - Register registers[] = {r1, r0, r2, r4}; + // r2 : arguments list (FixedArray) + Register registers[] = {r1, r0, r4, r2}; data->InitializePlatformSpecific(arraysize(registers), registers); } @@ -125,9 +125,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific( // r0 : number of arguments (on the stack, not including receiver) // r1 : the target to call // r3 : the new target - // r2 : arguments list (FixedArray) // r4 : arguments list length (untagged) - Register registers[] = {r1, r3, r0, r2, r4}; + // r2 : arguments list (FixedArray) + Register registers[] = {r1, r3, r0, r4, r2}; data->InitializePlatformSpecific(arraysize(registers), registers); } @@ -193,7 +193,7 @@ void 
BinaryOpDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } -void ArgumentAdaptorDescriptor::InitializePlatformSpecific( +void ArgumentsAdaptorDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = { r1, // JSFunction @@ -237,10 +237,10 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = { r0, // argument count (not including receiver) - r3, // new target + r4, // address of the first argument r1, // constructor to call + r3, // new target r2, // allocation site feedback if available, undefined otherwise - r4 // address of the first argument }; data->InitializePlatformSpecific(arraysize(registers), registers); } diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 09db465d598b0a..cdf9dad1d90e74 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -130,7 +130,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, void TurboAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant( - Heap::kBuiltinsConstantsTableRootIndex)); + RootIndex::kBuiltinsConstantsTable)); // The ldr call below could end up clobbering ip when the offset does not fit // into 12 bits (and thus needs to be loaded from the constant pool). In that @@ -147,7 +147,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, reg = r7; } - LoadRoot(reg, Heap::kBuiltinsConstantsTableRootIndex); + LoadRoot(reg, RootIndex::kBuiltinsConstantsTable); ldr(destination, MemOperand(reg, offset)); if (could_clobber_ip) { @@ -527,7 +527,7 @@ void MacroAssembler::Store(Register src, } } -void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index, +void TurboAssembler::LoadRoot(Register destination, RootIndex index, Condition cond) { ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), cond); } @@ -615,8 +615,6 @@ void TurboAssembler::CallRecordWriteStub( RecordWriteDescriptor::kObject)); Register slot_parameter( callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot)); - Register isolate_parameter(callable.descriptor().GetRegisterParameter( - RecordWriteDescriptor::kIsolate)); Register remembered_set_parameter(callable.descriptor().GetRegisterParameter( RecordWriteDescriptor::kRememberedSet)); Register fp_mode_parameter(callable.descriptor().GetRegisterParameter( @@ -628,7 +626,6 @@ void TurboAssembler::CallRecordWriteStub( Pop(slot_parameter); Pop(object_parameter); - Move(isolate_parameter, ExternalReference::isolate_address(isolate())); Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action)); Move(fp_mode_parameter, Smi::FromEnum(fp_mode)); Call(callable.code(), RelocInfo::CODE_TARGET); @@ -1520,7 +1517,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, // Clear the new.target register if not given. 
if (!new_target.is_valid()) { - LoadRoot(r3, Heap::kUndefinedValueRootIndex); + LoadRoot(r3, RootIndex::kUndefinedValue); } Label done; @@ -1642,9 +1639,7 @@ void MacroAssembler::CompareInstanceType(Register map, cmp(type_reg, Operand(type)); } - -void MacroAssembler::CompareRoot(Register obj, - Heap::RootListIndex index) { +void MacroAssembler::CompareRoot(Register obj, RootIndex index) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); DCHECK(obj != scratch); @@ -2053,7 +2048,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, if (emit_debug_code()) { Label done_checking; AssertNotSmi(object); - CompareRoot(object, Heap::kUndefinedValueRootIndex); + CompareRoot(object, RootIndex::kUndefinedValue); b(eq, &done_checking); ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE); diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 055b6e6fbc86a9..ef75c3fe4ca281 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -71,6 +71,9 @@ enum TargetAddressStorageMode { class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { public: + TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size) + : TurboAssemblerBase(options, buffer, buffer_size) {} + TurboAssembler(Isolate* isolate, const AssemblerOptions& options, void* buffer, int buffer_size, CodeObjectRequired create_code_object) @@ -481,11 +484,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { } // Load an object from the root table. - void LoadRoot(Register destination, Heap::RootListIndex index) override { + void LoadRoot(Register destination, RootIndex index) override { LoadRoot(destination, index, al); } - void LoadRoot(Register destination, Heap::RootListIndex index, - Condition cond); + void LoadRoot(Register destination, RootIndex index, Condition cond); // Jump if the register contains a smi. void JumpIfSmi(Register value, Label* smi_label); @@ -566,10 +568,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // MacroAssembler implements a collection of frequently used macros. class MacroAssembler : public TurboAssembler { public: + MacroAssembler(const AssemblerOptions& options, void* buffer, int size) + : TurboAssembler(options, buffer, size) {} + MacroAssembler(Isolate* isolate, void* buffer, int size, CodeObjectRequired create_code_object) : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer, size, create_code_object) {} + MacroAssembler(Isolate* isolate, const AssemblerOptions& options, void* buffer, int size, CodeObjectRequired create_code_object); @@ -713,8 +719,8 @@ class MacroAssembler : public TurboAssembler { // Compare the object in a register to a value from the root list. // Acquires a scratch register. - void CompareRoot(Register obj, Heap::RootListIndex index); - void PushRoot(Heap::RootListIndex index) { + void CompareRoot(Register obj, RootIndex index); + void PushRoot(RootIndex index) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); LoadRoot(scratch, index); @@ -722,14 +728,13 @@ class MacroAssembler : public TurboAssembler { } // Compare the object in a register to a value and jump if they are equal. 
- void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) { + void JumpIfRoot(Register with, RootIndex index, Label* if_equal) { CompareRoot(with, index); b(eq, if_equal); } // Compare the object in a register to a value and jump if they are not equal. - void JumpIfNotRoot(Register with, Heap::RootListIndex index, - Label* if_not_equal) { + void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) { CompareRoot(with, index); b(ne, if_not_equal); } diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index b1e84218762bed..e9d74104d3b030 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -3212,15 +3212,15 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { DecodeVCMP(instr); } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) { // vsqrt - lazily_initialize_fast_sqrt(isolate_); + lazily_initialize_fast_sqrt(); if (instr->SzValue() == 0x1) { double dm_value = get_double_from_d_register(vm).get_scalar(); - double dd_value = fast_sqrt(dm_value, isolate_); + double dd_value = fast_sqrt(dm_value); dd_value = canonicalizeNaN(dd_value); set_d_register_from_double(vd, dd_value); } else { float sm_value = get_float_from_s_register(m).get_scalar(); - float sd_value = fast_sqrt(sm_value, isolate_); + float sd_value = fast_sqrt(sm_value); sd_value = canonicalizeNaN(sd_value); set_s_register_from_float(d, sd_value); } @@ -5282,10 +5282,10 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) { src[i] = bit_cast(result); } } else { - lazily_initialize_fast_sqrt(isolate_); + lazily_initialize_fast_sqrt(); for (int i = 0; i < 4; i++) { float radicand = bit_cast(src[i]); - float result = 1.0f / fast_sqrt(radicand, isolate_); + float result = 1.0f / fast_sqrt(radicand); result = canonicalizeNaN(result); src[i] = bit_cast(result); } diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h index 52df8143ef1311..5a163b06fdc9ed 100644 --- a/deps/v8/src/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/arm64/assembler-arm64-inl.h @@ -341,7 +341,9 @@ Immediate Operand::immediate_for_heap_object_request() const { DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber && immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT) || (heap_object_request().kind() == HeapObjectRequest::kCodeStub && - immediate_.rmode() == RelocInfo::CODE_TARGET)); + immediate_.rmode() == RelocInfo::CODE_TARGET) || + (heap_object_request().kind() == HeapObjectRequest::kStringConstant && + immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT)); return immediate_; } diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc index d41b1a7d7f4927..879e78d5387d43 100644 --- a/deps/v8/src/arm64/assembler-arm64.cc +++ b/deps/v8/src/arm64/assembler-arm64.cc @@ -36,6 +36,7 @@ #include "src/code-stubs.h" #include "src/frame-constants.h" #include "src/register-configuration.h" +#include "src/string-constants.h" namespace v8 { namespace internal { @@ -583,6 +584,7 @@ void Assembler::Reset() { } void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { + DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty()); for (auto& request : heap_object_requests_) { Address pc = reinterpret_cast
(buffer_) + request.offset(); switch (request.kind()) { @@ -601,6 +603,13 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { request.code_stub()->GetCode()); break; } + case HeapObjectRequest::kStringConstant: { + const StringConstantBase* str = request.string(); + CHECK_NOT_NULL(str); + set_target_address_at(pc, 0 /* unused */, + str->AllocateStringConstant(isolate).address()); + break; + } } } } @@ -1717,6 +1726,13 @@ Operand Operand::EmbeddedCode(CodeStub* stub) { return result; } +Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) { + Operand result(0, RelocInfo::EMBEDDED_OBJECT); + result.heap_object_request_.emplace(str); + DCHECK(result.IsHeapObjectRequest()); + return result; +} + void Assembler::ldr(const CPURegister& rt, const Operand& operand) { if (operand.IsHeapObjectRequest()) { RequestHeapObject(operand.heap_object_request()); diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h index b42b80f9ca9308..0432708fd12f6a 100644 --- a/deps/v8/src/arm64/assembler-arm64.h +++ b/deps/v8/src/arm64/assembler-arm64.h @@ -718,6 +718,7 @@ class Operand { static Operand EmbeddedNumber(double number); // Smi or HeapNumber. static Operand EmbeddedCode(CodeStub* stub); + static Operand EmbeddedStringConstant(const StringConstantBase* str); inline bool IsHeapObjectRequest() const; inline HeapObjectRequest heap_object_request() const; @@ -3624,8 +3625,7 @@ class PatchingAssembler : public Assembler { void PatchSubSp(uint32_t immediate); }; - -class EnsureSpace BASE_EMBEDDED { +class EnsureSpace { public: explicit EnsureSpace(Assembler* assembler) { assembler->CheckBufferSpace(); diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc index 328983f42c6b3a..9b8114c9bfc1c0 100644 --- a/deps/v8/src/arm64/code-stubs-arm64.cc +++ b/deps/v8/src/arm64/code-stubs-arm64.cc @@ -124,7 +124,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { IsolateAddressId::kPendingExceptionAddress, isolate()))); } __ Str(code_entry, MemOperand(x10)); - __ LoadRoot(x0, Heap::kExceptionRootIndex); + __ LoadRoot(x0, RootIndex::kException); __ B(&exit); // Invoke: Link this frame into the handler chain. @@ -434,8 +434,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, // Check if the function scheduled an exception. __ Mov(x5, ExternalReference::scheduled_exception_address(isolate)); __ Ldr(x5, MemOperand(x5)); - __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, - &promote_scheduled_exception); + __ JumpIfNotRoot(x5, RootIndex::kTheHoleValue, &promote_scheduled_exception); __ DropSlots(stack_space); __ Ret(); @@ -484,7 +483,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(FCA::kHolderIndex == 0); Register undef = x7; - __ LoadRoot(undef, Heap::kUndefinedValueRootIndex); + __ LoadRoot(undef, RootIndex::kUndefinedValue); // Push new target, call data. 
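// Sketch: consuming the new Operand::EmbeddedStringConstant (added for both
// arm and arm64 above). `masm`, `str` and the register choice are assumed;
// the Mov is illustrative, and any instruction accepting a heap-object-
// request operand works. The String itself is allocated and patched in at
// GetCode time by AllocateAndInstallRequestedHeapObjects().
void LoadStringConstant(TurboAssembler* masm, const StringConstantBase* str) {
  masm->Mov(x1, Operand::EmbeddedStringConstant(str));
}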
__ Push(undef, call_data); @@ -562,7 +561,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) { name)); __ Ldr(data, FieldMemOperand(callback, AccessorInfo::kDataOffset)); - __ LoadRoot(undef, Heap::kUndefinedValueRootIndex); + __ LoadRoot(undef, RootIndex::kUndefinedValue); __ Mov(isolate_address, ExternalReference::isolate_address(isolate())); __ Ldr(name, FieldMemOperand(callback, AccessorInfo::kNameOffset)); diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc index ad7703328007d9..180e3f54b73803 100644 --- a/deps/v8/src/arm64/codegen-arm64.cc +++ b/deps/v8/src/arm64/codegen-arm64.cc @@ -8,7 +8,6 @@ #include "src/arm64/macro-assembler-arm64-inl.h" #include "src/arm64/simulator-arm64.h" #include "src/codegen.h" -#include "src/isolate.h" #include "src/macro-assembler.h" namespace v8 { @@ -16,9 +15,7 @@ namespace internal { #define __ ACCESS_MASM(masm) -UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) { - return nullptr; -} +UnaryMathFunction CreateSqrtFunction() { return nullptr; } #undef __ diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc index bb1c22aff55bb4..905cc51a5705f2 100644 --- a/deps/v8/src/arm64/interface-descriptors-arm64.cc +++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc @@ -89,9 +89,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // x0 : number of arguments (on the stack, not including receiver) // x1 : the target to call - // x2 : arguments list (FixedArray) // x4 : arguments list length (untagged) - Register registers[] = {x1, x0, x2, x4}; + // x2 : arguments list (FixedArray) + Register registers[] = {x1, x0, x4, x2}; data->InitializePlatformSpecific(arraysize(registers), registers); } @@ -126,9 +126,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific( // x0 : number of arguments (on the stack, not including receiver) // x1 : the target to call // x3 : the new target - // x2 : arguments list (FixedArray) // x4 : arguments list length (untagged) - Register registers[] = {x1, x3, x0, x2, x4}; + // x2 : arguments list (FixedArray) + Register registers[] = {x1, x3, x0, x4, x2}; data->InitializePlatformSpecific(arraysize(registers), registers); } @@ -198,7 +198,7 @@ void BinaryOpDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } -void ArgumentAdaptorDescriptor::InitializePlatformSpecific( +void ArgumentsAdaptorDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = { x1, // JSFunction @@ -242,10 +242,10 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = { x0, // argument count (not including receiver) - x3, // new target + x4, // address of the first argument x1, // constructor to call + x3, // new target x2, // allocation site feedback if available, undefined otherwise - x4 // address of the first argument }; data->InitializePlatformSpecific(arraysize(registers), registers); } diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc index b15ab4747345ba..eb1aec8f103a64 100644 --- a/deps/v8/src/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/arm64/macro-assembler-arm64.cc @@ -1516,7 +1516,7 @@ void TurboAssembler::CanonicalizeNaN(const VRegister& dst, Fsub(dst, src, fp_zero); } -void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) 
{ +void TurboAssembler::LoadRoot(Register destination, RootIndex index) { // TODO(jbramley): Most root values are constants, and can be synthesized // without a load. Refer to the ARM back end for details. Ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index))); @@ -1646,7 +1646,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) { Register scratch = temps.AcquireX(); Label done_checking; AssertNotSmi(object); - JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking); + JumpIfRoot(object, RootIndex::kUndefinedValue, &done_checking); Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE); Assert(eq, AbortReason::kExpectedUndefinedOrCell); @@ -1806,8 +1806,8 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args, void TurboAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant( - Heap::kBuiltinsConstantsTableRootIndex)); - LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex); + RootIndex::kBuiltinsConstantsTable)); + LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); Ldr(destination, FieldMemOperand(destination, FixedArray::kHeaderSize + constant_index * kPointerSize)); @@ -2225,7 +2225,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, // Clear the new.target register if not given. if (!new_target.is_valid()) { - LoadRoot(x3, Heap::kUndefinedValueRootIndex); + LoadRoot(x3, RootIndex::kUndefinedValue); } Label done; @@ -2597,8 +2597,7 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) { DecodeField(result); } -void MacroAssembler::CompareRoot(const Register& obj, - Heap::RootListIndex index) { +void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) { UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); DCHECK(!AreAliased(obj, temp)); @@ -2606,17 +2605,13 @@ void MacroAssembler::CompareRoot(const Register& obj, Cmp(obj, temp); } - -void MacroAssembler::JumpIfRoot(const Register& obj, - Heap::RootListIndex index, +void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal) { CompareRoot(obj, index); B(eq, if_equal); } - -void MacroAssembler::JumpIfNotRoot(const Register& obj, - Heap::RootListIndex index, +void MacroAssembler::JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal) { CompareRoot(obj, index); B(ne, if_not_equal); @@ -2823,8 +2818,6 @@ void TurboAssembler::CallRecordWriteStub( RecordWriteDescriptor::kObject)); Register slot_parameter( callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot)); - Register isolate_parameter(callable.descriptor().GetRegisterParameter( - RecordWriteDescriptor::kIsolate)); Register remembered_set_parameter(callable.descriptor().GetRegisterParameter( RecordWriteDescriptor::kRememberedSet)); Register fp_mode_parameter(callable.descriptor().GetRegisterParameter( @@ -2834,7 +2827,6 @@ void TurboAssembler::CallRecordWriteStub( Pop(slot_parameter, object_parameter); - Mov(isolate_parameter, ExternalReference::isolate_address(isolate())); Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action)); Mov(fp_mode_parameter, Smi::FromEnum(fp_mode)); Call(callable.code(), RelocInfo::CODE_TARGET); @@ -2915,8 +2907,7 @@ void TurboAssembler::AssertUnreachable(AbortReason reason) { if (emit_debug_code()) Abort(reason); } -void MacroAssembler::AssertRegisterIsRoot(Register reg, - 
Heap::RootListIndex index, +void MacroAssembler::AssertRegisterIsRoot(Register reg, RootIndex index, AbortReason reason) { if (emit_debug_code()) { CompareRoot(reg, index); diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h index a2862748a6b233..8648ff04398c81 100644 --- a/deps/v8/src/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/arm64/macro-assembler-arm64.h @@ -180,6 +180,9 @@ enum PreShiftImmMode { class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { public: + TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size) + : TurboAssemblerBase(options, buffer, buffer_size) {} + TurboAssembler(Isolate* isolate, const AssemblerOptions& options, void* buffer, int buffer_size, CodeObjectRequired create_code_object) @@ -1126,7 +1129,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { #undef DECLARE_FUNCTION // Load an object from the root table. - void LoadRoot(Register destination, Heap::RootListIndex index) override; + void LoadRoot(Register destination, RootIndex index) override; inline void Ret(const Register& xn = lr); @@ -1262,10 +1265,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { class MacroAssembler : public TurboAssembler { public: + MacroAssembler(const AssemblerOptions& options, void* buffer, int size) + : TurboAssembler(options, buffer, size) {} + MacroAssembler(Isolate* isolate, void* buffer, int size, CodeObjectRequired create_code_object) : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer, size, create_code_object) {} + MacroAssembler(Isolate* isolate, const AssemblerOptions& options, void* buffer, int size, CodeObjectRequired create_code_object); @@ -1821,17 +1828,13 @@ class MacroAssembler : public TurboAssembler { void LoadElementsKindFromMap(Register result, Register map); // Compare the object in a register to a value from the root list. - void CompareRoot(const Register& obj, Heap::RootListIndex index); + void CompareRoot(const Register& obj, RootIndex index); // Compare the object in a register to a value and jump if they are equal. - void JumpIfRoot(const Register& obj, - Heap::RootListIndex index, - Label* if_equal); + void JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal); // Compare the object in a register to a value and jump if they are not equal. - void JumpIfNotRoot(const Register& obj, - Heap::RootListIndex index, - Label* if_not_equal); + void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal); // Compare the contents of a register with an operand, and branch to true, // false or fall through, depending on condition. @@ -1944,7 +1947,7 @@ class MacroAssembler : public TurboAssembler { // Debugging. void AssertRegisterIsRoot( - Register reg, Heap::RootListIndex index, + Register reg, RootIndex index, AbortReason reason = AbortReason::kRegisterDidNotMatchExpectedRoot); // Abort if the specified register contains the invalid color bit pattern. @@ -2025,7 +2028,7 @@ class MacroAssembler : public TurboAssembler { // instructions. This scope prevents the MacroAssembler from being called and // literal pools from being emitted. It also asserts the number of instructions // emitted is what you specified when creating the scope. 
-class InstructionAccurateScope BASE_EMBEDDED { +class InstructionAccurateScope { public: explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0) : tasm_(tasm) diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc index fd973c8a36c2f5..aea4c0a21b07f4 100644 --- a/deps/v8/src/asmjs/asm-js.cc +++ b/deps/v8/src/asmjs/asm-js.cc @@ -391,7 +391,7 @@ MaybeHandle AsmJs::InstantiateAsmWasm(Isolate* isolate, return MaybeHandle(); } memory->set_is_growable(false); - size_t size = NumberToSize(memory->byte_length()); + size_t size = memory->byte_length(); // Check the asm.js heap size against the valid limits. if (!IsValidAsmjsMemorySize(size)) { ReportInstantiationFailure(script, position, "Invalid heap size"); diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 0f216052f3fa7c..2037a0ec8fbcdf 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -44,10 +44,24 @@ #include "src/simulator.h" // For flushing instruction cache. #include "src/snapshot/serializer-common.h" #include "src/snapshot/snapshot.h" +#include "src/string-constants.h" namespace v8 { namespace internal { +AssemblerOptions AssemblerOptions::EnableV8AgnosticCode() const { + AssemblerOptions options = *this; + options.v8_agnostic_code = true; + options.record_reloc_info_for_serialization = false; + options.enable_root_array_delta_access = false; + // Inherit |enable_simulator_code| value. + options.isolate_independent_code = false; + options.inline_offheap_trampolines = false; + // Inherit |code_range_start| value. + // Inherit |use_pc_relative_calls_and_jumps| value. + return options; +} + AssemblerOptions AssemblerOptions::Default( Isolate* isolate, bool explicitly_support_serialization) { AssemblerOptions options; @@ -61,9 +75,12 @@ AssemblerOptions AssemblerOptions::Default( options.enable_simulator_code = !serializer; #endif options.inline_offheap_trampolines = !serializer; + #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 - options.code_range_start = - isolate->heap()->memory_allocator()->code_range()->start(); + const base::AddressRegion& code_range = + isolate->heap()->memory_allocator()->code_range(); + DCHECK_IMPLIES(code_range.begin() != kNullAddress, !code_range.is_empty()); + options.code_range_start = code_range.begin(); #endif return options; } @@ -355,6 +372,13 @@ HeapObjectRequest::HeapObjectRequest(CodeStub* code_stub, int offset) DCHECK_NOT_NULL(value_.code_stub); } +HeapObjectRequest::HeapObjectRequest(const StringConstantBase* string, + int offset) + : kind_(kStringConstant), offset_(offset) { + value_.string = string; + DCHECK_NOT_NULL(value_.string); +} + // Platform specific but identical code for all the platforms. 
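// Sketch of how the new AssemblerOptions::EnableV8AgnosticCode() above
// composes with Default(): derive options from a live isolate, then strip
// everything isolate- or heap-dependent. The call site is assumed, not from
// the patch.
AssemblerOptions MakeAgnosticOptions(Isolate* isolate) {
  AssemblerOptions options =
      AssemblerOptions::Default(isolate).EnableV8AgnosticCode();
  // v8_agnostic_code is now true; serialization reloc info, root-array
  // delta access, isolate-independent code and off-heap trampolines are
  // forced off, while simulator and code-range settings are inherited.
  return options;
}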
void Assembler::RecordDeoptReason(DeoptimizeReason reason, @@ -381,11 +405,13 @@ void Assembler::DataAlign(int m) { } void AssemblerBase::RequestHeapObject(HeapObjectRequest request) { + DCHECK(!options().v8_agnostic_code); request.set_offset(pc_offset()); heap_object_requests_.push_front(request); } int AssemblerBase::AddCodeTarget(Handle target) { + DCHECK(!options().v8_agnostic_code); int current = static_cast(code_targets_.size()); if (current > 0 && !target.is_null() && code_targets_.back().address() == target.address()) { @@ -398,6 +424,7 @@ int AssemblerBase::AddCodeTarget(Handle target) { } Handle AssemblerBase::GetCodeTarget(intptr_t code_target_index) const { + DCHECK(!options().v8_agnostic_code); DCHECK_LE(0, code_target_index); DCHECK_LT(code_target_index, code_targets_.size()); return code_targets_[code_target_index]; @@ -405,6 +432,7 @@ Handle AssemblerBase::GetCodeTarget(intptr_t code_target_index) const { void AssemblerBase::UpdateCodeTarget(intptr_t code_target_index, Handle code) { + DCHECK(!options().v8_agnostic_code); DCHECK_LE(0, code_target_index); DCHECK_LT(code_target_index, code_targets_.size()); code_targets_[code_target_index] = code; diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index b108c5dfff7487..e669e0f0f7289b 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -67,6 +67,7 @@ class Isolate; class SCTableReference; class SourcePosition; class StatsCounter; +class StringConstantBase; // ----------------------------------------------------------------------------- // Optimization for far-jmp like instructions that can be replaced by shorter. @@ -97,8 +98,9 @@ class HeapObjectRequest { public: explicit HeapObjectRequest(double heap_number, int offset = -1); explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1); + explicit HeapObjectRequest(const StringConstantBase* string, int offset = -1); - enum Kind { kHeapNumber, kCodeStub }; + enum Kind { kHeapNumber, kCodeStub, kStringConstant }; Kind kind() const { return kind_; } double heap_number() const { @@ -111,6 +113,11 @@ class HeapObjectRequest { return value_.code_stub; } + const StringConstantBase* string() const { + DCHECK_EQ(kind(), kStringConstant); + return value_.string; + } + // The code buffer offset at the time of the request. int offset() const { DCHECK_GE(offset_, 0); @@ -128,6 +135,7 @@ class HeapObjectRequest { union { double heap_number; CodeStub* code_stub; + const StringConstantBase* string; } value_; int offset_; @@ -139,6 +147,9 @@ class HeapObjectRequest { enum class CodeObjectRequired { kNo, kYes }; struct V8_EXPORT_PRIVATE AssemblerOptions { + // Prohibits using any V8-specific features of assembler like (isolates, + // heap objects, external references, etc.). + bool v8_agnostic_code = false; // Recording reloc info for external references and off-heap targets is // needed whenever code is serialized, e.g. into the snapshot or as a WASM // module. This flag allows this reloc info to be disabled for code that @@ -168,6 +179,9 @@ struct V8_EXPORT_PRIVATE AssemblerOptions { // the instruction immediates. bool use_pc_relative_calls_and_jumps = false; + // Constructs V8-agnostic set of options from current state. 
+ AssemblerOptions EnableV8AgnosticCode() const; + static AssemblerOptions Default( Isolate* isolate, bool explicitly_support_serialization = false); }; @@ -268,11 +282,11 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { } } - // {RequestHeapObject} records the need for a future heap number allocation or - // code stub generation. After code assembly, each platform's - // {Assembler::AllocateAndInstallRequestedHeapObjects} will allocate these - // objects and place them where they are expected (determined by the pc offset - // associated with each request). + // {RequestHeapObject} records the need for a future heap number allocation, + // code stub generation or string allocation. After code assembly, each + // platform's {Assembler::AllocateAndInstallRequestedHeapObjects} will + // allocate these objects and place them where they are expected (determined + // by the pc offset associated with each request). void RequestHeapObject(HeapObjectRequest request); private: @@ -301,7 +315,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { }; // Avoids emitting debug code during the lifetime of this scope object. -class DontEmitDebugCodeScope BASE_EMBEDDED { +class DontEmitDebugCodeScope { public: explicit DontEmitDebugCodeScope(AssemblerBase* assembler) : assembler_(assembler), old_value_(assembler->emit_debug_code()) { @@ -332,7 +346,7 @@ class PredictableCodeSizeScope { // Enable a specified feature within a scope. -class CpuFeatureScope BASE_EMBEDDED { +class CpuFeatureScope { public: enum CheckPolicy { kCheckSupported, @@ -350,12 +364,12 @@ class CpuFeatureScope BASE_EMBEDDED { #else CpuFeatureScope(AssemblerBase* assembler, CpuFeature f, CheckPolicy check = kCheckSupported) {} - // Define a destructor to avoid unused variable warnings. - ~CpuFeatureScope() {} + ~CpuFeatureScope() { // NOLINT (modernize-use-equals-default) + // Define a destructor to avoid unused variable warnings. + } #endif }; - // CpuFeatures keeps track of which features are supported by the target CPU. // Supported features must be enabled by a CpuFeatureScope before use. // Example: @@ -420,7 +434,7 @@ class CpuFeatures : public AllStatic { // Utility functions // Computes pow(x, y) with the special cases in the spec for Math.pow. 
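// Sketch: recording the new string-constant request kind shown above. This
// mirrors the Operand::EmbeddedStringConstant handling in the arm hunks; the
// method itself is hypothetical (RequestHeapObject is meant to be called
// from within an assembler).
void Assembler::EmitStringConstant(const StringConstantBase* str) {
  RequestHeapObject(HeapObjectRequest(str));  // stamps the current pc_offset()
  // ... emit a placeholder load; AllocateAndInstallRequestedHeapObjects()
  // later calls str->AllocateStringConstant(isolate) and patches it in.
}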
-double power_helper(Isolate* isolate, double x, double y); +double power_helper(double x, double y); double power_double_int(double x, int y); double power_double_double(double x, double y); @@ -430,7 +444,7 @@ double power_double_double(double x, double y); class ConstantPoolEntry { public: - ConstantPoolEntry() {} + ConstantPoolEntry() = default; ConstantPoolEntry(int position, intptr_t value, bool sharing_ok, RelocInfo::Mode rmode = RelocInfo::NONE) : position_(position), @@ -447,7 +461,7 @@ class ConstantPoolEntry { int position() const { return position_; } bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; } bool is_merged() const { return merged_index_ >= 0; } - int merged_index(void) const { + int merged_index() const { DCHECK(is_merged()); return merged_index_; } @@ -456,7 +470,7 @@ class ConstantPoolEntry { merged_index_ = index; DCHECK(is_merged()); } - int offset(void) const { + int offset() const { DCHECK_GE(merged_index_, 0); return merged_index_; } @@ -493,7 +507,7 @@ class ConstantPoolEntry { // ----------------------------------------------------------------------------- // Embedded constant pool support -class ConstantPoolBuilder BASE_EMBEDDED { +class ConstantPoolBuilder { public: ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits); diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h index acf764979252f6..b64f95dfa57224 100644 --- a/deps/v8/src/assert-scope.h +++ b/deps/v8/src/assert-scope.h @@ -77,7 +77,9 @@ class PerThreadAssertScopeDebugOnly : public #else class PerThreadAssertScopeDebugOnly { public: - PerThreadAssertScopeDebugOnly() { } + PerThreadAssertScopeDebugOnly() { // NOLINT (modernize-use-equals-default) + // Define a constructor to avoid unused variable warnings. + } void Release() {} #endif }; diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc index 5cb1e87d2395dc..7e3a25890b5c84 100644 --- a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc +++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc @@ -14,7 +14,7 @@ AstFunctionLiteralIdReindexer::AstFunctionLiteralIdReindexer(size_t stack_limit, int delta) : AstTraversalVisitor(stack_limit), delta_(delta) {} -AstFunctionLiteralIdReindexer::~AstFunctionLiteralIdReindexer() {} +AstFunctionLiteralIdReindexer::~AstFunctionLiteralIdReindexer() = default; void AstFunctionLiteralIdReindexer::Reindex(Expression* pattern) { Visit(pattern); diff --git a/deps/v8/src/ast/ast-source-ranges.h b/deps/v8/src/ast/ast-source-ranges.h index cf7bab53daa607..6e077d65bde9d1 100644 --- a/deps/v8/src/ast/ast-source-ranges.h +++ b/deps/v8/src/ast/ast-source-ranges.h @@ -56,7 +56,7 @@ enum class SourceRangeKind { class AstNodeSourceRanges : public ZoneObject { public: - virtual ~AstNodeSourceRanges() {} + virtual ~AstNodeSourceRanges() = default; virtual SourceRange GetRange(SourceRangeKind kind) = 0; }; @@ -65,7 +65,7 @@ class BinaryOperationSourceRanges final : public AstNodeSourceRanges { explicit BinaryOperationSourceRanges(const SourceRange& right_range) : right_range_(right_range) {} - SourceRange GetRange(SourceRangeKind kind) { + SourceRange GetRange(SourceRangeKind kind) override { DCHECK_EQ(kind, SourceRangeKind::kRight); return right_range_; } @@ -79,7 +79,7 @@ class ContinuationSourceRanges : public AstNodeSourceRanges { explicit ContinuationSourceRanges(int32_t continuation_position) : continuation_position_(continuation_position) {} - SourceRange GetRange(SourceRangeKind kind) { + SourceRange 
GetRange(SourceRangeKind kind) override { DCHECK_EQ(kind, SourceRangeKind::kContinuation); return SourceRange::OpenEnded(continuation_position_); } @@ -99,7 +99,7 @@ class CaseClauseSourceRanges final : public AstNodeSourceRanges { explicit CaseClauseSourceRanges(const SourceRange& body_range) : body_range_(body_range) {} - SourceRange GetRange(SourceRangeKind kind) { + SourceRange GetRange(SourceRangeKind kind) override { DCHECK_EQ(kind, SourceRangeKind::kBody); return body_range_; } @@ -114,7 +114,7 @@ class ConditionalSourceRanges final : public AstNodeSourceRanges { const SourceRange& else_range) : then_range_(then_range), else_range_(else_range) {} - SourceRange GetRange(SourceRangeKind kind) { + SourceRange GetRange(SourceRangeKind kind) override { switch (kind) { case SourceRangeKind::kThen: return then_range_; @@ -136,7 +136,7 @@ class IfStatementSourceRanges final : public AstNodeSourceRanges { const SourceRange& else_range) : then_range_(then_range), else_range_(else_range) {} - SourceRange GetRange(SourceRangeKind kind) { + SourceRange GetRange(SourceRangeKind kind) override { switch (kind) { case SourceRangeKind::kElse: return else_range_; @@ -162,7 +162,7 @@ class IterationStatementSourceRanges final : public AstNodeSourceRanges { explicit IterationStatementSourceRanges(const SourceRange& body_range) : body_range_(body_range) {} - SourceRange GetRange(SourceRangeKind kind) { + SourceRange GetRange(SourceRangeKind kind) override { switch (kind) { case SourceRangeKind::kBody: return body_range_; @@ -198,7 +198,7 @@ class NaryOperationSourceRanges final : public AstNodeSourceRanges { void AddRange(const SourceRange& range) { ranges_.push_back(range); } size_t RangeCount() const { return ranges_.size(); } - SourceRange GetRange(SourceRangeKind kind) { UNREACHABLE(); } + SourceRange GetRange(SourceRangeKind kind) override { UNREACHABLE(); } private: ZoneVector ranges_; @@ -227,7 +227,7 @@ class TryCatchStatementSourceRanges final : public AstNodeSourceRanges { explicit TryCatchStatementSourceRanges(const SourceRange& catch_range) : catch_range_(catch_range) {} - SourceRange GetRange(SourceRangeKind kind) { + SourceRange GetRange(SourceRangeKind kind) override { switch (kind) { case SourceRangeKind::kCatch: return catch_range_; @@ -247,7 +247,7 @@ class TryFinallyStatementSourceRanges final : public AstNodeSourceRanges { explicit TryFinallyStatementSourceRanges(const SourceRange& finally_range) : finally_range_(finally_range) {} - SourceRange GetRange(SourceRangeKind kind) { + SourceRange GetRange(SourceRangeKind kind) override { switch (kind) { case SourceRangeKind::kFinally: return finally_range_; diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc index 8cf81b24a50100..67ea77bfbf7887 100644 --- a/deps/v8/src/ast/ast-value-factory.cc +++ b/deps/v8/src/ast/ast-value-factory.cc @@ -242,6 +242,17 @@ const AstRawString* AstValueFactory::GetString(Handle literal) { return result; } +const AstRawString* AstValueFactory::CloneFromOtherFactory( + const AstRawString* raw_string) { + const AstRawString* result = GetString( + raw_string->hash_field(), raw_string->is_one_byte(), + Vector(raw_string->raw_data(), raw_string->byte_length())); + // Check we weren't trying to clone a string that was already in this + // ast-value-factory. 
+ DCHECK_NE(result, raw_string); + return result; +} + AstConsString* AstValueFactory::NewConsString() { AstConsString* new_string = new (zone_) AstConsString; DCHECK_NOT_NULL(new_string); diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h index e85b0675bf6fed..146a2bc998c0e0 100644 --- a/deps/v8/src/ast/ast-value-factory.h +++ b/deps/v8/src/ast/ast-value-factory.h @@ -297,10 +297,15 @@ class AstValueFactory { return GetTwoByteStringInternal(literal); } const AstRawString* GetString(Handle literal); + + // Clones an AstRawString from another ast value factory, adding it to this + // factory and returning the clone. + const AstRawString* CloneFromOtherFactory(const AstRawString* raw_string); + V8_EXPORT_PRIVATE AstConsString* NewConsString(); - AstConsString* NewConsString(const AstRawString* str); - AstConsString* NewConsString(const AstRawString* str1, - const AstRawString* str2); + V8_EXPORT_PRIVATE AstConsString* NewConsString(const AstRawString* str); + V8_EXPORT_PRIVATE AstConsString* NewConsString(const AstRawString* str1, + const AstRawString* str2); V8_EXPORT_PRIVATE void Internalize(Isolate* isolate); diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc index 5a4add60390970..617a26b9377912 100644 --- a/deps/v8/src/ast/ast.cc +++ b/deps/v8/src/ast/ast.cc @@ -551,12 +551,6 @@ bool ObjectLiteral::IsFastCloningSupported() const { ConstructorBuiltins::kMaximumClonedShallowObjectProperties; } -bool ArrayLiteral::is_empty() const { - DCHECK(is_initialized()); - return values()->is_empty() && (boilerplate_description().is_null() || - boilerplate_description()->is_empty()); -} - int ArrayLiteral::InitDepthAndFlags() { if (is_initialized()) return depth(); diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index 6c1e989d30beb5..a27cc1531b62d1 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -397,6 +397,7 @@ class Declaration : public AstNode { Declaration** next() { return &next_; } Declaration* next_; friend List; + friend ThreadedListTraits; }; class VariableDeclaration : public Declaration { @@ -1477,8 +1478,6 @@ class ArrayLiteral final : public AggregateLiteral { int first_spread_index() const { return first_spread_index_; } - bool is_empty() const; - // Populate the depth field and flags, returns the depth. int InitDepthAndFlags(); @@ -1578,8 +1577,15 @@ class VariableProxy final : public Expression { // Bind this proxy to the variable var. void BindTo(Variable* var); - void set_next_unresolved(VariableProxy* next) { next_unresolved_ = next; } - VariableProxy* next_unresolved() { return next_unresolved_; } + V8_INLINE VariableProxy* next_unresolved() { return next_unresolved_; } + + // Provides an access type for the ThreadedList used by the PreParsers + // expressions, lists, and formal parameters. 
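// Sketch of the new AstValueFactory::CloneFromOtherFactory() above:
// re-interning an identifier created by a different factory (e.g. a
// preparser's) into the main one. The surrounding names are assumed for
// illustration.
const AstRawString* Reintern(AstValueFactory* main_factory,
                             const AstRawString* from_other) {
  // The clone lives in main_factory's zone and string table; the DCHECK_NE
  // in the implementation asserts the input was not already owned by it.
  return main_factory->CloneFromOtherFactory(from_other);
}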
+ struct PreParserNext { + static VariableProxy** next(VariableProxy* t) { + return t->pre_parser_expr_next(); + } + }; private: friend class AstNodeFactory; @@ -1590,7 +1596,8 @@ class VariableProxy final : public Expression { int start_position) : Expression(start_position, kVariableProxy), raw_name_(name), - next_unresolved_(nullptr) { + next_unresolved_(nullptr), + pre_parser_expr_next_(nullptr) { bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) | IsAssignedField::encode(false) | IsResolvedField::encode(false) | @@ -1613,9 +1620,15 @@ class VariableProxy final : public Expression { const AstRawString* raw_name_; // if !is_resolved_ Variable* var_; // if is_resolved_ }; + + V8_INLINE VariableProxy** next() { return &next_unresolved_; } VariableProxy* next_unresolved_; -}; + VariableProxy** pre_parser_expr_next() { return &pre_parser_expr_next_; } + VariableProxy* pre_parser_expr_next_; + + friend ThreadedListTraits; +}; // Left-hand side can only be a property, a global or a (parameter or local) // slot. @@ -2248,7 +2261,7 @@ class FunctionLiteral final : public Expression { void mark_as_iife() { bit_field_ = IIFEBit::update(bit_field_, true); } bool is_iife() const { return IIFEBit::decode(bit_field_); } - bool is_top_level() const { + bool is_toplevel() const { return function_literal_id() == FunctionLiteral::kIdTypeTopLevel; } bool is_wrapped() const { return function_type() == kWrapped; } @@ -2308,7 +2321,7 @@ class FunctionLiteral final : public Expression { // - (function() { ... })(); // - var x = function() { ... }(); bool ShouldEagerCompile() const; - void SetShouldEagerCompile(); + V8_EXPORT_PRIVATE void SetShouldEagerCompile(); FunctionType function_type() const { return FunctionTypeBits::decode(bit_field_); @@ -2736,7 +2749,7 @@ class TemplateLiteral final : public Expression { // class SpecificVisitor : public AstVisitor { ... 
} template <class Subclass> -class AstVisitor BASE_EMBEDDED { +class AstVisitor { public: void Visit(AstNode* node) { impl()->Visit(node); } @@ -2823,7 +2836,7 @@ class AstVisitor BASE_EMBEDDED { // ---------------------------------------------------------------------------- // AstNode factory -class AstNodeFactory final BASE_EMBEDDED { +class AstNodeFactory final { public: AstNodeFactory(AstValueFactory* ast_value_factory, Zone* zone) : zone_(zone), ast_value_factory_(ast_value_factory) {} @@ -3330,7 +3343,6 @@ class AstNodeFactory final BASE_EMBEDDED { } Zone* zone() const { return zone_; } - void set_zone(Zone* zone) { zone_ = zone; } private: // This zone may be deallocated upon returning from parsing a function body diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc index d2e56a93351f7f..f9c2243099cae0 100644 --- a/deps/v8/src/ast/prettyprinter.cc +++ b/deps/v8/src/ast/prettyprinter.cc @@ -31,7 +31,7 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js) InitializeAstVisitor(isolate); } -CallPrinter::~CallPrinter() {} +CallPrinter::~CallPrinter() = default; CallPrinter::ErrorHint CallPrinter::GetErrorHint() const { if (is_call_error_) { @@ -666,7 +666,7 @@ void AstPrinter::PrintLiteral(const AstConsString* value, bool quote) { //----------------------------------------------------------------------------- -class IndentedScope BASE_EMBEDDED { +class IndentedScope { public: IndentedScope(AstPrinter* printer, const char* txt) : ast_printer_(printer) { diff --git a/deps/v8/src/ast/scopes-inl.h b/deps/v8/src/ast/scopes-inl.h new file mode 100644 index 00000000000000..a70166c5ca61aa --- /dev/null +++ b/deps/v8/src/ast/scopes-inl.h @@ -0,0 +1,66 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_AST_SCOPES_INL_H_ +#define V8_AST_SCOPES_INL_H_ + +#include "src/ast/scopes.h" + +namespace v8 { +namespace internal { + +template <typename T> +void Scope::ResolveScopesThenForEachVariable(DeclarationScope* max_outer_scope, + T variable_proxy_stackvisitor, + ParseInfo* info) { + // Module variables must be allocated before variable resolution + // to ensure that UpdateNeedsHoleCheck() can detect import variables. + if (info != nullptr && is_module_scope()) { + AsModuleScope()->AllocateModuleVariables(); + } + // Lazily parsed declaration scopes are already partially analyzed. If there + // are unresolved references remaining, they just need to be resolved in + // outer scopes. + Scope* lookup = + is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed() + ? outer_scope() + : this; + + for (VariableProxy *proxy = unresolved_list_.first(), *next = nullptr; + proxy != nullptr; proxy = next) { + next = proxy->next_unresolved(); + + DCHECK(!proxy->is_resolved()); + Variable* var = + lookup->LookupRecursive(info, proxy, max_outer_scope->outer_scope()); + if (var == nullptr) { + variable_proxy_stackvisitor(proxy); + } else if (var != Scope::kDummyPreParserVariable && + var != Scope::kDummyPreParserLexicalVariable) { + if (info != nullptr) { + // In this case we need to leave scopes in a way that they can be + // allocated. If we resolved variables from lazily parsed scopes, we + // need to context allocate the var.
+ ResolveTo(info, proxy, var); + if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation(); + } else { + var->set_is_used(); + if (proxy->is_assigned()) var->set_maybe_assigned(); + } + } + } + + // Clear unresolved_list_ as it's in an inconsistent state. + unresolved_list_.Clear(); + + for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) { + scope->ResolveScopesThenForEachVariable(max_outer_scope, + variable_proxy_stackvisitor, info); + } +} + +} // namespace internal +} // namespace v8 + +#endif // V8_AST_SCOPES_INL_H_ diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index 74d50c44de59a5..68f23e2197f073 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -8,6 +8,7 @@ #include "src/accessors.h" #include "src/ast/ast.h" +#include "src/ast/scopes-inl.h" #include "src/base/optional.h" #include "src/bootstrapper.h" #include "src/counters.h" @@ -23,15 +24,11 @@ namespace v8 { namespace internal { namespace { -void* kDummyPreParserVariable = reinterpret_cast<void*>(0x1); -void* kDummyPreParserLexicalVariable = reinterpret_cast<void*>(0x2); - bool IsLexical(Variable* variable) { - if (variable == kDummyPreParserLexicalVariable) return true; - if (variable == kDummyPreParserVariable) return false; + if (variable == Scope::kDummyPreParserLexicalVariable) return true; + if (variable == Scope::kDummyPreParserVariable) return false; return IsLexicalVariableMode(variable->mode()); } - } // namespace // ---------------------------------------------------------------------------- @@ -76,8 +73,9 @@ Variable* VariableMap::DeclareName(Zone* zone, const AstRawString* name, if (p->value == nullptr) { // The variable has not been declared yet -> insert it. DCHECK_EQ(name, p->key); - p->value = mode == VariableMode::kVar ? kDummyPreParserVariable - : kDummyPreParserLexicalVariable; + p->value = mode == VariableMode::kVar + ? Scope::kDummyPreParserVariable + : Scope::kDummyPreParserLexicalVariable; } return reinterpret_cast(p->value); } @@ -154,7 +152,7 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type) Scope::Snapshot::Snapshot(Scope* scope) : outer_scope_(scope), top_inner_scope_(scope->inner_scope_), - top_unresolved_(scope->unresolved_), + top_unresolved_(scope->unresolved_list_.first()), top_local_(scope->GetClosureScope()->locals_.end()), top_decl_(scope->GetClosureScope()->decls_.end()), outer_scope_calls_eval_(scope->scope_calls_eval_) { @@ -319,7 +317,7 @@ void DeclarationScope::SetDefaults() { should_eager_compile_ = false; was_lazily_parsed_ = false; is_skipped_function_ = false; - produced_preparsed_scope_data_ = nullptr; + preparsed_scope_data_builder_ = nullptr; #ifdef DEBUG DeclarationScope* outer_declaration_scope = outer_scope_ ?
outer_scope_->GetDeclarationScope() : nullptr; @@ -337,7 +335,7 @@ void Scope::SetDefaults() { #endif inner_scope_ = nullptr; sibling_ = nullptr; - unresolved_ = nullptr; + unresolved_list_.Clear(); start_position_ = kNoSourcePosition; end_position_ = kNoSourcePosition; @@ -834,16 +832,9 @@ Scope* Scope::FinalizeBlockScope() { } // Move unresolved variables - if (unresolved_ != nullptr) { - if (outer_scope()->unresolved_ != nullptr) { - VariableProxy* unresolved = unresolved_; - while (unresolved->next_unresolved() != nullptr) { - unresolved = unresolved->next_unresolved(); - } - unresolved->set_next_unresolved(outer_scope()->unresolved_); - } - outer_scope()->unresolved_ = unresolved_; - unresolved_ = nullptr; + if (!unresolved_list_.is_empty()) { + outer_scope()->unresolved_list_.Prepend(std::move(unresolved_list_)); + unresolved_list_.Clear(); } if (inner_scope_calls_eval_) outer_scope()->inner_scope_calls_eval_ = true; @@ -887,7 +878,7 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const { DCHECK_EQ(new_parent->outer_scope_, outer_scope_); DCHECK_EQ(new_parent, new_parent->GetClosureScope()); DCHECK_NULL(new_parent->inner_scope_); - DCHECK_NULL(new_parent->unresolved_); + DCHECK(new_parent->unresolved_list_.is_empty()); DCHECK(new_parent->locals_.is_empty()); Scope* inner_scope = new_parent->sibling_; if (inner_scope != top_inner_scope_) { @@ -910,14 +901,21 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const { new_parent->sibling_ = top_inner_scope_; } - if (outer_scope_->unresolved_ != top_unresolved_) { - VariableProxy* last = outer_scope_->unresolved_; - while (last->next_unresolved() != top_unresolved_) { - last = last->next_unresolved(); + if (outer_scope_->unresolved_list_.first() != top_unresolved_) { + // If the snapshotted VariableProxy is not the first, we need to find it + // and move all VariableProxies up to that point into the new_parent, then + // we restore the snapshotted state by reinitializing the outer_scope + // list.
+ { + auto iter = outer_scope_->unresolved_list_.begin(); + while (*iter != top_unresolved_) { + ++iter; + } + outer_scope_->unresolved_list_.Rewind(iter); } - last->set_next_unresolved(nullptr); - new_parent->unresolved_ = outer_scope_->unresolved_; - outer_scope_->unresolved_ = top_unresolved_; + + new_parent->unresolved_list_ = std::move(outer_scope_->unresolved_list_); + outer_scope_->unresolved_list_.ReinitializeHead(top_unresolved_); } // TODO(verwaest): This currently only moves do-expression declared variables @@ -1261,8 +1259,7 @@ void Scope::DeclareCatchVariableName(const AstRawString* name) { void Scope::AddUnresolved(VariableProxy* proxy) { DCHECK(!already_resolved_); DCHECK(!proxy->is_resolved()); - proxy->set_next_unresolved(unresolved_); - unresolved_ = proxy; + unresolved_list_.AddFront(proxy); } Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name, @@ -1274,22 +1271,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name, } bool Scope::RemoveUnresolved(VariableProxy* var) { - if (unresolved_ == var) { - unresolved_ = var->next_unresolved(); - var->set_next_unresolved(nullptr); - return true; - } - VariableProxy* current = unresolved_; - while (current != nullptr) { - VariableProxy* next = current->next_unresolved(); - if (var == next) { - current->set_next_unresolved(next->next_unresolved()); - var->set_next_unresolved(nullptr); - return true; - } - current = next; - } - return false; + return unresolved_list_.Remove(var); } Variable* Scope::NewTemporary(const AstRawString* name) { @@ -1483,11 +1465,12 @@ Scope* Scope::GetOuterScopeWithContext() { Handle<StringSet> DeclarationScope::CollectNonLocals( Isolate* isolate, ParseInfo* info, Handle<StringSet> non_locals) { - VariableProxy* free_variables = FetchFreeVariables(this, info); - for (VariableProxy* proxy = free_variables; proxy != nullptr; - proxy = proxy->next_unresolved()) { - non_locals = StringSet::Add(isolate, non_locals, proxy->name()); - } + ResolveScopesThenForEachVariable(this, + [=, &non_locals](VariableProxy* proxy) { + non_locals = StringSet::Add( + isolate, non_locals, proxy->name()); + }, + info); return non_locals; } @@ -1504,7 +1487,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory, decls_.Clear(); locals_.Clear(); inner_scope_ = nullptr; - unresolved_ = nullptr; + unresolved_list_.Clear(); sloppy_block_function_map_ = nullptr; rare_data_ = nullptr; has_rest_ = false; @@ -1532,7 +1515,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory, void Scope::SavePreParsedScopeData() { DCHECK(FLAG_preparser_scope_analysis); - if (ProducedPreParsedScopeData::ScopeIsSkippableFunctionScope(this)) { + if (PreParsedScopeDataBuilder::ScopeIsSkippableFunctionScope(this)) { AsDeclarationScope()->SavePreParsedScopeDataForDeclarationScope(); } @@ -1542,30 +1525,27 @@ void Scope::SavePreParsedScopeData() { } void DeclarationScope::SavePreParsedScopeDataForDeclarationScope() { - if (produced_preparsed_scope_data_ != nullptr) { + if (preparsed_scope_data_builder_ != nullptr) { DCHECK(FLAG_preparser_scope_analysis); - produced_preparsed_scope_data_->SaveScopeAllocationData(this); + preparsed_scope_data_builder_->SaveScopeAllocationData(this); } } void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) { DCHECK(!force_eager_compilation_); - VariableProxy* unresolved = nullptr; - + ThreadedList<VariableProxy> new_unresolved_list; if (!outer_scope_->is_script_scope() || (FLAG_preparser_scope_analysis && - produced_preparsed_scope_data_ !=
nullptr && - produced_preparsed_scope_data_->ContainsInnerFunctions())) { + preparsed_scope_data_builder_ != nullptr && + preparsed_scope_data_builder_->ContainsInnerFunctions())) { // Try to resolve unresolved variables for this Scope and migrate those // which cannot be resolved inside. It doesn't make sense to try to resolve // them in the outer Scopes here, because they are incomplete. - for (VariableProxy* proxy = FetchFreeVariables(this); proxy != nullptr; - proxy = proxy->next_unresolved()) { - DCHECK(!proxy->is_resolved()); - VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy); - copy->set_next_unresolved(unresolved); - unresolved = copy; - } + ResolveScopesThenForEachVariable( + this, [=, &new_unresolved_list](VariableProxy* proxy) { + VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy); + new_unresolved_list.AddFront(copy); + }); // Migrate function_ to the right Zone. if (function_ != nullptr) { @@ -1586,7 +1566,7 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) { ResetAfterPreparsing(ast_node_factory->ast_value_factory(), false); - unresolved_ = unresolved; + unresolved_list_ = std::move(new_unresolved_list); } #ifdef DEBUG @@ -1673,8 +1653,8 @@ void PrintMap(int indent, const char* label, VariableMap* map, bool locals, for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) { Variable* var = reinterpret_cast<Variable*>(p->value); if (var == function_var) continue; - if (var == kDummyPreParserVariable || - var == kDummyPreParserLexicalVariable) { + if (var == Scope::kDummyPreParserVariable || + var == Scope::kDummyPreParserLexicalVariable) { continue; } bool local = !IsDynamicVariableMode(var->mode()); @@ -2045,8 +2025,7 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) { // scopes. if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) { DCHECK_EQ(variables_.occupancy(), 0); - for (VariableProxy* proxy = unresolved_; proxy != nullptr; - proxy = proxy->next_unresolved()) { + for (VariableProxy* proxy : unresolved_list_) { Variable* var = outer_scope()->LookupRecursive(info, proxy, nullptr); if (var == nullptr) { DCHECK(proxy->is_private_field()); @@ -2060,8 +2039,7 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) { } } else { // Resolve unresolved variables for this scope. - for (VariableProxy* proxy = unresolved_; proxy != nullptr; - proxy = proxy->next_unresolved()) { + for (VariableProxy* proxy : unresolved_list_) { if (!ResolveVariable(info, proxy)) return false; } @@ -2074,57 +2052,6 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) { return true; } -VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope, - ParseInfo* info, - VariableProxy* stack) { - // Module variables must be allocated before variable resolution - // to ensure that UpdateNeedsHoleCheck() can detect import variables. - if (info != nullptr && is_module_scope()) { - AsModuleScope()->AllocateModuleVariables(); - } - // Lazy parsed declaration scopes are already partially analyzed. If there are - // unresolved references remaining, they just need to be resolved in outer - // scopes. - Scope* lookup = - is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed() - ?
outer_scope() - : this; - for (VariableProxy *proxy = unresolved_, *next = nullptr; proxy != nullptr; - proxy = next) { - next = proxy->next_unresolved(); - DCHECK(!proxy->is_resolved()); - Variable* var = - lookup->LookupRecursive(info, proxy, max_outer_scope->outer_scope()); - if (var == nullptr) { - proxy->set_next_unresolved(stack); - stack = proxy; - } else if (var != kDummyPreParserVariable && - var != kDummyPreParserLexicalVariable) { - if (info != nullptr) { - // In this case we need to leave scopes in a way that they can be - // allocated. If we resolved variables from lazy parsed scopes, we need - // to context allocate the var. - ResolveTo(info, proxy, var); - if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation(); - } else { - var->set_is_used(); - if (proxy->is_assigned()) { - var->set_maybe_assigned(); - } - } - } - } - - // Clear unresolved_ as it's in an inconsistent state. - unresolved_ = nullptr; - - for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) { - stack = scope->FetchFreeVariables(max_outer_scope, info, stack); - } - - return stack; -} - bool Scope::MustAllocate(Variable* var) { if (var == kDummyPreParserLexicalVariable || var == kDummyPreParserVariable) { return true; @@ -2410,5 +2337,9 @@ int Scope::ContextLocalCount() const { (is_function_var_in_context ? 1 : 0); } +void* const Scope::kDummyPreParserVariable = reinterpret_cast<void*>(0x1); +void* const Scope::kDummyPreParserLexicalVariable = + reinterpret_cast<void*>(0x2); + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h index f43761af58d37e..c98494e32f160a 100644 --- a/deps/v8/src/ast/scopes.h +++ b/deps/v8/src/ast/scopes.h @@ -20,8 +20,7 @@ class AstValueFactory; class AstRawString; class Declaration; class ParseInfo; -class PreParsedScopeData; -class ProducedPreParsedScopeData; +class PreParsedScopeDataBuilder; class SloppyBlockFunctionStatement; class Statement; class StringSet; @@ -114,7 +113,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { ModuleScope* AsModuleScope(); const ModuleScope* AsModuleScope() const; - class Snapshot final BASE_EMBEDDED { + class Snapshot final { public: explicit Snapshot(Scope* scope); ~Snapshot(); @@ -218,8 +217,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { DCHECK(!already_resolved_); DCHECK_EQ(factory->zone(), zone()); VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_pos); - proxy->set_next_unresolved(unresolved_); - unresolved_ = proxy; + AddUnresolved(proxy); return proxy; } @@ -480,6 +478,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { return false; } + static void* const kDummyPreParserVariable; + static void* const kDummyPreParserLexicalVariable; + protected: explicit Scope(Zone* zone); @@ -525,7 +526,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { ThreadedList<Variable> locals_; // Unresolved variables referred to from this scope. The proxies themselves // form a linked list of all unresolved proxies. - VariableProxy* unresolved_; + ThreadedList<VariableProxy> unresolved_list_; // Declarations. ThreadedList<Declaration> decls_; @@ -597,9 +598,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // Finds free variables of this scope. This mutates the unresolved variables // list along the way, so full resolution cannot be done afterwards. // If a ParseInfo* is passed, non-free variables will be resolved.
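A hedged illustration of the callback-style replacement declared in the hunk below (mirroring the CollectNonLocals change above): the caller hands in a lambda that receives each proxy that stayed free. This is a fragment, assuming `scope`, `max_outer_scope`, and `info` are already in hand:

scope->ResolveScopesThenForEachVariable(
    max_outer_scope,
    [](VariableProxy* proxy) {
      // Record or copy the still-unresolved proxy here.
    },
    info);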
- VariableProxy* FetchFreeVariables(DeclarationScope* max_outer_scope, - ParseInfo* info = nullptr, - VariableProxy* stack = nullptr); + template <typename T> + void ResolveScopesThenForEachVariable(DeclarationScope* max_outer_scope, + T variable_proxy_stackvisitor, + ParseInfo* info = nullptr); // Predicates. bool MustAllocate(Variable* var); @@ -682,6 +684,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { } bool is_being_lazily_parsed() const { return is_being_lazily_parsed_; } #endif + void set_zone(Zone* zone) { zone_ = zone; } bool ShouldEagerCompile() const; void set_should_eager_compile(); @@ -919,13 +922,13 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { // saved in produced_preparsed_scope_data_. void SavePreParsedScopeDataForDeclarationScope(); - void set_produced_preparsed_scope_data( - ProducedPreParsedScopeData* produced_preparsed_scope_data) { - produced_preparsed_scope_data_ = produced_preparsed_scope_data; + void set_preparsed_scope_data_builder( + PreParsedScopeDataBuilder* preparsed_scope_data_builder) { + preparsed_scope_data_builder_ = preparsed_scope_data_builder; } - ProducedPreParsedScopeData* produced_preparsed_scope_data() const { - return produced_preparsed_scope_data_; + PreParsedScopeDataBuilder* preparsed_scope_data_builder() const { + return preparsed_scope_data_builder_; } private: @@ -981,7 +984,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { Variable* arguments_; // For producing the scope allocation data during preparsing. - ProducedPreParsedScopeData* produced_preparsed_scope_data_; + PreParsedScopeDataBuilder* preparsed_scope_data_builder_; struct RareData : public ZoneObject { // Convenience variable; Subclass constructor only diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h index 10ac5c48a53cee..d9917f324796d4 100644 --- a/deps/v8/src/ast/variables.h +++ b/deps/v8/src/ast/variables.h @@ -215,6 +215,7 @@ class Variable final : public ZoneObject { ForceHoleInitializationField::kNext, 1> {}; Variable** next() { return &next_; } friend List; + friend ThreadedListTraits<Variable>; }; } // namespace internal } // namespace v8 diff --git a/deps/v8/src/base/address-region.h b/deps/v8/src/base/address-region.h new file mode 100644 index 00000000000000..2c03b53685959b --- /dev/null +++ b/deps/v8/src/base/address-region.h @@ -0,0 +1,56 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_ADDRESS_REGION_H_ +#define V8_BASE_ADDRESS_REGION_H_ + +#include <stdint.h> + +#include "src/base/macros.h" + +namespace v8 { +namespace base { + +// Helper class representing an address region of certain size. +class AddressRegion { + public: + typedef uintptr_t Address; + + AddressRegion() = default; + + AddressRegion(Address address, size_t size) + : address_(address), size_(size) {} + + Address begin() const { return address_; } + Address end() const { return address_ + size_; } + + size_t size() const { return size_; } + void set_size(size_t size) { size_ = size; } + + bool is_empty() const { return size_ == 0; } + + bool contains(Address address) const { + STATIC_ASSERT(std::is_unsigned<Address>
::value); + return (address - begin()) < size(); + } + + bool contains(Address address, size_t size) const { + STATIC_ASSERT(std::is_unsigned<Address>
::value); + Address offset = address - begin(); + return (offset < size_) && (offset <= size_ - size); + } + + bool contains(const AddressRegion& region) const { + return contains(region.address_, region.size_); + } + + private: + Address address_ = 0; + size_t size_ = 0; +}; + +} // namespace base +} // namespace v8 + +#endif // V8_BASE_ADDRESS_REGION_H_ diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h index d81c537e577455..90681b8a352a31 100644 --- a/deps/v8/src/base/atomic-utils.h +++ b/deps/v8/src/base/atomic-utils.h @@ -377,6 +377,22 @@ class AtomicElement { T value_; }; +template <typename T, typename = typename std::enable_if<std::is_unsigned<T>::value>::type> +inline void CheckedIncrement(std::atomic<T>* number, T amount) { + const T old = number->fetch_add(amount); + DCHECK_GE(old + amount, old); + USE(old); +} + +template <typename T, typename = typename std::enable_if<std::is_unsigned<T>::value>::type> +inline void CheckedDecrement(std::atomic<T>* number, T amount) { + const T old = number->fetch_sub(amount); + DCHECK_GE(old, amount); + USE(old); +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h index 731a7181d76e5b..147a1730b28296 100644 --- a/deps/v8/src/base/bits.h +++ b/deps/v8/src/base/bits.h @@ -146,6 +146,14 @@ constexpr inline bool IsPowerOfTwo(T value) { V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value); // Same for 64 bit integers. |value| must be <= 2^63 V8_BASE_EXPORT uint64_t RoundUpToPowerOfTwo64(uint64_t value); +// Same for size_t integers. +inline size_t RoundUpToPowerOfTwo(size_t value) { + if (sizeof(size_t) == sizeof(uint64_t)) { + return RoundUpToPowerOfTwo64(value); + } else { + return RoundUpToPowerOfTwo32(value); + } +} // RoundDownToPowerOfTwo32(value) returns the greatest power of two which is // less than or equal to |value|. If you pass in a |value| that is already a diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc new file mode 100644 index 00000000000000..9768d1032ad3e3 --- /dev/null +++ b/deps/v8/src/base/bounded-page-allocator.cc @@ -0,0 +1,83 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/base/bounded-page-allocator.h" + +namespace v8 { +namespace base { + +BoundedPageAllocator::BoundedPageAllocator(v8::PageAllocator* page_allocator, + Address start, size_t size, + size_t allocate_page_size) + : allocate_page_size_(allocate_page_size), + commit_page_size_(page_allocator->CommitPageSize()), + page_allocator_(page_allocator), + region_allocator_(start, size, allocate_page_size_) { + CHECK_NOT_NULL(page_allocator); + CHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize())); + CHECK(IsAligned(allocate_page_size_, commit_page_size_)); +} + +void* BoundedPageAllocator::AllocatePages(void* hint, size_t size, + size_t alignment, + PageAllocator::Permission access) { + LockGuard<Mutex> guard(&mutex_); + CHECK(IsAligned(alignment, region_allocator_.page_size())); + + // Region allocator does not support alignments bigger than its own + // allocation alignment. + CHECK_LE(alignment, allocate_page_size_); + + // TODO(ishell): Consider using randomized version here.
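Sketch of what the randomized variant mentioned in the TODO could look like, using the AllocateRegion(RandomNumberGenerator*, size_t) overload that this patch adds to RegionAllocator; the `rng_` member is hypothetical and does not exist in this patch:

  Address address = region_allocator_.AllocateRegion(&rng_, size);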
+ Address address = region_allocator_.AllocateRegion(size); + if (address == RegionAllocator::kAllocationFailure) { + return nullptr; + } + CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size, + access)); + return reinterpret_cast<void*>(address); +} + +bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) { + LockGuard<Mutex> guard(&mutex_); + + Address address = reinterpret_cast<Address>
(raw_address); + size_t freed_size = region_allocator_.FreeRegion(address); + if (freed_size != size) return false; + CHECK(page_allocator_->SetPermissions(raw_address, size, + PageAllocator::kNoAccess)); + return true; +} + +bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size, + size_t new_size) { + Address address = reinterpret_cast<Address>
(raw_address); +#ifdef DEBUG + { + CHECK_LT(new_size, size); + CHECK(IsAligned(size - new_size, commit_page_size_)); + // There must be an allocated region at given |address| of a size not + // smaller than |size|. + LockGuard<Mutex> guard(&mutex_); + size_t used_region_size = region_allocator_.CheckRegion(address); + CHECK_LE(size, used_region_size); + } +#endif + // Keep the region in "used" state, just uncommit some pages. + Address free_address = address + new_size; + size_t free_size = size - new_size; + return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address), + free_size, PageAllocator::kNoAccess); +} + +bool BoundedPageAllocator::SetPermissions(void* address, size_t size, + PageAllocator::Permission access) { + DCHECK(IsAligned(reinterpret_cast<Address>
(address), commit_page_size_)); + DCHECK(IsAligned(size, commit_page_size_)); + DCHECK(region_allocator_.contains(reinterpret_cast<Address>
(address), size)); + return page_allocator_->SetPermissions(address, size, access); +} + +} // namespace base +} // namespace v8 diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h new file mode 100644 index 00000000000000..20159ccf6d1c1c --- /dev/null +++ b/deps/v8/src/base/bounded-page-allocator.h @@ -0,0 +1,78 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_ +#define V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_ + +#include "include/v8-platform.h" +#include "src/base/platform/mutex.h" +#include "src/base/region-allocator.h" + +namespace v8 { +namespace base { + +// This is a v8::PageAllocator implementation that allocates pages within the +// pre-reserved region of virtual space. This class requires the virtual space +// to be kept reserved during the lifetime of this object. +// The main applications of the bounded page allocator are +// - V8 heap pointer compression, which requires the whole V8 heap to be +// allocated within a contiguous range of virtual address space, +// - executable page allocation, which allows using PC-relative 32-bit code +// displacement on certain 64-bit platforms. +// The bounded page allocator uses another page allocator instance for doing +// the actual page allocations. +// The implementation is thread-safe. +class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator { + public: + typedef uintptr_t Address; + + BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start, + size_t size, size_t allocate_page_size); + ~BoundedPageAllocator() override = default; + + Address begin() const { return region_allocator_.begin(); } + size_t size() const { return region_allocator_.size(); } + + // Returns true if the given address is in the range controlled by the + // bounded page allocator instance.
+ bool contains(Address address) const { + return region_allocator_.contains(address); + } + + size_t AllocatePageSize() override { return allocate_page_size_; } + + size_t CommitPageSize() override { return commit_page_size_; } + + void SetRandomMmapSeed(int64_t seed) override { + page_allocator_->SetRandomMmapSeed(seed); + } + + void* GetRandomMmapAddr() override { + return page_allocator_->GetRandomMmapAddr(); + } + + void* AllocatePages(void* address, size_t size, size_t alignment, + PageAllocator::Permission access) override; + + bool FreePages(void* address, size_t size) override; + + bool ReleasePages(void* address, size_t size, size_t new_size) override; + + bool SetPermissions(void* address, size_t size, + PageAllocator::Permission access) override; + + private: + v8::base::Mutex mutex_; + const size_t allocate_page_size_; + const size_t commit_page_size_; + v8::PageAllocator* const page_allocator_; + v8::base::RegionAllocator region_allocator_; + + DISALLOW_COPY_AND_ASSIGN(BoundedPageAllocator); +}; + +} // namespace base +} // namespace v8 + +#endif // V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_ diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h index df0d1110a5851b..695e67a6187d37 100644 --- a/deps/v8/src/base/build_config.h +++ b/deps/v8/src/base/build_config.h @@ -196,9 +196,9 @@ #endif #if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64) -#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 1 +#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK true #else -#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 0 +#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK false #endif // Number of bits to represent the page size for paged spaces. The value of 19 diff --git a/deps/v8/src/base/debug/stack_trace.cc b/deps/v8/src/base/debug/stack_trace.cc index 2a3fb87a19e78e..cbf00ad17c7df5 100644 --- a/deps/v8/src/base/debug/stack_trace.cc +++ b/deps/v8/src/base/debug/stack_trace.cc @@ -21,7 +21,7 @@ StackTrace::StackTrace(const void* const* trace, size_t count) { count_ = count; } -StackTrace::~StackTrace() {} +StackTrace::~StackTrace() = default; const void* const* StackTrace::Addresses(size_t* count) const { *count = count_; diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc index 51b821bdd13a3e..ed602af547d2f9 100644 --- a/deps/v8/src/base/debug/stack_trace_posix.cc +++ b/deps/v8/src/base/debug/stack_trace_posix.cc @@ -61,7 +61,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding); namespace { volatile sig_atomic_t in_signal_handler = 0; -bool dump_stack_in_signal_handler = 1; +bool dump_stack_in_signal_handler = true; // The prefix used for mangled symbols, per the Itanium C++ ABI: // http://www.codesourcery.com/cxx-abi/abi.html#mangling @@ -104,7 +104,7 @@ void DemangleSymbols(std::string* text) { // Try to demangle the mangled symbol candidate. int status = 0; std::unique_ptr demangled_symbol( - abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, 0, &status)); + abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, nullptr, &status)); if (status == 0) { // Demangling is successful. // Remove the mangled symbol. 
text->erase(mangled_start, mangled_end - mangled_start); @@ -125,7 +125,7 @@ class BacktraceOutputHandler { virtual void HandleOutput(const char* output) = 0; protected: - virtual ~BacktraceOutputHandler() {} + virtual ~BacktraceOutputHandler() = default; }; #if HAVE_EXECINFO_H @@ -266,7 +266,7 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) { class PrintBacktraceOutputHandler : public BacktraceOutputHandler { public: - PrintBacktraceOutputHandler() {} + PrintBacktraceOutputHandler() = default; void HandleOutput(const char* output) override { // NOTE: This code MUST be async-signal safe (it's used by in-process diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc index 7a1cc175cb733e..8c5641569def84 100644 --- a/deps/v8/src/base/ieee754.cc +++ b/deps/v8/src/base/ieee754.cc @@ -90,7 +90,7 @@ typedef union { ew_u.value = (d); \ (ix0) = ew_u.parts.msw; \ (ix1) = ew_u.parts.lsw; \ - } while (0) + } while (false) /* Get a 64-bit int from a double. */ #define EXTRACT_WORD64(ix, d) \ @@ -98,7 +98,7 @@ typedef union { ieee_double_shape_type ew_u; \ ew_u.value = (d); \ (ix) = ew_u.xparts.w; \ - } while (0) + } while (false) /* Get the more significant 32 bit int from a double. */ @@ -107,7 +107,7 @@ typedef union { ieee_double_shape_type gh_u; \ gh_u.value = (d); \ (i) = gh_u.parts.msw; \ - } while (0) + } while (false) /* Get the less significant 32 bit int from a double. */ @@ -116,7 +116,7 @@ typedef union { ieee_double_shape_type gl_u; \ gl_u.value = (d); \ (i) = gl_u.parts.lsw; \ - } while (0) + } while (false) /* Set a double from two 32 bit ints. */ @@ -126,7 +126,7 @@ typedef union { iw_u.parts.msw = (ix0); \ iw_u.parts.lsw = (ix1); \ (d) = iw_u.value; \ - } while (0) + } while (false) /* Set a double from a 64-bit int. */ #define INSERT_WORD64(d, ix) \ @@ -134,7 +134,7 @@ typedef union { ieee_double_shape_type iw_u; \ iw_u.xparts.w = (ix); \ (d) = iw_u.value; \ - } while (0) + } while (false) /* Set the more significant 32 bits of a double from an int. */ @@ -144,7 +144,7 @@ typedef union { sh_u.value = (d); \ sh_u.parts.msw = (v); \ (d) = sh_u.value; \ - } while (0) + } while (false) /* Set the less significant 32 bits of a double from an int. */ @@ -154,7 +154,7 @@ typedef union { sl_u.value = (d); \ sl_u.parts.lsw = (v); \ (d) = sl_u.value; \ - } while (0) + } while (false) /* Support macro. 
*/ @@ -1210,9 +1210,9 @@ double atan(double x) { if (ix > 0x7FF00000 || (ix == 0x7FF00000 && (low != 0))) return x + x; /* NaN */ if (hx > 0) - return atanhi[3] + *(volatile double *)&atanlo[3]; + return atanhi[3] + *const_cast<volatile double*>(&atanlo[3]); else - return -atanhi[3] - *(volatile double *)&atanlo[3]; + return -atanhi[3] - *const_cast<volatile double*>(&atanlo[3]); } if (ix < 0x3FDC0000) { /* |x| < 0.4375 */ if (ix < 0x3E400000) { /* |x| < 2^-27 */ diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h index baf6b12ccbbb28..9a9538d06574bd 100644 --- a/deps/v8/src/base/logging.h +++ b/deps/v8/src/base/logging.h @@ -49,7 +49,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int, if (V8_UNLIKELY(!(condition))) { \ FATAL("Check failed: %s.", message); \ } \ - } while (0) + } while (false) #define CHECK(condition) CHECK_WITH_MSG(condition, #condition) #ifdef DEBUG @@ -59,7 +59,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int, if (V8_UNLIKELY(!(condition))) { \ V8_Dcheck(__FILE__, __LINE__, message); \ } \ - } while (0) + } while (false) #define DCHECK(condition) DCHECK_WITH_MSG(condition, #condition) // Helper macro for binary operators. @@ -73,7 +73,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int, FATAL("Check failed: %s.", _msg->c_str()); \ delete _msg; \ } \ - } while (0) + } while (false) #define DCHECK_OP(name, op, lhs, rhs) \ do { \ @@ -84,7 +84,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int, V8_Dcheck(__FILE__, __LINE__, _msg->c_str()); \ delete _msg; \ } \ - } while (0) + } while (false) #else @@ -98,7 +98,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int, typename ::v8::base::pass_value_or_ref<decltype(rhs)>::type>((lhs), \ (rhs)); \ CHECK_WITH_MSG(_cmp, #lhs " " #op " " #rhs); \ - } while (0) + } while (false) #define DCHECK_WITH_MSG(condition, msg) void(0); diff --git a/deps/v8/src/base/lsan-page-allocator.cc b/deps/v8/src/base/lsan-page-allocator.cc new file mode 100644 index 00000000000000..4840c7ea80e91a --- /dev/null +++ b/deps/v8/src/base/lsan-page-allocator.cc @@ -0,0 +1,59 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file.
+ +#include "src/base/lsan-page-allocator.h" + +#include "src/base/logging.h" + +#if defined(LEAK_SANITIZER) +#include +#endif + +namespace v8 { +namespace base { + +LsanPageAllocator::LsanPageAllocator(v8::PageAllocator* page_allocator) + : page_allocator_(page_allocator), + allocate_page_size_(page_allocator_->AllocatePageSize()), + commit_page_size_(page_allocator_->CommitPageSize()) { + DCHECK_NOT_NULL(page_allocator); +} + +void* LsanPageAllocator::AllocatePages(void* address, size_t size, + size_t alignment, + PageAllocator::Permission access) { + void* result = + page_allocator_->AllocatePages(address, size, alignment, access); +#if defined(LEAK_SANITIZER) + if (result != nullptr) { + __lsan_register_root_region(result, size); + } +#endif + return result; +} + +bool LsanPageAllocator::FreePages(void* address, size_t size) { + bool result = page_allocator_->FreePages(address, size); +#if defined(LEAK_SANITIZER) + if (result) { + __lsan_unregister_root_region(address, size); + } +#endif + return result; +} + +bool LsanPageAllocator::ReleasePages(void* address, size_t size, + size_t new_size) { + bool result = page_allocator_->ReleasePages(address, size, new_size); +#if defined(LEAK_SANITIZER) + if (result) { + __lsan_unregister_root_region(address, size); + __lsan_register_root_region(address, new_size); + } +#endif + return result; +} + +} // namespace base +} // namespace v8 diff --git a/deps/v8/src/base/lsan-page-allocator.h b/deps/v8/src/base/lsan-page-allocator.h new file mode 100644 index 00000000000000..d95c7fbf1ed208 --- /dev/null +++ b/deps/v8/src/base/lsan-page-allocator.h @@ -0,0 +1,56 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_LSAN_PAGE_ALLOCATOR_H_ +#define V8_BASE_LSAN_PAGE_ALLOCATOR_H_ + +#include "include/v8-platform.h" +#include "src/base/base-export.h" +#include "src/base/compiler-specific.h" + +namespace v8 { +namespace base { + +// This is a v8::PageAllocator implementation that decorates provided page +// allocator object with leak sanitizer notifications when LEAK_SANITIZER +// is defined. 
+class V8_BASE_EXPORT LsanPageAllocator + : public NON_EXPORTED_BASE(::v8::PageAllocator) { + public: + LsanPageAllocator(v8::PageAllocator* page_allocator); + ~LsanPageAllocator() override = default; + + size_t AllocatePageSize() override { return allocate_page_size_; } + + size_t CommitPageSize() override { return commit_page_size_; } + + void SetRandomMmapSeed(int64_t seed) override { + return page_allocator_->SetRandomMmapSeed(seed); + } + + void* GetRandomMmapAddr() override { + return page_allocator_->GetRandomMmapAddr(); + } + + void* AllocatePages(void* address, size_t size, size_t alignment, + PageAllocator::Permission access) override; + + bool FreePages(void* address, size_t size) override; + + bool ReleasePages(void* address, size_t size, size_t new_size) override; + + bool SetPermissions(void* address, size_t size, + PageAllocator::Permission access) override { + return page_allocator_->SetPermissions(address, size, access); + } + + private: + v8::PageAllocator* const page_allocator_; + const size_t allocate_page_size_; + const size_t commit_page_size_; +}; + +} // namespace base +} // namespace v8 +#endif // V8_BASE_LSAN_PAGE_ALLOCATOR_H_ diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h index 081018cc2e76b1..105ee588524239 100644 --- a/deps/v8/src/base/macros.h +++ b/deps/v8/src/base/macros.h @@ -195,8 +195,9 @@ V8_INLINE Dest bit_cast(Source const& source) { #define V8_IMMEDIATE_CRASH() ((void(*)())0)() #endif - -// TODO(all) Replace all uses of this macro with static_assert, remove macro. +// A convenience wrapper around static_assert without a string message argument. +// Once C++17 becomes the default, this macro can be removed in favor of the +// new static_assert(condition) overload. #define STATIC_ASSERT(test) static_assert(test, #test) namespace v8 { @@ -346,47 +347,37 @@ V8_INLINE A implicit_cast(A x) { // write V8_2PART_UINT64_C(0x12345678,90123456); #define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u)) - -// Compute the 0-relative offset of some absolute value x of type T. -// This allows conversion of Addresses and integral types into -// 0-relative int offsets. -template <typename T> -constexpr inline intptr_t OffsetFrom(T x) { - return x - static_cast<T>(0); -} - - -// Compute the absolute value of type T for some 0-relative offset x. -// This allows conversion of 0-relative int offsets into Addresses and -// integral types. -template <typename T> -constexpr inline T AddressFrom(intptr_t x) { - return static_cast<T>(static_cast<T>(0) + x); -} - - // Return the largest multiple of m which is <= x. template <typename T> inline T RoundDown(T x, intptr_t m) { + STATIC_ASSERT(std::is_integral<T>::value); // m must be a power of two. DCHECK(m != 0 && ((m & (m - 1)) == 0)); - return AddressFrom<T>(OffsetFrom(x) & -m); + return x & -m; } template <intptr_t m, typename T> constexpr inline T RoundDown(T x) { + STATIC_ASSERT(std::is_integral<T>::value); // m must be a power of two. STATIC_ASSERT(m != 0 && ((m & (m - 1)) == 0)); - return AddressFrom<T>(OffsetFrom(x) & -m); + return x & -m; } // Return the smallest multiple of m which is >= x.
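A quick worked check of the bit trick these helpers rely on (illustrative numbers, not from the patch): for a power-of-two m, x & -m clears the low bits, and rounding up goes through x + (m - 1):

  static_assert((37 & -8) == 32, "RoundDown(37, 8)");
  static_assert(((37 + 7) & -8) == 40, "RoundUp(37, 8)");
  static_assert((40 & (8 - 1)) == 0, "IsAligned(40, 8)");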
template <typename T> inline T RoundUp(T x, intptr_t m) { + STATIC_ASSERT(std::is_integral<T>::value); return RoundDown(static_cast<T>(x + m - 1), m); } template <intptr_t m, typename T> constexpr inline T RoundUp(T x) { - return RoundDown<m, T>(static_cast<T>(x + m - 1)); + STATIC_ASSERT(std::is_integral<T>::value); + return RoundDown<m, T>(static_cast<T>(x + (m - 1))); +} + +template <typename T, typename U> +inline bool IsAligned(T value, U alignment) { + return (value & (alignment - 1)) == 0; } inline void* AlignedAddress(void* address, size_t alignment) { diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h index 6f5276843d6398..7dfef2d31f6c99 100644 --- a/deps/v8/src/base/optional.h +++ b/deps/v8/src/base/optional.h @@ -123,7 +123,7 @@ class Optional { public: using value_type = T; - constexpr Optional() {} + constexpr Optional() = default; constexpr Optional(base::nullopt_t) {} // NOLINT(runtime/explicit) diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc index 25ee2e47214c02..c25104739d0307 100644 --- a/deps/v8/src/base/page-allocator.cc +++ b/deps/v8/src/base/page-allocator.cc @@ -24,11 +24,9 @@ STATIC_ASSERT_ENUM(PageAllocator::kReadExecute, #undef STATIC_ASSERT_ENUM -size_t PageAllocator::AllocatePageSize() { - return base::OS::AllocatePageSize(); -} - -size_t PageAllocator::CommitPageSize() { return base::OS::CommitPageSize(); } +PageAllocator::PageAllocator() + : allocate_page_size_(base::OS::AllocatePageSize()), + commit_page_size_(base::OS::CommitPageSize()) {} void PageAllocator::SetRandomMmapSeed(int64_t seed) { base::OS::SetRandomMmapSeed(seed); diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h index ff817cdba22738..68e17db49456be 100644 --- a/deps/v8/src/base/page-allocator.h +++ b/deps/v8/src/base/page-allocator.h @@ -15,11 +15,12 @@ namespace base { class V8_BASE_EXPORT PageAllocator : public NON_EXPORTED_BASE(::v8::PageAllocator) { public: - virtual ~PageAllocator() = default; + PageAllocator(); + ~PageAllocator() override = default; - size_t AllocatePageSize() override; + size_t AllocatePageSize() override { return allocate_page_size_; } - size_t CommitPageSize() override; + size_t CommitPageSize() override { return commit_page_size_; } void SetRandomMmapSeed(int64_t seed) override; @@ -34,6 +35,10 @@ class V8_BASE_EXPORT PageAllocator bool SetPermissions(void* address, size_t size, PageAllocator::Permission access) override; + + private: + const size_t allocate_page_size_; + const size_t commit_page_size_; }; } // namespace base diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS index 5deaa67ce7c988..cbaed6105d65b8 100644 --- a/deps/v8/src/base/platform/OWNERS +++ b/deps/v8/src/base/platform/OWNERS @@ -3,4 +3,6 @@ set noparent hpayer@chromium.org mlippautz@chromium.org +per-file platform-fuchsia.cc=wez@chromium.org + # COMPONENT: Blink>JavaScript diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc index d1979fb9d88084..713ee404bd741a 100644 --- a/deps/v8/src/base/platform/platform-fuchsia.cc +++ b/deps/v8/src/base/platform/platform-fuchsia.cc @@ -57,8 +57,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, strlen(kVirtualMemoryName)); uintptr_t reservation; uint32_t prot = GetProtectionFromMemoryPermission(access); - zx_status_t status = zx_vmar_map_old(zx_vmar_root_self(), 0, vmo, 0, - request_size, prot, &reservation); + zx_status_t status = zx_vmar_map(zx_vmar_root_self(), prot, 0, vmo, 0, + request_size, &reservation); // Either the vmo is now referenced by the
vmar, or we failed and are bailing, // so close the vmo either way. zx_handle_close(vmo); @@ -67,7 +67,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, } uint8_t* base = reinterpret_cast<uint8_t*>(reservation); - uint8_t* aligned_base = RoundUp(base, alignment); + uint8_t* aligned_base = reinterpret_cast<uint8_t*>( + RoundUp(reinterpret_cast<uintptr_t>(base), alignment)); // Unmap extra memory reserved before and after the desired block. if (aligned_base != base) { @@ -114,9 +115,8 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize()); DCHECK_EQ(0, size % CommitPageSize()); uint32_t prot = GetProtectionFromMemoryPermission(access); - return zx_vmar_protect_old(zx_vmar_root_self(), - reinterpret_cast<uintptr_t>(address), size, - prot) == ZX_OK; + return zx_vmar_protect(zx_vmar_root_self(), prot, + reinterpret_cast<uintptr_t>(address), size) == ZX_OK; } // static diff --git a/deps/v8/src/base/platform/platform-posix-time.h b/deps/v8/src/base/platform/platform-posix-time.h index 4d3373715bb1b9..7814296b83bc33 100644 --- a/deps/v8/src/base/platform/platform-posix-time.h +++ b/deps/v8/src/base/platform/platform-posix-time.h @@ -15,7 +15,7 @@ class PosixDefaultTimezoneCache : public PosixTimezoneCache { const char* LocalTimezone(double time_ms) override; double LocalTimeOffset(double time_ms, bool is_utc) override; - ~PosixDefaultTimezoneCache() override {} + ~PosixDefaultTimezoneCache() override = default; }; } // namespace base diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index cb25196970629d..25b03005badf7d 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -86,7 +86,7 @@ namespace base { namespace { // 0 is never a valid thread id. -const pthread_t kNoThread = (pthread_t) 0; +const pthread_t kNoThread = static_cast<pthread_t>(0); bool g_hard_abort = false; @@ -254,10 +254,6 @@ void* OS::GetRandomMmapAddr() { // Little-endian Linux: 46 bits of virtual addressing. raw_addr &= uint64_t{0x3FFFFFFF0000}; #endif -#elif V8_TARGET_ARCH_MIPS64 - // We allocate code in 256 MB aligned segments because of optimizations using - // J instruction that require that all code is within a single 256 MB segment - raw_addr &= uint64_t{0x3FFFE0000000}; #elif V8_TARGET_ARCH_S390X // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits // of virtual addressing. Truncate to 40 bits to allow kernel chance to @@ -313,7 +309,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, // Unmap memory allocated before the aligned base address.
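Worked example of the over-allocate-then-trim arithmetic (illustrative values, not from the patch): with base == 0x12345000 and alignment == 0x10000, RoundUp yields aligned_base == 0x12350000, so the unmapped prefix is 0xB000 bytes:

  static_assert(((0x12345000u + 0xFFFFu) & ~0xFFFFu) == 0x12350000u,
                "aligned_base");
  static_assert(0x12350000u - 0x12345000u == 0xB000u, "prefix size");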
uint8_t* base = static_cast<uint8_t*>(result); - uint8_t* aligned_base = RoundUp(base, alignment); + uint8_t* aligned_base = reinterpret_cast<uint8_t*>( + RoundUp(reinterpret_cast<uintptr_t>(base), alignment)); if (aligned_base != base) { DCHECK_LT(base, aligned_base); size_t prefix_size = static_cast<size_t>(aligned_base - base); diff --git a/deps/v8/src/base/platform/platform-posix.h b/deps/v8/src/base/platform/platform-posix.h index 55861bc9ac2ed4..8cf5e54604f897 100644 --- a/deps/v8/src/base/platform/platform-posix.h +++ b/deps/v8/src/base/platform/platform-posix.h @@ -15,7 +15,7 @@ class PosixTimezoneCache : public TimezoneCache { public: double DaylightSavingsOffset(double time_ms) override; void Clear() override {} - ~PosixTimezoneCache() override {} + ~PosixTimezoneCache() override = default; protected: static const int msPerSecond = 1000; diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index 2e56ac5df14502..11a008e6c69787 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -822,7 +822,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, if (base == nullptr) return nullptr; // Can't allocate, we're OOM. // If address is suitably aligned, we're done. - uint8_t* aligned_base = RoundUp(base, alignment); + uint8_t* aligned_base = reinterpret_cast<uint8_t*>( + RoundUp(reinterpret_cast<uintptr_t>(base), alignment)); if (base == aligned_base) return reinterpret_cast<void*>(base); // Otherwise, free it and try a larger allocation. @@ -843,7 +844,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, // Try to trim the allocation by freeing the padded allocation and then // calling VirtualAlloc at the aligned base. CHECK(Free(base, padded_size)); - aligned_base = RoundUp(base, alignment); + aligned_base = reinterpret_cast<uint8_t*>( + RoundUp(reinterpret_cast<uintptr_t>(base), alignment)); base = reinterpret_cast<uint8_t*>( VirtualAlloc(aligned_base, size, flags, protect)); // We might not get the reduced allocation due to a race. In that case, diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h index 51b60148216ccc..f9d01edf00aa2d 100644 --- a/deps/v8/src/base/platform/platform.h +++ b/deps/v8/src/base/platform/platform.h @@ -188,7 +188,7 @@ class V8_BASE_EXPORT OS { class V8_BASE_EXPORT MemoryMappedFile { public: - virtual ~MemoryMappedFile() {} + virtual ~MemoryMappedFile() = default; virtual void* memory() const = 0; virtual size_t size() const = 0; diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h index 161092ad8bd15e..9e991664870a0b 100644 --- a/deps/v8/src/base/platform/time.h +++ b/deps/v8/src/base/platform/time.h @@ -105,10 +105,7 @@ class V8_BASE_EXPORT TimeDelta final { static TimeDelta FromTimespec(struct timespec ts); struct timespec ToTimespec() const; - TimeDelta& operator=(const TimeDelta& other) { - delta_ = other.delta_; - return *this; - } + TimeDelta& operator=(const TimeDelta& other) = default; // Computations with other deltas. TimeDelta operator+(const TimeDelta& other) const { diff --git a/deps/v8/src/base/region-allocator.cc b/deps/v8/src/base/region-allocator.cc new file mode 100644 index 00000000000000..5e0fc78a841937 --- /dev/null +++ b/deps/v8/src/base/region-allocator.cc @@ -0,0 +1,284 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file.
+ +#include "src/base/region-allocator.h" +#include "src/base/bits.h" +#include "src/base/macros.h" + +namespace v8 { +namespace base { + +// If |free_size| < |region_size| * |kMaxLoadFactorForRandomization| stop trying +// to randomize region allocation. +constexpr double kMaxLoadFactorForRandomization = 0.40; + +// Max number of attempts to allocate page at random address. +constexpr int kMaxRandomizationAttempts = 3; + +RegionAllocator::RegionAllocator(Address memory_region_begin, + size_t memory_region_size, size_t page_size) + : whole_region_(memory_region_begin, memory_region_size, false), + region_size_in_pages_(size() / page_size), + max_load_for_randomization_( + static_cast(size() * kMaxLoadFactorForRandomization)), + free_size_(0), + page_size_(page_size) { + CHECK_LT(begin(), end()); + CHECK(base::bits::IsPowerOfTwo(page_size_)); + CHECK(IsAligned(size(), page_size_)); + CHECK(IsAligned(begin(), page_size_)); + + // Initial region. + Region* region = new Region(whole_region_); + + all_regions_.insert(region); + + FreeListAddRegion(region); +} + +RegionAllocator::~RegionAllocator() { + for (Region* region : all_regions_) { + delete region; + } +} + +RegionAllocator::AllRegionsSet::iterator RegionAllocator::FindRegion( + Address address) { + if (!whole_region_.contains(address)) return all_regions_.end(); + + Region key(address, 0, false); + AllRegionsSet::iterator iter = all_regions_.upper_bound(&key); + // Regions in |all_regions_| are compared by end() values and key's end() + // points exactly to the address we are querying, so the upper_bound will + // find the region whose |end()| is greater than the requested address. + DCHECK_NE(iter, all_regions_.end()); + DCHECK((*iter)->contains(address)); + return iter; +} + +void RegionAllocator::FreeListAddRegion(Region* region) { + free_size_ += region->size(); + free_regions_.insert(region); +} + +RegionAllocator::Region* RegionAllocator::FreeListFindRegion(size_t size) { + Region key(0, size, false); + auto iter = free_regions_.lower_bound(&key); + return iter == free_regions_.end() ? nullptr : *iter; +} + +void RegionAllocator::FreeListRemoveRegion(Region* region) { + DCHECK(!region->is_used()); + auto iter = free_regions_.find(region); + DCHECK_NE(iter, free_regions_.end()); + DCHECK_EQ(region, *iter); + DCHECK_LE(region->size(), free_size_); + free_size_ -= region->size(); + free_regions_.erase(iter); +} + +RegionAllocator::Region* RegionAllocator::Split(Region* region, + size_t new_size) { + DCHECK(IsAligned(new_size, page_size_)); + DCHECK_GT(region->size(), new_size); + + // Create new region and put it to the lists after the |region|. + bool used = region->is_used(); + Region* new_region = + new Region(region->begin() + new_size, region->size() - new_size, used); + if (!used) { + // Remove region from the free list before updating it's size. + FreeListRemoveRegion(region); + } + region->set_size(new_size); + + all_regions_.insert(new_region); + + if (!used) { + FreeListAddRegion(region); + FreeListAddRegion(new_region); + } + return new_region; +} + +void RegionAllocator::Merge(AllRegionsSet::iterator prev_iter, + AllRegionsSet::iterator next_iter) { + Region* prev = *prev_iter; + Region* next = *next_iter; + DCHECK_EQ(prev->end(), next->begin()); + prev->set_size(prev->size() + next->size()); + + all_regions_.erase(next_iter); // prev_iter stays valid. + + // The |next| region must already not be in the free list. 
+ DCHECK_EQ(free_regions_.find(next), free_regions_.end()); + delete next; +} + +RegionAllocator::Address RegionAllocator::AllocateRegion(size_t size) { + DCHECK_NE(size, 0); + DCHECK(IsAligned(size, page_size_)); + + Region* region = FreeListFindRegion(size); + if (region == nullptr) return kAllocationFailure; + + if (region->size() != size) { + Split(region, size); + } + DCHECK(IsAligned(region->begin(), page_size_)); + DCHECK_EQ(region->size(), size); + + // Mark region as used. + FreeListRemoveRegion(region); + region->set_is_used(true); + return region->begin(); +} + +RegionAllocator::Address RegionAllocator::AllocateRegion( + RandomNumberGenerator* rng, size_t size) { + if (free_size() >= max_load_for_randomization_) { + // There is enough free space for trying to randomize the address. + size_t random = 0; + + for (int i = 0; i < kMaxRandomizationAttempts; i++) { + rng->NextBytes(&random, sizeof(random)); + size_t random_offset = page_size_ * (random % region_size_in_pages_); + Address address = begin() + random_offset; + if (AllocateRegionAt(address, size)) { + return address; + } + } + // Fall back to free list allocation. + } + return AllocateRegion(size); +} + +bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size) { + DCHECK(IsAligned(requested_address, page_size_)); + DCHECK_NE(size, 0); + DCHECK(IsAligned(size, page_size_)); + + Address requested_end = requested_address + size; + DCHECK_LE(requested_end, end()); + + Region* region; + { + AllRegionsSet::iterator region_iter = FindRegion(requested_address); + if (region_iter == all_regions_.end()) { + return false; + } + region = *region_iter; + } + if (region->is_used() || region->end() < requested_end) { + return false; + } + // Found free region that includes the requested one. + if (region->begin() != requested_address) { + // Split the region at the |requested_address| boundary. + size_t new_size = requested_address - region->begin(); + DCHECK(IsAligned(new_size, page_size_)); + region = Split(region, new_size); + } + if (region->end() != requested_end) { + // Split the region at the |requested_end| boundary. + Split(region, size); + } + DCHECK_EQ(region->begin(), requested_address); + DCHECK_EQ(region->size(), size); + + // Mark region as used. + FreeListRemoveRegion(region); + region->set_is_used(true); + return true; +} + +size_t RegionAllocator::FreeRegion(Address address) { + AllRegionsSet::iterator region_iter = FindRegion(address); + if (region_iter == all_regions_.end()) { + return 0; + } + Region* region = *region_iter; + if (region->begin() != address || !region->is_used()) { + return 0; + } + + size_t size = region->size(); + // The region must not be in the free list. + DCHECK_EQ(free_regions_.find(*region_iter), free_regions_.end()); + + region->set_is_used(false); + + // Merge current region with the surrounding ones if they are free. + if (region->end() != whole_region_.end()) { + // There must be a range after the current one. + AllRegionsSet::iterator next_iter = std::next(region_iter); + DCHECK_NE(next_iter, all_regions_.end()); + if (!(*next_iter)->is_used()) { + // |next| region object will be deleted during merge, remove it from + // the free list. + FreeListRemoveRegion(*next_iter); + Merge(region_iter, next_iter); + } + } + if (region->begin() != whole_region_.begin()) { + // There must be a range before the current one. 
+    AllRegionsSet::iterator prev_iter = std::prev(region_iter);
+    DCHECK_NE(prev_iter, all_regions_.end());
+    if (!(*prev_iter)->is_used()) {
+      // The |prev| region's size will change; we'll have to re-insert it
+      // into the proper place in the free list.
+      FreeListRemoveRegion(*prev_iter);
+      Merge(prev_iter, region_iter);
+      // The |prev| region becomes the current region.
+      region_iter = prev_iter;
+      region = *region_iter;
+    }
+  }
+  FreeListAddRegion(region);
+  return size;
+}
+
+size_t RegionAllocator::CheckRegion(Address address) {
+  AllRegionsSet::iterator region_iter = FindRegion(address);
+  if (region_iter == all_regions_.end()) {
+    return 0;
+  }
+  Region* region = *region_iter;
+  if (region->begin() != address || !region->is_used()) {
+    return 0;
+  }
+  return region->size();
+}
+
+void RegionAllocator::Region::Print(std::ostream& os) const {
+  std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
+  os << "[" << begin() << ", " << end() << "), size: " << size();
+  os << ", " << (is_used() ? "used" : "free");
+  os.flags(flags);
+}
+
+void RegionAllocator::Print(std::ostream& os) const {
+  std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
+  os << "RegionAllocator: [" << begin() << ", " << end() << ")";
+  os << "\nsize: " << size();
+  os << "\nfree_size: " << free_size();
+  os << "\npage_size: " << page_size_;
+
+  os << "\nall regions: ";
+  for (const Region* region : all_regions_) {
+    os << "\n  ";
+    region->Print(os);
+  }
+
+  os << "\nfree regions: ";
+  for (const Region* region : free_regions_) {
+    os << "\n  ";
+    region->Print(os);
+  }
+  os << "\n";
+  os.flags(flags);
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/deps/v8/src/base/region-allocator.h b/deps/v8/src/base/region-allocator.h
new file mode 100644
index 00000000000000..b8a9c39453817f
--- /dev/null
+++ b/deps/v8/src/base/region-allocator.h
@@ -0,0 +1,158 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_REGION_ALLOCATOR_H_
+#define V8_BASE_REGION_ALLOCATOR_H_
+
+#include <set>
+
+#include "src/base/address-region.h"
+#include "src/base/utils/random-number-generator.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck
+
+namespace v8 {
+namespace base {
+
+// Helper class for managing used/free regions within the
+// [address, address + size) region. The minimum allocation unit is
+// |page_size|; requested allocation sizes are rounded up to |page_size|.
+// The region allocation algorithm implements a best-fit-with-coalescing
+// strategy: it tries to find the smallest suitable free region upon
+// allocation and tries to merge a region with its neighbors upon freeing.
+//
+// This class does not perform any actual region reservation.
+// Not thread-safe.
+class V8_BASE_EXPORT RegionAllocator final {
+ public:
+  typedef uintptr_t Address;
+
+  static constexpr Address kAllocationFailure = static_cast<Address>(-1);
+
+  RegionAllocator(Address address, size_t size, size_t page_size);
+  ~RegionAllocator();
+
+  // Allocates a region of |size| (must be |page_size|-aligned). Returns
+  // the address of the region on success or kAllocationFailure.
+  Address AllocateRegion(size_t size);
+  // Same as above but tries to randomize the region displacement.
+  Address AllocateRegion(RandomNumberGenerator* rng, size_t size);
+
+  // Allocates a region of |size| at |requested_address| if it's free. Both
+  // the address and the size must be |page_size|-aligned. On success
+  // returns true.
+  // This kind of allocation is supposed to be used during the setup phase
+  // to mark certain regions as used or for randomizing region displacement.
+  bool AllocateRegionAt(Address requested_address, size_t size);
+
+  // Frees the region at the given |address| and returns its size. There
+  // must be a used region starting at the given address; otherwise nothing
+  // will be freed and 0 will be returned.
+  size_t FreeRegion(Address address);
+
+  // If there is a used region starting at the given address, returns its
+  // size; otherwise 0.
+  size_t CheckRegion(Address address);
+
+  Address begin() const { return whole_region_.begin(); }
+  Address end() const { return whole_region_.end(); }
+  size_t size() const { return whole_region_.size(); }
+
+  bool contains(Address address) const {
+    return whole_region_.contains(address);
+  }
+
+  bool contains(Address address, size_t size) const {
+    return whole_region_.contains(address, size);
+  }
+
+  // Total size of not yet acquired regions.
+  size_t free_size() const { return free_size_; }
+
+  // The alignment of the allocated regions' addresses and the granularity
+  // of the allocated regions' sizes.
+  size_t page_size() const { return page_size_; }
+
+  void Print(std::ostream& os) const;
+
+ private:
+  class Region : public AddressRegion {
+   public:
+    Region(Address address, size_t size, bool is_used)
+        : AddressRegion(address, size), is_used_(is_used) {}
+
+    bool is_used() const { return is_used_; }
+    void set_is_used(bool used) { is_used_ = used; }
+
+    void Print(std::ostream& os) const;
+
+   private:
+    bool is_used_;
+  };
+
+  // The whole region.
+  const Region whole_region_;
+
+  // Number of |page_size_| pages in the whole region.
+  const size_t region_size_in_pages_;
+
+  // If the free size drops below this value, stop trying to randomize the
+  // allocation addresses.
+  const size_t max_load_for_randomization_;
+
+  // Size of all free regions.
+  size_t free_size_;
+
+  // Minimum region size. Must be a power of 2.
+  const size_t page_size_;
+
+  struct AddressEndOrder {
+    bool operator()(const Region* a, const Region* b) const {
+      return a->end() < b->end();
+    }
+  };
+  // All regions ordered by addresses.
+  typedef std::set<Region*, AddressEndOrder> AllRegionsSet;
+  AllRegionsSet all_regions_;
+
+  struct SizeAddressOrder {
+    bool operator()(const Region* a, const Region* b) const {
+      if (a->size() != b->size()) return a->size() < b->size();
+      return a->begin() < b->begin();
+    }
+  };
+  // Free regions ordered by sizes and addresses.
+  std::set<Region*, SizeAddressOrder> free_regions_;
+
+  // Returns an iterator to the region containing the given address, or
+  // all_regions_.end() if the address is outside the managed region.
+  AllRegionsSet::iterator FindRegion(Address address);
+
+  // Adds the given region to the set of free regions.
+  void FreeListAddRegion(Region* region);
+
+  // Finds the best-fit free region for the given size.
+  Region* FreeListFindRegion(size_t size);
+
+  // Removes the given region from the set of free regions.
+ void FreeListRemoveRegion(Region* region); + + // Splits given |region| into two: one of |new_size| size and a new one + // having the rest. The new region is returned. + Region* Split(Region* region, size_t new_size); + + // For two coalescing regions merges |next| to |prev| and deletes |next|. + void Merge(AllRegionsSet::iterator prev_iter, + AllRegionsSet::iterator next_iter); + + FRIEND_TEST(RegionAllocatorTest, AllocateRegionRandom); + FRIEND_TEST(RegionAllocatorTest, Fragmentation); + FRIEND_TEST(RegionAllocatorTest, FindRegion); + FRIEND_TEST(RegionAllocatorTest, Contains); + + DISALLOW_COPY_AND_ASSIGN(RegionAllocator); +}; + +} // namespace base +} // namespace v8 + +#endif // V8_BASE_REGION_ALLOCATOR_H_ diff --git a/deps/v8/src/base/safe_math.h b/deps/v8/src/base/safe_math.h index 62a2f723f2bd84..700bc3387f19ef 100644 --- a/deps/v8/src/base/safe_math.h +++ b/deps/v8/src/base/safe_math.h @@ -49,7 +49,7 @@ class CheckedNumeric { public: typedef T type; - CheckedNumeric() {} + CheckedNumeric() = default; // Copy constructor. template diff --git a/deps/v8/src/base/timezone-cache.h b/deps/v8/src/base/timezone-cache.h index 96ad7bb41f8936..3d97eee126936d 100644 --- a/deps/v8/src/base/timezone-cache.h +++ b/deps/v8/src/base/timezone-cache.h @@ -27,7 +27,7 @@ class TimezoneCache { virtual void Clear() = 0; // Called when tearing down the isolate - virtual ~TimezoneCache() {} + virtual ~TimezoneCache() = default; }; } // namespace base diff --git a/deps/v8/src/basic-block-profiler.cc b/deps/v8/src/basic-block-profiler.cc index eaecd5dc6889ed..d79dbcdfa8cbb8 100644 --- a/deps/v8/src/basic-block-profiler.cc +++ b/deps/v8/src/basic-block-profiler.cc @@ -27,9 +27,6 @@ BasicBlockProfiler::Data::Data(size_t n_blocks) block_rpo_numbers_(n_blocks_), counts_(n_blocks_, 0) {} -BasicBlockProfiler::Data::~Data() {} - - static void InsertIntoString(std::ostringstream* os, std::string* string) { string->insert(0, os->str()); } @@ -68,10 +65,6 @@ void BasicBlockProfiler::Data::ResetCounts() { } } - -BasicBlockProfiler::BasicBlockProfiler() {} - - BasicBlockProfiler::Data* BasicBlockProfiler::NewData(size_t n_blocks) { base::LockGuard lock(&data_list_mutex_); Data* data = new Data(n_blocks); diff --git a/deps/v8/src/basic-block-profiler.h b/deps/v8/src/basic-block-profiler.h index 975840e46e569d..835dda5356a2db 100644 --- a/deps/v8/src/basic-block-profiler.h +++ b/deps/v8/src/basic-block-profiler.h @@ -36,7 +36,7 @@ class BasicBlockProfiler { const BasicBlockProfiler::Data& s); explicit Data(size_t n_blocks); - ~Data(); + ~Data() = default; void ResetCounts(); @@ -51,7 +51,7 @@ class BasicBlockProfiler { typedef std::list DataList; - BasicBlockProfiler(); + BasicBlockProfiler() = default; ~BasicBlockProfiler(); V8_EXPORT_PRIVATE static BasicBlockProfiler* Get(); diff --git a/deps/v8/src/bit-vector.h b/deps/v8/src/bit-vector.h index ef876007537dfa..5be3198cc642d6 100644 --- a/deps/v8/src/bit-vector.h +++ b/deps/v8/src/bit-vector.h @@ -21,7 +21,7 @@ class BitVector : public ZoneObject { }; // Iterator for the elements of this BitVector. 
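// (Gloss: BASE_EMBEDDED was, by this point, an empty marker macro for
//  stack-allocated classes, so dropping it here and in the classes below is
//  a pure annotation cleanup with no behavior change.)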
- class Iterator BASE_EMBEDDED { + class Iterator { public: explicit Iterator(BitVector* target) : target_(target), @@ -31,7 +31,7 @@ class BitVector : public ZoneObject { current_(-1) { Advance(); } - ~Iterator() {} + ~Iterator() = default; bool Done() const { return current_index_ >= target_->data_length_; } void Advance(); @@ -305,10 +305,9 @@ class BitVector : public ZoneObject { DISALLOW_COPY_AND_ASSIGN(BitVector); }; - -class GrowableBitVector BASE_EMBEDDED { +class GrowableBitVector { public: - class Iterator BASE_EMBEDDED { + class Iterator { public: Iterator(const GrowableBitVector* target, Zone* zone) : it_(target->bits_ == nullptr ? new (zone) BitVector(1, zone) diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 30450b133b0ad5..5ff1035d90c23a 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -24,16 +24,21 @@ #include "src/objects/hash-table-inl.h" #ifdef V8_INTL_SUPPORT #include "src/objects/intl-objects.h" +#endif // V8_INTL_SUPPORT +#include "src/objects/js-array-buffer-inl.h" +#include "src/objects/js-array-inl.h" +#ifdef V8_INTL_SUPPORT +#include "src/objects/js-break-iterator.h" #include "src/objects/js-collator.h" +#include "src/objects/js-date-time-format.h" #include "src/objects/js-list-format.h" #include "src/objects/js-locale.h" +#include "src/objects/js-number-format.h" +#include "src/objects/js-plural-rules.h" #endif // V8_INTL_SUPPORT -#include "src/objects/js-array-buffer-inl.h" -#include "src/objects/js-array-inl.h" #include "src/objects/js-regexp-string-iterator.h" #include "src/objects/js-regexp.h" #ifdef V8_INTL_SUPPORT -#include "src/objects/js-plural-rules.h" #include "src/objects/js-relative-time-format.h" #endif // V8_INTL_SUPPORT #include "src/objects/templates.h" @@ -89,7 +94,7 @@ Handle Bootstrapper::GetNativeSource(NativeType type, int index) { new NativesExternalStringResource(type, index); Handle source_code = isolate_->factory()->NewNativeSourceString(resource); - DCHECK(source_code->is_short()); + DCHECK(source_code->is_uncached()); return source_code; } @@ -146,8 +151,7 @@ void Bootstrapper::TearDown() { extensions_cache_.Initialize(isolate_, false); // Yes, symmetrical } - -class Genesis BASE_EMBEDDED { +class Genesis { public: Genesis(Isolate* isolate, MaybeHandle maybe_global_proxy, v8::Local global_proxy_template, @@ -156,7 +160,7 @@ class Genesis BASE_EMBEDDED { GlobalContextType context_type); Genesis(Isolate* isolate, MaybeHandle maybe_global_proxy, v8::Local global_proxy_template); - ~Genesis() { } + ~Genesis() = default; Isolate* isolate() const { return isolate_; } Factory* factory() const { return isolate_->factory(); } @@ -1263,7 +1267,7 @@ Handle Genesis::CreateNewGlobals( isolate()); js_global_object_function = ApiNatives::CreateApiFunction( isolate(), js_global_object_constructor, factory()->the_hole_value(), - ApiNatives::GlobalObjectType); + JS_GLOBAL_OBJECT_TYPE); } js_global_object_function->initial_map()->set_is_prototype_map(true); @@ -1289,7 +1293,7 @@ Handle Genesis::CreateNewGlobals( FunctionTemplateInfo::cast(data->constructor()), isolate()); global_proxy_function = ApiNatives::CreateApiFunction( isolate(), global_constructor, factory()->the_hole_value(), - ApiNatives::GlobalProxyType); + JS_GLOBAL_PROXY_TYPE); } global_proxy_function->initial_map()->set_is_access_check_needed(true); global_proxy_function->initial_map()->set_has_hidden_prototype(true); @@ -1731,6 +1735,8 @@ void Genesis::InitializeGlobal(Handle global_object, Builtins::kArrayPrototypeFind, 1, false); 
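// (Gloss, inferred from usage rather than stated in this patch:
//  SimpleInstallFunction(isolate, target, name, builtin, length, adapt)
//  installs |builtin| on |target| under |name|; the integer becomes the
//  resulting function's .length.)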
SimpleInstallFunction(isolate_, proto, "findIndex", Builtins::kArrayPrototypeFindIndex, 1, false); + SimpleInstallFunction(isolate_, proto, "lastIndexOf", + Builtins::kArrayPrototypeLastIndexOf, 1, false); SimpleInstallFunction(isolate_, proto, "pop", Builtins::kArrayPrototypePop, 0, false); SimpleInstallFunction(isolate_, proto, "push", @@ -1739,19 +1745,14 @@ void Genesis::InitializeGlobal(Handle global_object, Builtins::kArrayPrototypeReverse, 0, false); SimpleInstallFunction(isolate_, proto, "shift", Builtins::kArrayPrototypeShift, 0, false); - SimpleInstallFunction(isolate_, proto, "unshift", Builtins::kArrayUnshift, - 1, false); + SimpleInstallFunction(isolate_, proto, "unshift", + Builtins::kArrayPrototypeUnshift, 1, false); SimpleInstallFunction(isolate_, proto, "slice", Builtins::kArrayPrototypeSlice, 2, false); SimpleInstallFunction(isolate_, proto, "sort", Builtins::kArrayPrototypeSort, 1, false); - if (FLAG_enable_experimental_builtins) { - SimpleInstallFunction(isolate_, proto, "splice", - Builtins::kArraySpliceTorque, 2, false); - } else { - SimpleInstallFunction(isolate_, proto, "splice", Builtins::kArraySplice, - 2, false); - } + SimpleInstallFunction(isolate_, proto, "splice", Builtins::kArraySplice, 2, + false); SimpleInstallFunction(isolate_, proto, "includes", Builtins::kArrayIncludes, 1, false); SimpleInstallFunction(isolate_, proto, "indexOf", Builtins::kArrayIndexOf, @@ -2191,9 +2192,11 @@ void Genesis::InitializeGlobal(Handle global_object, // Install the Symbol.prototype methods. SimpleInstallFunction(isolate_, prototype, "toString", - Builtins::kSymbolPrototypeToString, 0, true); + Builtins::kSymbolPrototypeToString, 0, true, + BuiltinFunctionId::kSymbolPrototypeToString); SimpleInstallFunction(isolate_, prototype, "valueOf", - Builtins::kSymbolPrototypeValueOf, 0, true); + Builtins::kSymbolPrototypeValueOf, 0, true, + BuiltinFunctionId::kSymbolPrototypeValueOf); // Install the @@toPrimitive function. Handle to_primitive = InstallFunction( @@ -2319,6 +2322,14 @@ void Genesis::InitializeGlobal(Handle global_object, SimpleInstallFunction(isolate_, prototype, "toJSON", Builtins::kDatePrototypeToJson, 1, false); +#ifdef V8_INTL_SUPPORT + SimpleInstallFunction(isolate_, prototype, "toLocaleString", + Builtins::kDatePrototypeToLocaleString, 0, false); + SimpleInstallFunction(isolate_, prototype, "toLocaleDateString", + Builtins::kDatePrototypeToLocaleDateString, 0, false); + SimpleInstallFunction(isolate_, prototype, "toLocaleTimeString", + Builtins::kDatePrototypeToLocaleTimeString, 0, false); +#else // Install Intl fallback functions. SimpleInstallFunction(isolate_, prototype, "toLocaleString", Builtins::kDatePrototypeToString, 0, false); @@ -2326,6 +2337,7 @@ void Genesis::InitializeGlobal(Handle global_object, Builtins::kDatePrototypeToDateString, 0, false); SimpleInstallFunction(isolate_, prototype, "toLocaleTimeString", Builtins::kDatePrototypeToTimeString, 0, false); +#endif // V8_INTL_SUPPORT // Install the @@toPrimitive function. 
Handle to_primitive = InstallFunction( @@ -2890,11 +2902,22 @@ void Genesis::InitializeGlobal(Handle global_object, { Handle date_time_format_constructor = InstallFunction( - isolate_, intl, "DateTimeFormat", JS_OBJECT_TYPE, DateFormat::kSize, - 0, factory->the_hole_value(), Builtins::kIllegal); + isolate_, intl, "DateTimeFormat", JS_INTL_DATE_TIME_FORMAT_TYPE, + JSDateTimeFormat::kSize, 0, factory->the_hole_value(), + Builtins::kDateTimeFormatConstructor); + date_time_format_constructor->shared()->set_length(0); + date_time_format_constructor->shared()->DontAdaptArguments(); + InstallWithIntrinsicDefaultProto( + isolate_, date_time_format_constructor, + Context::INTL_DATE_TIME_FORMAT_FUNCTION_INDEX); + native_context()->set_intl_date_time_format_function( *date_time_format_constructor); + SimpleInstallFunction( + isolate(), date_time_format_constructor, "supportedLocalesOf", + Builtins::kDateTimeFormatSupportedLocalesOf, 1, false); + Handle prototype( JSObject::cast(date_time_format_constructor->prototype()), isolate_); @@ -2911,21 +2934,22 @@ void Genesis::InitializeGlobal(Handle global_object, SimpleInstallGetter(isolate_, prototype, factory->InternalizeUtf8String("format"), Builtins::kDateTimeFormatPrototypeFormat, false); - - { - Handle info = SimpleCreateBuiltinSharedFunctionInfo( - isolate_, Builtins::kDateTimeFormatInternalFormat, - factory->empty_string(), 1); - native_context()->set_date_format_internal_format_shared_fun(*info); - } } { Handle number_format_constructor = InstallFunction( - isolate_, intl, "NumberFormat", JS_OBJECT_TYPE, NumberFormat::kSize, - 0, factory->the_hole_value(), Builtins::kIllegal); - native_context()->set_intl_number_format_function( - *number_format_constructor); + isolate_, intl, "NumberFormat", JS_INTL_NUMBER_FORMAT_TYPE, + JSNumberFormat::kSize, 0, factory->the_hole_value(), + Builtins::kNumberFormatConstructor); + number_format_constructor->shared()->set_length(0); + number_format_constructor->shared()->DontAdaptArguments(); + InstallWithIntrinsicDefaultProto( + isolate_, number_format_constructor, + Context::INTL_NUMBER_FORMAT_FUNCTION_INDEX); + + SimpleInstallFunction( + isolate(), number_format_constructor, "supportedLocalesOf", + Builtins::kNumberFormatSupportedLocalesOf, 1, false); Handle prototype( JSObject::cast(number_format_constructor->prototype()), isolate_); @@ -2936,20 +2960,16 @@ void Genesis::InitializeGlobal(Handle global_object, factory->Object_string(), static_cast(DONT_ENUM | READ_ONLY)); + SimpleInstallFunction(isolate_, prototype, "resolvedOptions", + Builtins::kNumberFormatPrototypeResolvedOptions, 0, + false); + SimpleInstallFunction(isolate_, prototype, "formatToParts", Builtins::kNumberFormatPrototypeFormatToParts, 1, false); SimpleInstallGetter(isolate_, prototype, factory->InternalizeUtf8String("format"), Builtins::kNumberFormatPrototypeFormatNumber, false); - - { - Handle info = SimpleCreateBuiltinSharedFunctionInfo( - isolate_, Builtins::kNumberFormatInternalFormatNumber, - factory->empty_string(), 1); - native_context()->set_number_format_internal_format_number_shared_fun( - *info); - } } { @@ -2960,6 +2980,10 @@ void Genesis::InitializeGlobal(Handle global_object, InstallWithIntrinsicDefaultProto(isolate_, collator_constructor, Context::INTL_COLLATOR_FUNCTION_INDEX); + SimpleInstallFunction(isolate(), collator_constructor, + "supportedLocalesOf", + Builtins::kCollatorSupportedLocalesOf, 1, false); + Handle prototype( JSObject::cast(collator_constructor->prototype()), isolate_); @@ -2969,25 +2993,28 @@ void 
Genesis::InitializeGlobal(Handle global_object, factory->Object_string(), static_cast(DONT_ENUM | READ_ONLY)); + SimpleInstallFunction(isolate_, prototype, "resolvedOptions", + Builtins::kCollatorPrototypeResolvedOptions, 0, + false); + SimpleInstallGetter(isolate_, prototype, factory->InternalizeUtf8String("compare"), Builtins::kCollatorPrototypeCompare, false); - - { - Handle info = SimpleCreateBuiltinSharedFunctionInfo( - isolate_, Builtins::kCollatorInternalCompare, - factory->empty_string(), 2); - native_context()->set_collator_internal_compare_shared_fun(*info); - } } { - Handle v8_break_iterator_constructor = - InstallFunction(isolate_, intl, "v8BreakIterator", JS_OBJECT_TYPE, - V8BreakIterator::kSize, 0, factory->the_hole_value(), - Builtins::kIllegal); - native_context()->set_intl_v8_break_iterator_function( - *v8_break_iterator_constructor); + Handle v8_break_iterator_constructor = InstallFunction( + isolate_, intl, "v8BreakIterator", JS_INTL_V8_BREAK_ITERATOR_TYPE, + JSV8BreakIterator::kSize, 0, factory->the_hole_value(), + Builtins::kV8BreakIteratorConstructor); + v8_break_iterator_constructor->shared()->DontAdaptArguments(); + InstallWithIntrinsicDefaultProto( + isolate_, v8_break_iterator_constructor, + Context::INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX); + + SimpleInstallFunction( + isolate_, v8_break_iterator_constructor, "supportedLocalesOf", + Builtins::kV8BreakIteratorSupportedLocalesOf, 1, false); Handle prototype( JSObject::cast(v8_break_iterator_constructor->prototype()), isolate_); @@ -2998,17 +3025,29 @@ void Genesis::InitializeGlobal(Handle global_object, factory->Object_string(), static_cast(DONT_ENUM | READ_ONLY)); + SimpleInstallFunction(isolate_, prototype, "resolvedOptions", + Builtins::kV8BreakIteratorPrototypeResolvedOptions, + 0, false); + SimpleInstallGetter(isolate_, prototype, factory->InternalizeUtf8String("adoptText"), - Builtins::kBreakIteratorPrototypeAdoptText, false); + Builtins::kV8BreakIteratorPrototypeAdoptText, false); - { - Handle info = SimpleCreateBuiltinSharedFunctionInfo( - isolate_, Builtins::kBreakIteratorInternalAdoptText, - factory->empty_string(), 1); - native_context()->set_break_iterator_internal_adopt_text_shared_fun( - *info); - } + SimpleInstallGetter(isolate_, prototype, + factory->InternalizeUtf8String("first"), + Builtins::kV8BreakIteratorPrototypeFirst, false); + + SimpleInstallGetter(isolate_, prototype, + factory->InternalizeUtf8String("next"), + Builtins::kV8BreakIteratorPrototypeNext, false); + + SimpleInstallGetter(isolate_, prototype, + factory->InternalizeUtf8String("current"), + Builtins::kV8BreakIteratorPrototypeCurrent, false); + + SimpleInstallGetter(isolate_, prototype, + factory->InternalizeUtf8String("breakType"), + Builtins::kV8BreakIteratorPrototypeBreakType, false); } { @@ -3021,6 +3060,10 @@ void Genesis::InitializeGlobal(Handle global_object, isolate_, plural_rules_constructor, Context::INTL_PLURAL_RULES_FUNCTION_INDEX); + SimpleInstallFunction(isolate(), plural_rules_constructor, + "supportedLocalesOf", + Builtins::kPluralRulesSupportedLocalesOf, 1, false); + Handle prototype( JSObject::cast(plural_rules_constructor->prototype()), isolate_); @@ -3029,6 +3072,13 @@ void Genesis::InitializeGlobal(Handle global_object, isolate_, prototype, factory->to_string_tag_symbol(), factory->Object_string(), static_cast(DONT_ENUM | READ_ONLY)); + + SimpleInstallFunction(isolate_, prototype, "resolvedOptions", + Builtins::kPluralRulesPrototypeResolvedOptions, 0, + false); + + SimpleInstallFunction(isolate_, prototype, 
"select", + Builtins::kPluralRulesPrototypeSelect, 1, false); } } #endif // V8_INTL_SUPPORT @@ -3088,7 +3138,7 @@ void Genesis::InitializeGlobal(Handle global_object, SimpleInstallFunction(isolate_, atomics_object, "wake", Builtins::kAtomicsWake, 3, true); SimpleInstallFunction(isolate_, atomics_object, "notify", - Builtins::kAtomicsWake, 3, true); + Builtins::kAtomicsNotify, 3, true); } { // -- T y p e d A r r a y @@ -3435,8 +3485,9 @@ void Genesis::InitializeGlobal(Handle global_object, SimpleInstallFunction(isolate_, prototype, "delete", Builtins::kWeakMapPrototypeDelete, 1, true); - SimpleInstallFunction(isolate_, prototype, "get", Builtins::kWeakMapGet, 1, - true); + Handle weakmap_get = SimpleInstallFunction( + isolate_, prototype, "get", Builtins::kWeakMapGet, 1, true); + native_context()->set_weakmap_get(*weakmap_get); SimpleInstallFunction(isolate_, prototype, "has", Builtins::kWeakMapHas, 1, true); Handle weakmap_set = SimpleInstallFunction( @@ -4269,6 +4320,7 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields) EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import) EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta) EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_separator) +EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_json_stringify) #undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE @@ -4586,6 +4638,9 @@ void Genesis::InitializeGlobal_harmony_intl_list_format() { list_format_fun->shared()->set_length(0); list_format_fun->shared()->DontAdaptArguments(); + SimpleInstallFunction(isolate(), list_format_fun, "supportedLocalesOf", + Builtins::kListFormatSupportedLocalesOf, 1, false); + // Setup %ListFormatPrototype%. Handle prototype( JSObject::cast(list_format_fun->instance_prototype()), isolate()); @@ -4628,7 +4683,7 @@ void Genesis::InitializeGlobal_harmony_locale() { // Install the @@toStringTag property on the {prototype}. JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(), - factory()->NewStringFromAsciiChecked("Locale"), + factory()->NewStringFromStaticChars("Intl.Locale"), static_cast(DONT_ENUM | READ_ONLY)); SimpleInstallFunction(isolate(), prototype, "toString", @@ -4687,6 +4742,10 @@ void Genesis::InitializeGlobal_harmony_intl_relative_time_format() { relative_time_format_fun->shared()->set_length(0); relative_time_format_fun->shared()->DontAdaptArguments(); + SimpleInstallFunction( + isolate(), relative_time_format_fun, "supportedLocalesOf", + Builtins::kRelativeTimeFormatSupportedLocalesOf, 1, false); + // Setup %RelativeTimeFormatPrototype%. Handle prototype( JSObject::cast(relative_time_format_fun->instance_prototype()), @@ -4710,6 +4769,8 @@ void Genesis::InitializeGlobal_harmony_intl_relative_time_format() { #endif // V8_INTL_SUPPORT +void Genesis::InitializeGlobal_harmony_regexp_sequence() {} + Handle Genesis::CreateArrayBuffer( Handle name, ArrayBufferKind array_buffer_kind) { // Create the %ArrayBufferPrototype% diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h index e3ba8c06f2149f..4ad02eb8363164 100644 --- a/deps/v8/src/bootstrapper.h +++ b/deps/v8/src/bootstrapper.h @@ -18,7 +18,7 @@ namespace internal { // (array.js, etc.) to precompiled functions. Instead of mapping // names to functions it might make sense to let the JS2C tool // generate an index for each native JS file. 
-class SourceCodeCache final BASE_EMBEDDED { +class SourceCodeCache final { public: explicit SourceCodeCache(Script::Type type) : type_(type), cache_(nullptr) {} @@ -122,8 +122,7 @@ class Bootstrapper final { DISALLOW_COPY_AND_ASSIGN(Bootstrapper); }; - -class BootstrapperActive final BASE_EMBEDDED { +class BootstrapperActive final { public: explicit BootstrapperActive(Bootstrapper* bootstrapper) : bootstrapper_(bootstrapper) { diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index c18811a4b6c26f..e97a8cf0253fa1 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -60,8 +60,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { // Run the native code for the InternalArray function called as a normal // function. - // tail call a stub - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl), RelocInfo::CODE_TARGET); } @@ -122,7 +120,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ SmiUntag(r0); // The receiver for the builtin/api call. - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); // Set up pointer to last argument. __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset)); @@ -188,7 +186,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { Label post_instantiation_deopt_entry, not_create_implicit_receiver; // Preserve the incoming parameters on the stack. - __ LoadRoot(r4, Heap::kTheHoleValueRootIndex); + __ LoadRoot(r4, RootIndex::kTheHoleValue); __ SmiTag(r0); __ Push(cp, r0, r1, r4, r3); @@ -214,7 +212,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Else: use TheHoleValue as receiver for constructor call __ bind(¬_create_implicit_receiver); - __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); + __ LoadRoot(r0, RootIndex::kTheHoleValue); // ----------- S t a t e ------------- // -- r0: receiver @@ -303,7 +301,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { Label use_receiver, do_throw, leave_frame; // If the result is undefined, we jump out to using the implicit receiver. - __ JumpIfRoot(r0, Heap::kUndefinedValueRootIndex, &use_receiver); + __ JumpIfRoot(r0, RootIndex::kUndefinedValue, &use_receiver); // Otherwise we do a smi check and fall through to check if the return value // is a valid receiver. @@ -325,7 +323,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // on-stack receiver as the result. __ bind(&use_receiver); __ ldr(r0, MemOperand(sp, 0 * kPointerSize)); - __ JumpIfRoot(r0, Heap::kTheHoleValueRootIndex, &do_throw); + __ JumpIfRoot(r0, RootIndex::kTheHoleValue, &do_throw); __ bind(&leave_frame); // Restore smi-tagged arguments count from the frame. @@ -399,7 +397,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". Label stack_overflow; - __ CompareRoot(sp, Heap::kRealStackLimitRootIndex); + __ CompareRoot(sp, RootIndex::kRealStackLimit); __ b(lo, &stack_overflow); // Push receiver. @@ -466,7 +464,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); __ Push(r1, r4); // Push hole as receiver since we do not use it for stepping. 
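// (Here and throughout the builtins below, Heap::k*RootIndex constants are
//  renamed to the new RootIndex enum class; the emitted machine code should
//  be unchanged by the rename itself.)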
- __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(r1); __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset)); @@ -503,7 +501,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, // Check the stack for overflow. We are not trying to catch // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. - __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex); + __ LoadRoot(scratch, RootIndex::kRealStackLimit); // Make scratch the space we have left. The stack might already be overflowed // here which will cause scratch to become negative. __ sub(scratch, sp, scratch); @@ -573,7 +571,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Initialize all JavaScript callee-saved registers, since they will be seen // by the garbage collector as part of handlers. - __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r4, RootIndex::kUndefinedValue); __ mov(r5, Operand(r4)); __ mov(r6, Operand(r4)); __ mov(r8, Operand(r4)); @@ -878,7 +876,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Do a stack check to ensure we don't go over the limit. Label ok; __ sub(r9, sp, Operand(r4)); - __ LoadRoot(r2, Heap::kRealStackLimitRootIndex); + __ LoadRoot(r2, RootIndex::kRealStackLimit); __ cmp(r9, Operand(r2)); __ b(hs, &ok); __ CallRuntime(Runtime::kThrowStackOverflow); @@ -887,7 +885,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // If ok, push undefined as the initial value for all register file entries. Label loop_header; Label loop_check; - __ LoadRoot(r9, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r9, RootIndex::kUndefinedValue); __ b(&loop_check, al); __ bind(&loop_header); // TODO(rmcilroy): Consider doing more than one push per loop iteration. @@ -907,7 +905,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne); // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. @@ -987,7 +985,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( // Push "undefined" as the receiver arg if we need to. if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ mov(r3, r0); // Argument count is correct. } @@ -1201,7 +1199,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { __ push(r4); } for (int i = 0; i < 3 - j; ++i) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); } if (j < 3) { __ jmp(&args_done); @@ -1302,15 +1300,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { __ Ret(); } -static void Generate_OnStackReplacementHelper(MacroAssembler* masm, - bool has_handler_frame) { +void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame. 
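// (Interpreter OSR is always entered with a STUB handler frame on top of
//  the JavaScript frame, which is why the has_handler_frame parameter is
//  dropped and the caller-FP indirection below becomes unconditional.)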
- if (has_handler_frame) { - __ ldr(r0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(r0, MemOperand(r0, JavaScriptFrameConstants::kFunctionOffset)); - } else { - __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - } + __ ldr(r0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ldr(r0, MemOperand(r0, JavaScriptFrameConstants::kFunctionOffset)); { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); @@ -1327,11 +1320,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ bind(&skip); - // Drop any potential handler frame that is be sitting on top of the actual + // Drop the handler frame that is be sitting on top of the actual // JavaScript frame. This is the case then OSR is triggered from bytecode. - if (has_handler_frame) { - __ LeaveFrame(StackFrame::STUB); - } + __ LeaveFrame(StackFrame::STUB); // Load deoptimization data from the code object. // = [#deoptimization_data_offset] @@ -1354,14 +1345,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, } } -void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, false); -} - -void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, true); -} - // static void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // ----------- S t a t e ------------- @@ -1375,7 +1358,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // arguments from the stack (including the receiver), and push thisArg (if // present) instead. { - __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r5, RootIndex::kUndefinedValue); __ mov(r2, r5); __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver __ sub(r4, r0, Operand(1), SetCC); @@ -1398,8 +1381,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // 3. Tail call with no arguments if argArray is null or undefined. Label no_arguments; - __ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments); - __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments); + __ JumpIfRoot(r2, RootIndex::kNullValue, &no_arguments); + __ JumpIfRoot(r2, RootIndex::kUndefinedValue, &no_arguments); // 4a. Apply the receiver to the given argArray. __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), @@ -1422,7 +1405,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { Label done; __ cmp(r0, Operand::Zero()); __ b(ne, &done); - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ add(r0, r0, Operand(1)); __ bind(&done); } @@ -1471,7 +1454,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { // remove all arguments from the stack (including the receiver), and push // thisArgument (if present) instead. { - __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r1, RootIndex::kUndefinedValue); __ mov(r5, r1); __ mov(r2, r1); __ sub(r4, r0, Operand(1), SetCC); @@ -1513,7 +1496,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { // arguments from the stack (including the receiver), and push thisArgument // (if present) instead. 
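// (Below, the receiver slot is overwritten with undefined:
//  Reflect.construct never forwards a this-argument, since construction
//  creates its own receiver.)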
{ - __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r1, RootIndex::kUndefinedValue); __ mov(r2, r1); __ str(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver __ sub(r4, r0, Operand(1), SetCC); @@ -1600,26 +1583,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ bind(&ok); } - // Check for stack overflow. - { - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack limit". - Label done; - __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex); - // The stack might already be overflowed here which will cause 'scratch' to - // become negative. - __ sub(scratch, sp, scratch); - // Check if the arguments will overflow the stack. - __ cmp(scratch, Operand(r4, LSL, kPointerSizeLog2)); - __ b(gt, &done); // Signed comparison. - __ TailCallRuntime(Runtime::kThrowStackOverflow); - __ bind(&done); - } + Label stack_overflow; + Generate_StackOverflowCheck(masm, r4, scratch, &stack_overflow); // Push arguments onto the stack (thisArgument is already on the stack). { __ mov(r6, Operand(0)); - __ LoadRoot(r5, Heap::kTheHoleValueRootIndex); + __ LoadRoot(r5, RootIndex::kTheHoleValue); Label done, loop; __ bind(&loop); __ cmp(r6, r4); @@ -1627,7 +1597,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ add(scratch, r2, Operand(r6, LSL, kPointerSizeLog2)); __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize)); __ cmp(scratch, r5); - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq); + __ LoadRoot(scratch, RootIndex::kUndefinedValue, eq); __ Push(scratch); __ add(r6, r6, Operand(1)); __ b(&loop); @@ -1637,6 +1607,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Tail-call to the actual Call or Construct builtin. __ Jump(code, RelocInfo::CODE_TARGET); + + __ bind(&stack_overflow); + __ TailCallRuntime(Runtime::kThrowStackOverflow); } // static @@ -1771,9 +1744,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ b(hs, &done_convert); if (mode != ConvertReceiverMode::kNotNullOrUndefined) { Label convert_global_proxy; - __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, - &convert_global_proxy); - __ JumpIfNotRoot(r3, Heap::kNullValueRootIndex, &convert_to_object); + __ JumpIfRoot(r3, RootIndex::kUndefinedValue, &convert_global_proxy); + __ JumpIfNotRoot(r3, RootIndex::kNullValue, &convert_to_object); __ bind(&convert_global_proxy); { // Patch receiver to global proxy. @@ -1859,8 +1831,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack // limit". - __ CompareRoot(sp, Heap::kRealStackLimitRootIndex); - __ b(gt, &done); // Signed comparison. + __ CompareRoot(sp, RootIndex::kRealStackLimit); + __ b(hs, &done); // Restore the stack pointer. __ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2)); { @@ -1987,7 +1959,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // Calling convention for function specific ConstructStubs require // r2 to contain either an AllocationSite or undefined. 
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r2, RootIndex::kUndefinedValue); Label call_generic_stub; @@ -2165,7 +2137,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // r1: function // r2: expected number of arguments // r3: new target (passed through to callee) - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2)); // Adjust for frame. __ sub(r4, r4, @@ -2331,7 +2303,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Check result for exception sentinel. Label exception_returned; - __ CompareRoot(r0, Heap::kExceptionRootIndex); + __ CompareRoot(r0, RootIndex::kException); __ b(eq, &exception_returned); // Check that there is no pending exception, otherwise we @@ -2342,7 +2314,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, IsolateAddressId::kPendingExceptionAddress, masm->isolate()); __ Move(r3, pending_exception_address); __ ldr(r3, MemOperand(r3)); - __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); + __ CompareRoot(r3, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. __ b(eq, &okay); __ stop("Unexpected pending exception"); @@ -2585,6 +2557,10 @@ namespace { void GenerateInternalArrayConstructorCase(MacroAssembler* masm, ElementsKind kind) { + // Load undefined into the allocation site parameter as required by + // ArrayNArgumentsConstructor. + __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue); + __ cmp(r0, Operand(1)); __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind) diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 61fee9013ba81e..3d62a638e5ddbe 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -55,7 +55,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { // Run the native code for the InternalArray function called as a normal // function. - __ LoadRoot(x2, Heap::kUndefinedValueRootIndex); __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl), RelocInfo::CODE_TARGET); } @@ -129,7 +128,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ Claim(slot_count); // Preserve the incoming parameters on the stack. - __ LoadRoot(x10, Heap::kTheHoleValueRootIndex); + __ LoadRoot(x10, RootIndex::kTheHoleValue); // Compute a pointer to the slot immediately above the location on the // stack to which arguments will be later copied. @@ -249,7 +248,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Else: use TheHoleValue as receiver for constructor call __ Bind(¬_create_implicit_receiver); - __ LoadRoot(x0, Heap::kTheHoleValueRootIndex); + __ LoadRoot(x0, RootIndex::kTheHoleValue); // ----------- S t a t e ------------- // -- x0: receiver @@ -342,7 +341,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { Label use_receiver, do_throw, leave_frame; // If the result is undefined, we jump out to using the implicit receiver. - __ CompareRoot(x0, Heap::kUndefinedValueRootIndex); + __ CompareRoot(x0, RootIndex::kUndefinedValue); __ B(eq, &use_receiver); // Otherwise we do a smi check and fall through to check if the return value @@ -364,7 +363,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // on-stack receiver as the result. 
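// (If that stack slot still holds the hole, a derived constructor returned
//  without ever calling super(), and we throw below.)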
__ Bind(&use_receiver); __ Peek(x0, 0 * kPointerSize); - __ CompareRoot(x0, Heap::kTheHoleValueRootIndex); + __ CompareRoot(x0, RootIndex::kTheHoleValue); __ B(eq, &do_throw); __ Bind(&leave_frame); @@ -425,7 +424,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". Label stack_overflow; - __ CompareRoot(sp, Heap::kRealStackLimitRootIndex); + __ CompareRoot(sp, RootIndex::kRealStackLimit); __ B(lo, &stack_overflow); // Get number of arguments for generator function. @@ -508,7 +507,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { { FrameScope scope(masm, StackFrame::INTERNAL); // Push hole as receiver since we do not use it for stepping. - __ LoadRoot(x5, Heap::kTheHoleValueRootIndex); + __ LoadRoot(x5, RootIndex::kTheHoleValue); __ Push(x1, padreg, x4, x5); __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(padreg, x1); @@ -543,7 +542,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, // We are not trying to catch interruptions (e.g. debug break and // preemption) here, so the "real stack limit" is checked. Label enough_stack_space; - __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex); + __ LoadRoot(scratch, RootIndex::kRealStackLimit); // Make scratch the space we have left. The stack might already be overflowed // here which will cause scratch to become negative. __ Sub(scratch, sp, scratch); @@ -639,7 +638,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Initialize all JavaScript callee-saved registers, since they will be seen // by the garbage collector as part of handlers. // The original values have been saved in JSEntryStub::GenerateBody(). - __ LoadRoot(x19, Heap::kUndefinedValueRootIndex); + __ LoadRoot(x19, RootIndex::kUndefinedValue); __ Mov(x20, x19); __ Mov(x21, x19); __ Mov(x22, x19); @@ -957,7 +956,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Do a stack check to ensure we don't go over the limit. Label ok; __ Sub(x10, sp, Operand(x11)); - __ CompareRoot(x10, Heap::kRealStackLimitRootIndex); + __ CompareRoot(x10, RootIndex::kRealStackLimit); __ B(hs, &ok); __ CallRuntime(Runtime::kThrowStackOverflow); __ Bind(&ok); @@ -966,7 +965,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Note: there should always be at least one stack slot for the return // register in the register file. Label loop_header; - __ LoadRoot(x10, Heap::kUndefinedValueRootIndex); + __ LoadRoot(x10, RootIndex::kUndefinedValue); __ Lsr(x11, x11, kPointerSizeLog2); // Round up the number of registers to a multiple of 2, to align the stack // to 16 bytes. @@ -988,7 +987,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Bind(&no_incoming_new_target_or_generator_register); // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. @@ -1081,7 +1080,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm, if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { // Store "undefined" as the receiver arg if we need to. 
Register receiver = x14; - __ LoadRoot(receiver, Heap::kUndefinedValueRootIndex); + __ LoadRoot(receiver, RootIndex::kUndefinedValue); __ SlotAddress(stack_addr, num_args); __ Str(receiver, MemOperand(stack_addr)); __ Mov(slots_to_copy, num_args); @@ -1300,7 +1299,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { Register scratch1 = x12; Register scratch2 = x13; Register scratch3 = x14; - __ LoadRoot(undef, Heap::kUndefinedValueRootIndex); + __ LoadRoot(undef, RootIndex::kUndefinedValue); Label at_least_one_arg; Label three_args; @@ -1452,15 +1451,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { __ Ret(); } -static void Generate_OnStackReplacementHelper(MacroAssembler* masm, - bool has_handler_frame) { +void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame. - if (has_handler_frame) { - __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset)); - } else { - __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - } + __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset)); { FrameScope scope(masm, StackFrame::INTERNAL); @@ -1476,11 +1470,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ Bind(&skip); - // Drop any potential handler frame that is be sitting on top of the actual + // Drop the handler frame that is be sitting on top of the actual // JavaScript frame. This is the case then OSR is triggered from bytecode. - if (has_handler_frame) { - __ LeaveFrame(StackFrame::STUB); - } + __ LeaveFrame(StackFrame::STUB); // Load deoptimization data from the code object. // = [#deoptimization_data_offset] @@ -1501,14 +1493,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ Ret(); } -void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, false); -} - -void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, true); -} - // static void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // ----------- S t a t e ------------- @@ -1526,8 +1510,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { Register undefined_value = x3; Register null_value = x4; - __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex); - __ LoadRoot(null_value, Heap::kNullValueRootIndex); + __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); + __ LoadRoot(null_value, RootIndex::kNullValue); // 1. Load receiver into x1, argArray into x2 (if present), remove all // arguments from the stack (including the receiver), and push thisArg (if @@ -1609,7 +1593,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { Label non_zero; Register scratch = x10; __ Cbnz(argc, &non_zero); - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + __ LoadRoot(scratch, RootIndex::kUndefinedValue); // Overwrite receiver with undefined, which will be the new receiver. // We do not need to overwrite the padding slot above it with anything. __ Poke(scratch, 0); @@ -1666,7 +1650,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { Register this_argument = x4; Register undefined_value = x3; - __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex); + __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); // 1. 
Load target into x1 (if present), argumentsList into x2 (if present), // remove all arguments from the stack (including the receiver), and push @@ -1743,7 +1727,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { Register new_target = x3; Register undefined_value = x4; - __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex); + __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); // 1. Load target into x1 (if present), argumentsList into x2 (if present), // new.target into x3 (if present, otherwise use target), remove all @@ -1933,21 +1917,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Register argc = x0; Register len = x4; - // Check for stack overflow. - { - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack limit". - Label done; - __ LoadRoot(x10, Heap::kRealStackLimitRootIndex); - // Make x10 the space we have left. The stack might already be overflowed - // here which will cause x10 to become negative. - __ Sub(x10, sp, x10); - // Check if the arguments will overflow the stack. - __ Cmp(x10, Operand(len, LSL, kPointerSizeLog2)); - __ B(gt, &done); // Signed comparison. - __ TailCallRuntime(Runtime::kThrowStackOverflow); - __ Bind(&done); - } + Label stack_overflow; + Generate_StackOverflowCheck(masm, len, &stack_overflow); // Skip argument setup if we don't need to push any varargs. Label done; @@ -1963,8 +1934,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Register undefined_value = x12; Register scratch = x13; __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag); - __ LoadRoot(the_hole_value, Heap::kTheHoleValueRootIndex); - __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex); + __ LoadRoot(the_hole_value, RootIndex::kTheHoleValue); + __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); // We do not use the CompareRoot macro as it would do a LoadRoot behind the // scenes and we want to avoid that in a loop. // TODO(all): Consider using Ldp and Stp. @@ -1980,6 +1951,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Tail-call to the actual Call or Construct builtin. __ Jump(code, RelocInfo::CODE_TARGET); + + __ bind(&stack_overflow); + __ TailCallRuntime(Runtime::kThrowStackOverflow); } // static @@ -2121,9 +2095,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ B(hs, &done_convert); if (mode != ConvertReceiverMode::kNotNullOrUndefined) { Label convert_global_proxy; - __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex, - &convert_global_proxy); - __ JumpIfNotRoot(x3, Heap::kNullValueRootIndex, &convert_to_object); + __ JumpIfRoot(x3, RootIndex::kUndefinedValue, &convert_global_proxy); + __ JumpIfNotRoot(x3, RootIndex::kNullValue, &convert_to_object); __ Bind(&convert_global_proxy); { // Patch receiver to global proxy. @@ -2211,13 +2184,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // (i.e. debug break and preemption) here, so check the "real stack // limit". Label done; - __ LoadRoot(x10, Heap::kRealStackLimitRootIndex); + __ LoadRoot(x10, RootIndex::kRealStackLimit); // Make x10 the space we have left. The stack might already be overflowed // here which will cause x10 to become negative. __ Sub(x10, sp, x10); // Check if the arguments will overflow the stack. __ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2)); - __ B(gt, &done); // Signed comparison. 
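// (Note the condition change: signed gt becomes unsigned hs, matching the
//  unsigned nature of the compared sizes; the equal case now also skips the
//  overflow throw.)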
+ __ B(hs, &done); __ TailCallRuntime(Runtime::kThrowStackOverflow); __ Bind(&done); } @@ -2379,7 +2352,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // Calling convention for function specific ConstructStubs require // x2 to contain either an AllocationSite or undefined. - __ LoadRoot(x2, Heap::kUndefinedValueRootIndex); + __ LoadRoot(x2, RootIndex::kUndefinedValue); Label call_generic_stub; @@ -2586,7 +2559,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // Fill the remaining expected arguments with undefined. __ RecordComment("-- Fill slots with undefined --"); __ Sub(copy_end, copy_to, Operand(scratch1, LSL, kPointerSizeLog2)); - __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex); + __ LoadRoot(scratch1, RootIndex::kUndefinedValue); Label fill; __ Bind(&fill); @@ -2856,7 +2829,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Check result for exception sentinel. Label exception_returned; - __ CompareRoot(result, Heap::kExceptionRootIndex); + __ CompareRoot(result, RootIndex::kException); __ B(eq, &exception_returned); // The call succeeded, so unwind the stack and return. @@ -3131,6 +3104,9 @@ void GenerateInternalArrayConstructorCase(MacroAssembler* masm, __ Bind(&n_case); // N arguments. + // Load undefined into the allocation site parameter as required by + // ArrayNArgumentsConstructor. + __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue); Handle code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor); __ Jump(code, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq index 8406123b202834..6b9ba934a48b07 100644 --- a/deps/v8/src/builtins/array-copywithin.tq +++ b/deps/v8/src/builtins/array-copywithin.tq @@ -4,7 +4,7 @@ module array { macro ConvertToRelativeIndex(index: Number, length: Number): Number { - return index < 0 ? max(index + length, 0) : min(index, length); + return index < 0 ? Max(index + length, 0) : Min(index, length); } // https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin @@ -17,32 +17,32 @@ module array { const length: Number = GetLengthProperty(context, object); // 3. Let relativeTarget be ? ToInteger(target). - const relative_target: Number = ToInteger_Inline(context, arguments[0]); + const relativeTarget: Number = ToInteger_Inline(context, arguments[0]); // 4. If relativeTarget < 0, let to be max((len + relativeTarget), 0); // else let to be min(relativeTarget, len). - let to: Number = ConvertToRelativeIndex(relative_target, length); + let to: Number = ConvertToRelativeIndex(relativeTarget, length); // 5. Let relativeStart be ? ToInteger(start). - const relative_start: Number = ToInteger_Inline(context, arguments[1]); + const relativeStart: Number = ToInteger_Inline(context, arguments[1]); // 6. If relativeStart < 0, let from be max((len + relativeStart), 0); // else let from be min(relativeStart, len). - let from: Number = ConvertToRelativeIndex(relative_start, length); + let from: Number = ConvertToRelativeIndex(relativeStart, length); // 7. If end is undefined, let relativeEnd be len; // else let relativeEnd be ? ToInteger(end). - let relative_end: Number = length; + let relativeEnd: Number = length; if (arguments[2] != Undefined) { - relative_end = ToInteger_Inline(context, arguments[2]); + relativeEnd = ToInteger_Inline(context, arguments[2]); } // 8. If relativeEnd < 0, let final be max((len + relativeEnd), 0); // else let final be min(relativeEnd, len). 
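    // (Worked example: for len == 5 and end == -1, relativeEnd is -1, so
    //  final = max(5 + (-1), 0) = 4 and copying stops before index 4.)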
- const final: Number = ConvertToRelativeIndex(relative_end, length); + const final: Number = ConvertToRelativeIndex(relativeEnd, length); // 9. Let count be min(final-from, len-to). - let count: Number = min(final - from, length - to); + let count: Number = Min(final - from, length - to); // 10. If from). Call(context, callbackfn, thisArg, kValue, k, o); @@ -32,10 +33,10 @@ module array { javascript builtin ArrayForEachLoopEagerDeoptContinuation( context: Context, receiver: Object, callback: Object, thisArg: Object, initialK: Object, length: Object): Object { - // The unsafe cast is safe because all continuation points in forEach are + // The unsafe Cast is safe because all continuation points in forEach are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. - const jsreceiver: JSReceiver = unsafe_cast(receiver); + const jsreceiver: JSReceiver = UnsafeCast(receiver); return ArrayForEachLoopContinuation( context, jsreceiver, callback, thisArg, Undefined, jsreceiver, initialK, length, Undefined); @@ -44,10 +45,10 @@ module array { javascript builtin ArrayForEachLoopLazyDeoptContinuation( context: Context, receiver: Object, callback: Object, thisArg: Object, initialK: Object, length: Object, result: Object): Object { - // The unsafe cast is safe because all continuation points in forEach are + // The unsafe Cast is safe because all continuation points in forEach are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. - const jsreceiver: JSReceiver = unsafe_cast(receiver); + const jsreceiver: JSReceiver = UnsafeCast(receiver); return ArrayForEachLoopContinuation( context, jsreceiver, callback, thisArg, Undefined, jsreceiver, initialK, length, Undefined); @@ -59,14 +60,14 @@ module array { to: Object): Object { try { const callbackfn: Callable = - cast(callback) otherwise Unexpected; - const k: Smi = cast(initialK) otherwise Unexpected; - const number_length: Number = cast(length) otherwise Unexpected; + Cast(callback) otherwise Unexpected; + const k: Number = Cast(initialK) otherwise Unexpected; + const numberLength: Number = Cast(length) otherwise Unexpected; return ArrayForEachTorqueContinuation( - context, receiver, number_length, callbackfn, thisArg, k); + context, receiver, numberLength, callbackfn, thisArg, k); } - label Unexpected { + label Unexpected deferred { unreachable; } } @@ -100,7 +101,7 @@ module array { } } } - label Slow { + label Slow deferred { goto Bailout(k); } } @@ -111,8 +112,8 @@ module array { Bailout(Smi) { let k: Smi = 0; try { - const smi_len: Smi = cast(len) otherwise Slow; - const a: JSArray = cast(o) otherwise Slow; + const smiLen: Smi = Cast(len) otherwise Slow; + const a: JSArray = Cast(o) otherwise Slow; const map: Map = a.map; if (!IsPrototypeInitialArrayPrototype(context, map)) goto Slow; @@ -121,14 +122,14 @@ module array { if (IsElementsKindGreaterThan(elementsKind, HOLEY_ELEMENTS)) { VisitAllElements( - context, a, smi_len, callbackfn, thisArg) + context, a, smiLen, callbackfn, thisArg) otherwise Bailout; } else { - VisitAllElements(context, a, smi_len, callbackfn, thisArg) + VisitAllElements(context, a, smiLen, callbackfn, thisArg) otherwise Bailout; } } - label Slow { + label Slow deferred { goto Bailout(k); } return Undefined; @@ -153,28 +154,28 @@ module array { goto TypeError; } const callbackfn: Callable = - cast(arguments[0]) otherwise TypeError; + Cast(arguments[0]) otherwise TypeError; // 4. If thisArg is present, let T be thisArg; else let T be undefined. 
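The eager and lazy deopt continuations above exist so that optimized code which bails out in the middle of a forEach can resume at iteration index k instead of restarting from 0. The resume-from-k shape, sketched in TypeScript (illustrative only; names are ours):

    // Resume a forEach-style loop from index k, as the continuations above do.
    function forEachFrom<T>(
        o: { length: number; [i: number]: T }, k: number,
        callback: (value: T, index: number, o: object) => void,
        thisArg?: unknown): void {
      for (; k < o.length; k++) {
        if (k in o) callback.call(thisArg, o[k], k, o);
      }
    }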
const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined; // Special cases. - let k: Smi = 0; + let k: Number = 0; try { return FastArrayForEach(context, o, len, callbackfn, thisArg) otherwise Bailout; } - label Bailout(k_value: Smi) { - k = k_value; + label Bailout(kValue: Smi) deferred { + k = kValue; } return ArrayForEachTorqueContinuation( context, o, len, callbackfn, thisArg, k); } - label TypeError { + label TypeError deferred { ThrowTypeError(context, kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError { + label NullOrUndefinedError deferred { ThrowTypeError( context, kCalledOnNullOrUndefined, 'Array.prototype.forEach'); } diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq new file mode 100644 index 00000000000000..a1d3e2a571f552 --- /dev/null +++ b/deps/v8/src/builtins/array-lastindexof.tq @@ -0,0 +1,151 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +module array { + macro LoadWithHoleCheck( + elements: FixedArrayBase, index: Smi): Object + labels IfHole; + + LoadWithHoleCheck(elements: FixedArrayBase, index: Smi): Object + labels IfHole { + const elements: FixedArray = UnsafeCast(elements); + const element: Object = elements[index]; + if (element == Hole) goto IfHole; + return element; + } + + LoadWithHoleCheck( + elements: FixedArrayBase, index: Smi): Object + labels IfHole { + const elements: FixedDoubleArray = UnsafeCast(elements); + const element: float64 = LoadDoubleWithHoleCheck(elements, index) + otherwise IfHole; + return AllocateHeapNumberWithValue(element); + } + + macro FastArrayLastIndexOf( + context: Context, array: JSArray, length: Smi, from: Smi, + searchElement: Object): Smi { + const elements: FixedArrayBase = array.elements; + let k: Smi = from; + while (k >= 0) { + try { + const element: Object = LoadWithHoleCheck(elements, k) + otherwise Hole; + + const same: Boolean = StrictEqual(searchElement, element); + if (same == True) { + assert(IsFastJSArray(array, context)); + return k; + } + } + label Hole {} // Do nothing for holes. + + --k; + } + + assert(IsFastJSArray(array, context)); + return -1; + } + + macro GetFromIndex( + context: Context, length: Number, + arguments: constexpr Arguments): Number { + // 4. If fromIndex is present, let n be ? ToInteger(fromIndex); + // else let n be len - 1. + const n: Number = arguments.length < 2 ? + length - 1 : + ToInteger_Inline(context, arguments[1], kTruncateMinusZero); + + // 5. If n >= 0, then. + let k: Number = SmiConstant(0); + if (n >= 0) { + // a. If n is -0, let k be +0; else let k be min(n, len - 1). + // If n was -0 it got truncated to 0.0, so taking the minimum is fine. + k = Min(n, length - 1); + } else { + // a. Let k be len + n. 
+ k = length + n; + } + return k; + } + + macro TryFastArrayLastIndexOf( + context: Context, receiver: JSReceiver, searchElement: Object, + from: Number): Object + labels Slow { + EnsureFastJSArray(context, receiver) otherwise Slow; + const array: JSArray = UnsafeCast(receiver); + + const length: Smi = array.length_fast; + if (length == 0) return SmiConstant(-1); + + const fromSmi: Smi = Cast(from) otherwise Slow; + const kind: ElementsKind = array.map.elements_kind; + if (IsFastSmiOrTaggedElementsKind(kind)) { + return FastArrayLastIndexOf( + context, array, length, fromSmi, searchElement); + } + assert(IsDoubleElementsKind(kind)); + return FastArrayLastIndexOf( + context, array, length, fromSmi, searchElement); + } + + macro GenericArrayLastIndexOf( + context: Context, object: JSReceiver, searchElement: Object, + from: Number): Object { + let k: Number = from; + + // 7. Repeat, while k >= 0. + while (k >= 0) { + // a. Let kPresent be ? HasProperty(O, ! ToString(k)). + const kPresent: Boolean = HasProperty(context, object, k); + + // b. If kPresent is true, then. + if (kPresent == True) { + // i. Let elementK be ? Get(O, ! ToString(k)). + const element: Object = GetProperty(context, object, k); + + // ii. Let same be the result of performing Strict Equality Comparison + // searchElement === elementK. + const same: Boolean = StrictEqual(searchElement, element); + + // iii. If same is true, return k. + if (same == True) return k; + } + + // c. Decrease k by 1. + --k; + } + + // 8. Return -1. + return SmiConstant(-1); + } + + // https://tc39.github.io/ecma262/#sec-array.prototype.lastIndexOf + javascript builtin ArrayPrototypeLastIndexOf( + context: Context, receiver: Object, ...arguments): Object { + // 1. Let O be ? ToObject(this value). + const object: JSReceiver = ToObject_Inline(context, receiver); + + // 2. Let len be ? ToLength(? Get(O, "length")). + const length: Number = GetLengthProperty(context, object); + + // 3. If len is 0, return -1. + if (length == SmiConstant(0)) return SmiConstant(-1); + + // Step 4 - 6. 
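GetFromIndex above condenses steps 4 to 6 of Array.prototype.lastIndexOf: an absent fromIndex starts the scan at len - 1, a non-negative fromIndex is clamped to len - 1, and a negative one counts back from the end (possibly landing before index 0, which makes the scan return -1 immediately). The same logic in TypeScript (a sketch; Math.trunc stands in for ToInteger on finite inputs):

    // Starting index k for lastIndexOf, per spec steps 4-6.
    function getFromIndex(length: number, fromIndex?: number): number {
      const n = fromIndex === undefined ? length - 1 : Math.trunc(fromIndex);
      // -0 truncates to +0, so the min() clamp below is safe.
      return n >= 0 ? Math.min(n, length - 1) : length + n;
    }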
+ const from: Number = GetFromIndex(context, length, arguments); + + const searchElement: Object = arguments[0]; + + try { + return TryFastArrayLastIndexOf(context, object, searchElement, from) + otherwise Baseline; + } + label Baseline { + return GenericArrayLastIndexOf(context, object, searchElement, from); + } + } +} diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq index 8db542ddefb7c4..72131b7e448f68 100644 --- a/deps/v8/src/builtins/array-reverse.tq +++ b/deps/v8/src/builtins/array-reverse.tq @@ -8,20 +8,20 @@ module array { LoadElement( elements: FixedArrayBase, index: Smi): Smi { - const elems: FixedArray = unsafe_cast(elements); - return unsafe_cast(elems[index]); + const elems: FixedArray = UnsafeCast(elements); + return UnsafeCast(elems[index]); } LoadElement( elements: FixedArrayBase, index: Smi): Object { - const elems: FixedArray = unsafe_cast(elements); + const elems: FixedArray = UnsafeCast(elements); return elems[index]; } LoadElement( elements: FixedArrayBase, index: Smi): float64 { try { - const elems: FixedDoubleArray = unsafe_cast(elements); + const elems: FixedDoubleArray = UnsafeCast(elements); return LoadDoubleWithHoleCheck(elems, index) otherwise Hole; } label Hole { @@ -36,19 +36,19 @@ module array { StoreElement( elements: FixedArrayBase, index: Smi, value: Smi) { - const elems: FixedArray = unsafe_cast(elements); + const elems: FixedArray = UnsafeCast(elements); StoreFixedArrayElementSmi(elems, index, value, SKIP_WRITE_BARRIER); } StoreElement( elements: FixedArrayBase, index: Smi, value: Object) { - const elems: FixedArray = unsafe_cast(elements); + const elems: FixedArray = UnsafeCast(elements); elems[index] = value; } StoreElement( elements: FixedArrayBase, index: Smi, value: float64) { - const elems: FixedDoubleArray = unsafe_cast(elements); + const elems: FixedDoubleArray = UnsafeCast(elements); assert(value == Float64SilenceNaN(value)); StoreFixedDoubleArrayElementWithSmiIndex(elems, index, value); @@ -63,10 +63,10 @@ module array { let upper: Smi = length - 1; while (lower < upper) { - const lower_value: T = LoadElement(elements, lower); - const upper_value: T = LoadElement(elements, upper); - StoreElement(elements, lower, upper_value); - StoreElement(elements, upper, lower_value); + const lowerValue: T = LoadElement(elements, lower); + const upperValue: T = LoadElement(elements, upper); + StoreElement(elements, lower, upperValue); + StoreElement(elements, upper, lowerValue); ++lower; --upper; } @@ -90,48 +90,48 @@ module array { let upper: Number = length - 1; while (lower < upper) { - let lower_value: Object = Undefined; - let upper_value: Object = Undefined; + let lowerValue: Object = Undefined; + let upperValue: Object = Undefined; // b. Let upperP be ! ToString(upper). // c. Let lowerP be ! ToString(lower). // d. Let lowerExists be ? HasProperty(O, lowerP). - const lower_exists: Boolean = HasProperty(context, object, lower); + const lowerExists: Boolean = HasProperty(context, object, lower); // e. If lowerExists is true, then. - if (lower_exists == True) { + if (lowerExists == True) { // i. Let lowerValue be ? Get(O, lowerP). - lower_value = GetProperty(context, object, lower); + lowerValue = GetProperty(context, object, lower); } // f. Let upperExists be ? HasProperty(O, upperP). - const upper_exists: Boolean = HasProperty(context, object, upper); + const upperExists: Boolean = HasProperty(context, object, upper); // g. If upperExists is true, then. 
- if (upper_exists == True) { + if (upperExists == True) { // i. Let upperValue be ? Get(O, upperP). - upper_value = GetProperty(context, object, upper); + upperValue = GetProperty(context, object, upper); } // h. If lowerExists is true and upperExists is true, then - if (lower_exists == True && upper_exists == True) { + if (lowerExists == True && upperExists == True) { // i. Perform ? Set(O, lowerP, upperValue, true). - SetProperty(context, object, lower, upper_value); + SetProperty(context, object, lower, upperValue); // ii. Perform ? Set(O, upperP, lowerValue, true). - SetProperty(context, object, upper, lower_value); - } else if (lower_exists == False && upper_exists == True) { + SetProperty(context, object, upper, lowerValue); + } else if (lowerExists == False && upperExists == True) { // i. Perform ? Set(O, lowerP, upperValue, true). - SetProperty(context, object, lower, upper_value); + SetProperty(context, object, lower, upperValue); // ii. Perform ? DeletePropertyOrThrow(O, upperP). DeleteProperty(context, object, upper, kStrict); - } else if (lower_exists == True && upper_exists == False) { + } else if (lowerExists == True && upperExists == False) { // i. Perform ? DeletePropertyOrThrow(O, lowerP). DeleteProperty(context, object, lower, kStrict); // ii. Perform ? Set(O, upperP, lowerValue, true). - SetProperty(context, object, upper, lower_value); + SetProperty(context, object, upper, lowerValue); } // l. Increase lower by 1. @@ -143,29 +143,16 @@ module array { return object; } - macro EnsureWriteableFastElements(array: JSArray) { - const elements: FixedArrayBase = array.elements; - if (elements.map != kCOWMap) return; - - // There are no COW *_DOUBLE_ELEMENTS arrays, so we are allowed to always - // extract FixedArrays and don't have to worry about FixedDoubleArrays. - assert(IsFastSmiOrTaggedElementsKind(array.map.elements_kind)); - - const length: Smi = array.length_fast; - array.elements = ExtractFixedArray( - unsafe_cast(elements), 0, length, length, kFixedArrays); - } - macro TryFastPackedArrayReverse(receiver: Object) labels Slow { - const array: JSArray = cast(receiver) otherwise Slow; - EnsureWriteableFastElements(array); - assert(array.elements.map != kCOWMap); + const array: JSArray = Cast(receiver) otherwise Slow; const kind: ElementsKind = array.map.elements_kind; if (kind == PACKED_SMI_ELEMENTS) { + EnsureWriteableFastElements(array); FastPackedArrayReverse( array.elements, array.length_fast); } else if (kind == PACKED_ELEMENTS) { + EnsureWriteableFastElements(array); FastPackedArrayReverse( array.elements, array.length_fast); } else if (kind == PACKED_DOUBLE_ELEMENTS) { diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq new file mode 100644 index 00000000000000..82bd48375f102c --- /dev/null +++ b/deps/v8/src/builtins/array-splice.tq @@ -0,0 +1,394 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +module array { + // Given {elements}, we want to create a non-zero length array of type + // FixedArrayType. Most of this behavior is outsourced to ExtractFixedArray(), + // but the special case of wanting to have a FixedDoubleArray when given a + // zero-length input FixedArray is handled here. 
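The generic reverse path above implements the spec's four exists/not-exists cases so that holes survive the swap instead of being read back as undefined. One lower/upper iteration in TypeScript (a sketch; delete stands in for DeletePropertyOrThrow and absent keys model holes):

    // One hole-aware swap step of the generic Array.prototype.reverse.
    function swapStep(o: Record<number, unknown>, lower: number, upper: number): void {
      const lowerExists = lower in o;
      const upperExists = upper in o;
      const lowerValue = lowerExists ? o[lower] : undefined;
      const upperValue = upperExists ? o[upper] : undefined;
      if (lowerExists && upperExists) {
        o[lower] = upperValue;
        o[upper] = lowerValue;
      } else if (!lowerExists && upperExists) {
        o[lower] = upperValue;  // Set(O, lowerP, upperValue, true)
        delete o[upper];        // DeletePropertyOrThrow(O, upperP)
      } else if (lowerExists && !upperExists) {
        delete o[lower];        // DeletePropertyOrThrow(O, lowerP)
        o[upper] = lowerValue;  // Set(O, upperP, lowerValue, true)
      }
    }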
+  macro Extract<FixedArrayType: type>(
+      elements: FixedArrayBase, first: Smi, count: Smi,
+      capacity: Smi): FixedArrayType {
+    return UnsafeCast<FixedArrayType>(
+        ExtractFixedArray(elements, first, count, capacity));
+  }
+
+  Extract<FixedDoubleArray>(
+      elements: FixedArrayBase, first: Smi, count: Smi,
+      capacity: Smi): FixedDoubleArray {
+    if (elements == kEmptyFixedArray) {
+      return AllocateZeroedFixedDoubleArray(Convert<intptr>(capacity));
+    }
+    return UnsafeCast<FixedDoubleArray>(
+        ExtractFixedArray(elements, first, count, capacity));
+  }
+
+  macro FastSplice<FixedArrayType: type, ElementType: type>(
+      args: constexpr Arguments, a: JSArray, length: Smi, newLength: Smi,
+      lengthDelta: Smi, actualStart: Smi, insertCount: Smi,
+      actualDeleteCount: Smi): void labels Bailout {
+    const elements: FixedArrayBase = a.elements;
+    const elementsMap: Map = elements.map;
+
+    // If the spliced array is larger than the
+    // source array, then allocate a new FixedArrayType to hold the result.
+    let newElements: FixedArrayBase = elements;
+    if (elementsMap == kCOWMap || lengthDelta > 0) {
+      newElements =
+          Extract<FixedArrayType>(elements, 0, actualStart, newLength);
+      if (elementsMap == kCOWMap) {
+        newElements.map = elementsMap;
+      }
+      a.elements = newElements;
+    }
+
+    // Copy over inserted elements.
+    let k: Smi = actualStart;
+    if (insertCount > 0) {
+      const typedNewElements: FixedArrayType =
+          UnsafeCast<FixedArrayType>(newElements);
+      for (let e: Object of args [2: ]) {
+        // The argument elements were already validated to be an appropriate
+        // {ElementType} to store in {FixedArrayType}.
+        typedNewElements[k++] = UnsafeCast<ElementType>(e);
+      }
+    }
+
+    // Copy over elements after deleted elements.
+    let count: Smi = length - actualStart - actualDeleteCount;
+    while (count > 0) {
+      const typedElements: FixedArrayType =
+          UnsafeCast<FixedArrayType>(elements);
+      const typedNewElements: FixedArrayType =
+          UnsafeCast<FixedArrayType>(newElements);
+      CopyArrayElement(typedElements, typedNewElements, k - lengthDelta, k);
+      k++;
+      count--;
+    }
+
+    // Fill rest of spliced FixedArray with the hole, but only if the
+    // destination FixedArray is the original array's, since otherwise the array
+    // is pre-filled with holes.
+    if (elements == newElements) {
+      const typedNewElements: FixedArrayType =
+          UnsafeCast<FixedArrayType>(newElements);
+      const limit: Smi = elements.length;
+      while (k < limit) {
+        StoreArrayHole(typedNewElements, k);
+        k++;
+      }
+    }
+
+    // Update the array's length after all the FixedArray shuffling is done.
+    a.length = newLength;
+  }
+
+  macro FastArraySplice(
+      context: Context, args: constexpr Arguments, o: JSReceiver,
+      originalLengthNumber: Number, actualStartNumber: Number, insertCount: Smi,
+      actualDeleteCountNumber: Number): Object
+      labels Bailout {
+    const originalLength: Smi =
+        Cast<Smi>(originalLengthNumber) otherwise Bailout;
+    const actualStart: Smi = Cast<Smi>(actualStartNumber) otherwise Bailout;
+    const actualDeleteCount: Smi =
+        Cast<Smi>(actualDeleteCountNumber) otherwise Bailout;
+    const lengthDelta: Smi = insertCount - actualDeleteCount;
+    const newLength: Smi = originalLength + lengthDelta;
+
+    const a: JSArray = Cast<JSArray>(o) otherwise Bailout;
+
+    const map: Map = a.map;
+    if (!IsPrototypeInitialArrayPrototype(context, map)) goto Bailout;
+    if (IsNoElementsProtectorCellInvalid()) goto Bailout;
+    if (IsArraySpeciesProtectorCellInvalid()) goto Bailout;
+
+    // Fast path only works on fast elements kind and with writable length.
+    let elementsKind: ElementsKind = EnsureArrayPushable(map) otherwise Bailout;
+    if (!IsFastElementsKind(elementsKind)) goto Bailout;
+
+    const oldElementsKind: ElementsKind = elementsKind;
+    for (let e: Object of args [2: ]) {
+      if (IsFastSmiElementsKind(elementsKind)) {
+        if (TaggedIsNotSmi(e)) {
+          const heapObject: HeapObject = UnsafeCast<HeapObject>(e);
+          elementsKind = IsHeapNumber(heapObject) ?
+              AllowDoubleElements(elementsKind) :
+              AllowNonNumberElements(elementsKind);
+        }
+      } else if (IsDoubleElementsKind(elementsKind)) {
+        if (!IsNumber(e)) {
+          elementsKind = AllowNonNumberElements(elementsKind);
+        }
+      }
+    }
+
+    if (elementsKind != oldElementsKind) {
+      const smiElementsKind: Smi = Convert<Smi>(Convert<int32>(elementsKind));
+      TransitionElementsKindWithKind(context, a, smiElementsKind);
+    }
+
+    // Make sure that the length hasn't been changed by side-effect.
+    const length: Smi = Cast<Smi>(a.length) otherwise Bailout;
+    if (originalLength != length) goto Bailout;
+
+    const deletedResult: JSArray =
+        ExtractFastJSArray(context, a, actualStart, actualDeleteCount);
+
+    if (newLength == 0) {
+      a.elements = kEmptyFixedArray;
+      a.length = 0;
+      return deletedResult;
+    }
+
+    if (IsFastSmiOrTaggedElementsKind(elementsKind)) {
+      FastSplice<FixedArray, Object>(
+          args, a, length, newLength, lengthDelta, actualStart, insertCount,
+          actualDeleteCount) otherwise Bailout;
+    } else {
+      FastSplice<FixedDoubleArray, Number>(
+          args, a, length, newLength, lengthDelta, actualStart, insertCount,
+          actualDeleteCount) otherwise Bailout;
+    }
+
+    return deletedResult;
+  }
+
+  macro FillDeletedElementsArray(
+      context: Context, o: JSReceiver, actualStart: Number,
+      actualDeleteCount: Number, a: JSReceiver): Object {
+    // 10. Let k be 0.
+    let k: Number = 0;
+
+    // 11. Repeat, while k < actualDeleteCount
+    while (k < actualDeleteCount) {
+      // a. Let from be ! ToString(actualStart + k).
+      const from: Number = actualStart + k;
+
+      // b. Let fromPresent be ? HasProperty(O, from).
+      const fromPresent: Boolean = HasProperty(context, o, from);
+
+      // c. If fromPresent is true, then
+      if (fromPresent == True) {
+        // i. Let fromValue be ? Get(O, from).
+        const fromValue: Object = GetProperty(context, o, from);
+
+        // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(k), fromValue).
+        CreateDataProperty(context, a, k, fromValue);
+      }
+
+      // d. Increment k by 1.
+      k++;
+    }
+    // 12. Perform ? Set(A, "length", actualDeleteCount, true).
+    SetProperty(context, a, kLengthString, actualDeleteCount);
+    return a;
+  }
+
+  // HandleForwardCase implements step 15. "If itemCount < actualDeleteCount,
+  // then..."
+  macro HandleForwardCase(
+      context: Context, o: JSReceiver, len: Number, itemCount: Number,
+      actualStart: Number, actualDeleteCount: Number): void {
+    // 15. If itemCount < actualDeleteCount, then
+    // a. Let k be actualStart.
+    let k: Number = actualStart;
+
+    // b. Repeat, while k < (len - actualDeleteCount)
+    while (k < (len - actualDeleteCount)) {
+      // i. Let from be ! ToString(k + actualDeleteCount).
+      const from: Number = k + actualDeleteCount;
+      // ii. Let to be ! ToString(k + itemCount).
+      const to: Number = k + itemCount;
+
+      // iii. Let fromPresent be ? HasProperty(O, from).
+      const fromPresent: Boolean = HasProperty(context, o, from);
+
+      // iv. If fromPresent is true, then
+      if (fromPresent == True) {
+        // 1. Let fromValue be ? Get(O, from).
+        const fromValue: Object = GetProperty(context, o, from);
+
+        // 2. Perform ? Set(O, to, fromValue, true).
+        SetProperty(context, o, to, fromValue);
+
+        // v. Else fromPresent is false,
+      } else {
+        // 1. Perform ?
DeletePropertyOrThrow(O, to). + DeleteProperty(context, o, to, kStrict); + } + // vi. Increase k by 1. + k++; + } + + // c. Let k be len. + k = len; + + // d. Repeat, while k > (len - actualDeleteCount + itemCount) + while (k > (len - actualDeleteCount + itemCount)) { + // i. Perform ? DeletePropertyOrThrow(O, ! ToString(k - 1)). + DeleteProperty(context, o, k - 1, kStrict); + // ii. Decrease k by 1. + k--; + } + } + + // HandleBackwardCase implements step 16. "Else if itemCount > + // actualDeleteCount, then..." + macro HandleBackwardCase( + context: Context, o: JSReceiver, len: Number, itemCount: Number, + actualStart: Number, actualDeleteCount: Number): void { + // 16. Else if itemCount > actualDeleteCount, then + // a. Let k be (len - actualDeleteCount). + let k: Number = len - actualDeleteCount; + + // b. Repeat, while k > actualStart + while (k > actualStart) { + // i. Let from be ! ToString(k + actualDeleteCount - 1). + const from: Number = k + actualDeleteCount - 1; + + // ii. Let to be ! ToString(k + itemCount - 1). + const to: Number = k + itemCount - 1; + + // iii. Let fromPresent be ? HasProperty(O, from). + const fromPresent: Boolean = HasProperty(context, o, from); + + // iv. If fromPresent is true, then + if (fromPresent == True) { + // 1. Let fromValue be ? Get(O, from). + const fromValue: Object = GetProperty(context, o, from); + + // 2. Perform ? Set(O, to, fromValue, true). + SetProperty(context, o, to, fromValue); + + // v. Else fromPresent is false, + } else { + // 1. Perform ? DeletePropertyOrThrow(O, to). + DeleteProperty(context, o, to, kStrict); + } + + // vi. Decrease k by 1. + k--; + } + } + + macro SlowSplice( + context: Context, arguments: constexpr Arguments, o: JSReceiver, + len: Number, actualStart: Number, insertCount: Smi, + actualDeleteCount: Number): Object { + const affected: Number = len - actualStart - actualDeleteCount; + + // 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount). + const a: JSReceiver = ArraySpeciesCreate(context, o, actualDeleteCount); + const itemCount: Number = insertCount; + + // Steps 9 through 12: creating the array of deleted elements. + FillDeletedElementsArray(context, o, actualStart, actualDeleteCount, a); + + // 13. Let items be a List whose elements are, in left-to-right order, + // the portion of the actual argument list starting with the third + // argument. The list is empty if fewer than three arguments were + // passed. + // 14. Let itemCount be the Number of elements in items. + // (done above). + + // 15. If itemCount < actualDeleteCount, then + if (itemCount < actualDeleteCount) { + HandleForwardCase( + context, o, len, itemCount, actualStart, actualDeleteCount); + // 16. Else if itemCount > actualDeleteCount, then + } else if (itemCount > actualDeleteCount) { + HandleBackwardCase( + context, o, len, itemCount, actualStart, actualDeleteCount); + } + + // 17. Let k be actualStart. + let k: Number = actualStart; + + // 18. Repeat, while items is not empty + // a. Remove the first element from items and let E be the value of that + // element. + if (arguments.length > 2) { + for (let e: Object of arguments [2: ]) { + // b. Perform ? Set(O, ! ToString(k), E, true). + SetProperty(context, o, k, e); + + // c. Increase k by 1. + k = k + 1; + } + } + + // 19. Perform ? Set(O, "length", len - actualDeleteCount + itemCount, + // true). 
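HandleForwardCase and HandleBackwardCase above are the two shift directions of spec steps 15 and 16: when fewer items are inserted than deleted, the tail is compacted leftward and stale slots past the new end are deleted; when more are inserted, elements are moved rightward starting from the back so no slot is overwritten before it is read. The forward case as a TypeScript sketch (names are ours):

    // Step 15: fewer items inserted than deleted - shift left, then trim.
    function handleForwardCase(
        o: Record<number, unknown>, len: number, itemCount: number,
        actualStart: number, actualDeleteCount: number): void {
      for (let k = actualStart; k < len - actualDeleteCount; k++) {
        const from = k + actualDeleteCount;
        const to = k + itemCount;
        if (from in o) o[to] = o[from];
        else delete o[to];
      }
      // Step 15.d: delete the now-stale slots past the new end.
      for (let k = len; k > len - actualDeleteCount + itemCount; k--) {
        delete o[k - 1];
      }
    }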
+    SetProperty(context, o, kLengthString, len - actualDeleteCount + itemCount);
+
+    return a;
+  }
+
+  // https://tc39.github.io/ecma262/#sec-array.prototype.splice
+  javascript builtin ArraySplice(
+      context: Context, receiver: Object, ...arguments): Object {
+    // 1. Let O be ? ToObject(this value).
+    const o: JSReceiver = ToObject(context, receiver);
+
+    // 2. Let len be ? ToLength(? Get(O, "length")).
+    const len: Number = GetLengthProperty(context, o);
+
+    // 3. Let relativeStart be ? ToInteger(start).
+    const start: Object = arguments[0];
+    const relativeStart: Number = ToInteger_Inline(context, start);
+
+    // 4. If relativeStart < 0, let actualStart be max((len + relativeStart),
+    //    0); else let actualStart be min(relativeStart, len).
+    const actualStart: Number = relativeStart < 0 ?
+        Max((len + relativeStart), 0) :
+        Min(relativeStart, len);
+
+    let insertCount: Smi;
+    let actualDeleteCount: Number;
+    // 5. If the number of actual arguments is 0, then
+    if (arguments.length == 0) {
+      // a. Let insertCount be 0.
+      insertCount = 0;
+      // b. Let actualDeleteCount be 0.
+      actualDeleteCount = 0;
+      // 6. Else if the number of actual arguments is 1, then
+    } else if (arguments.length == 1) {
+      // a. Let insertCount be 0.
+      insertCount = 0;
+      // b. Let actualDeleteCount be len - actualStart.
+      actualDeleteCount = len - actualStart;
+      // 7. Else,
+    } else {
+      // a. Let insertCount be the number of actual arguments minus 2.
+      insertCount = Convert<Smi>(arguments.length) - 2;
+      // b. Let dc be ? ToInteger(deleteCount).
+      const deleteCount: Object = arguments[1];
+      const dc: Number = ToInteger_Inline(context, deleteCount);
+      // c. Let actualDeleteCount be min(max(dc, 0), len - actualStart).
+      actualDeleteCount = Min(Max(dc, 0), len - actualStart);
+    }
+
+    // 8. If len + insertCount - actualDeleteCount > 2^53-1, throw a
+    //    TypeError exception.
+    const newLength: Number = len + insertCount - actualDeleteCount;
+    if (newLength > kMaxSafeInteger) {
+      ThrowTypeError(context, kInvalidArrayLength, start);
+    }
+
+    try {
+      return FastArraySplice(
+          context, arguments, o, len, actualStart, insertCount,
+          actualDeleteCount) otherwise Bailout;
+    }
+    label Bailout {}
+
+    // If the fast case fails, just continue with the slow, correct,
+    // spec-compliant case.
+    return SlowSplice(
+        context, arguments, o, len, actualStart, insertCount,
+        actualDeleteCount);
+  }
+}
diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq
new file mode 100644
index 00000000000000..6803b279d48451
--- /dev/null
+++ b/deps/v8/src/builtins/array-unshift.tq
@@ -0,0 +1,106 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module array {
+  extern builtin ArrayUnshift(Context, JSFunction, Object, int32);
+
+  macro TryFastArrayUnshift(
+      context: Context, receiver: Object, arguments: constexpr Arguments): never
+      labels Slow {
+    EnsureFastJSArray(context, receiver) otherwise Slow;
+    const array: JSArray = UnsafeCast<JSArray>(receiver);
+    EnsureWriteableFastElements(array);
+
+    const map: Map = array.map;
+    if (!IsExtensibleMap(map)) goto Slow;
+    EnsureArrayLengthWritable(map) otherwise Slow;
+
+    tail ArrayUnshift(
+        context, LoadTargetFromFrame(), Undefined,
+        Convert<int32>(arguments.length));
+  }
+
+  macro GenericArrayUnshift(
+      context: Context, receiver: Object,
+      arguments: constexpr Arguments): Number {
+    // 1. Let O be ? ToObject(this value).
+    const object: JSReceiver = ToObject_Inline(context, receiver);
+
+    // 2. Let len be ? ToLength(? Get(O, "length")).
+    const length: Number = GetLengthProperty(context, object);
+
+    // 3. Let argCount be the number of actual arguments.
+    const argCount: Smi = Convert<Smi>(arguments.length);
+
+    // 4. If argCount > 0, then.
+    if (argCount > 0) {
+      // a. If len + argCount > 2**53 - 1, throw a TypeError exception.
+      if (length + argCount > kMaxSafeInteger) {
+        ThrowTypeError(context, kInvalidArrayLength);
+      }
+
+      // b. Let k be len.
+      let k: Number = length;
+
+      // c. Repeat, while k > 0.
+      while (k > 0) {
+        // i. Let from be ! ToString(k - 1).
+        const from: Number = k - 1;
+
+        // ii. Let to be ! ToString(k + argCount - 1).
+        const to: Number = k + argCount - 1;
+
+        // iii. Let fromPresent be ? HasProperty(O, from).
+        const fromPresent: Boolean = HasProperty(context, object, from);
+
+        // iv. If fromPresent is true, then
+        if (fromPresent == True) {
+          // 1. Let fromValue be ? Get(O, from).
+          const fromValue: Object = GetProperty(context, object, from);
+
+          // 2. Perform ? Set(O, to, fromValue, true).
+          SetProperty(context, object, to, fromValue);
+        } else {
+          // 1. Perform ? DeletePropertyOrThrow(O, to).
+          DeleteProperty(context, object, to, kStrict);
+        }
+
+        // vi. Decrease k by 1.
+        --k;
+      }
+
+      // d. Let j be 0.
+      let j: Smi = 0;
+
+      // e. Let items be a List whose elements are, in left to right order,
+      //    the arguments that were passed to this function invocation.
+      // f. Repeat, while items is not empty
+      while (j < argCount) {
+        // ii. Perform ? Set(O, ! ToString(j), E, true).
+        SetProperty(context, object, j, arguments[Convert<intptr>(j)]);
+
+        // iii. Increase j by 1.
+        ++j;
+      }
+    }
+
+    // 5. Perform ? Set(O, "length", len + argCount, true).
+    const newLength: Number = length + argCount;
+    SetProperty(context, object, kLengthString, newLength);
+
+    // 6. Return length + argCount.
+ return newLength; + } + + // https://tc39.github.io/ecma262/#sec-array.prototype.unshift + javascript builtin ArrayPrototypeUnshift( + context: Context, receiver: Object, ...arguments): Object { + try { + TryFastArrayUnshift(context, receiver, arguments) otherwise Baseline; + } + label Baseline { + return GenericArrayUnshift(context, receiver, arguments); + } + } +} diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq index 590947dd441601..96706efc094be7 100644 --- a/deps/v8/src/builtins/array.tq +++ b/deps/v8/src/builtins/array.tq @@ -16,301 +16,64 @@ module array { macro GetLengthProperty(context: Context, o: Object): Number { if (BranchIfFastJSArray(o, context)) { - let a: JSArray = unsafe_cast(o); + const a: JSArray = UnsafeCast(o); return a.length_fast; } else deferred { - return ToLength_Inline(context, GetProperty(context, o, 'length')); + return ToLength_Inline(context, GetProperty(context, o, kLengthString)); } } - macro FastArraySplice( - context: Context, args: constexpr Arguments, o: Object, - originalLengthNumber: Number, actualStartNumber: Number, insertCount: Smi, - actualDeleteCountNumber: Number): Object - labels Bailout { - let originalLength: Smi = cast(originalLengthNumber) otherwise Bailout; - let actualStart: Smi = cast(actualStartNumber) otherwise Bailout; - let actualDeleteCount: Smi = - cast(actualDeleteCountNumber) otherwise Bailout; - let lengthDelta: Smi = insertCount - actualDeleteCount; - let newLength: Smi = originalLength + lengthDelta; + macro EnsureWriteableFastElements(array: JSArray) { + assert(IsFastElementsKind(array.map.elements_kind)); - let a: JSArray = cast(o) otherwise Bailout; + const elements: FixedArrayBase = array.elements; + if (elements.map != kCOWMap) return; - let map: Map = a.map; - if (!IsPrototypeInitialArrayPrototype(context, map)) goto Bailout; - if (IsNoElementsProtectorCellInvalid()) goto Bailout; - if (IsArraySpeciesProtectorCellInvalid()) goto Bailout; + // There are no COW *_DOUBLE_ELEMENTS arrays, so we are allowed to always + // extract FixedArrays and don't have to worry about FixedDoubleArrays. + assert(IsFastSmiOrTaggedElementsKind(array.map.elements_kind)); - // Fast path only works on fast elements kind and with writable length. - let elementsKind: ElementsKind = EnsureArrayPushable(map) otherwise Bailout; - if (!IsFastElementsKind(elementsKind)) goto Bailout; - - // For now, only support non-double fast elements - if (!IsFastSmiOrTaggedElementsKind(elementsKind)) goto Bailout; - - if (IsFastSmiElementsKind(elementsKind)) { - for (let e: Object of args [2: ]) { - if (TaggedIsNotSmi(e)) goto Bailout; - } - } - - // Make sure that the length hasn't been changed by side-effect. - let length: Smi = cast(a.length) otherwise Bailout; - if (originalLength != length) goto Bailout; - - let deletedResult: JSArray = - ExtractFastJSArray(context, a, actualStart, actualDeleteCount); - - if (newLength == 0) { - a.elements = kEmptyFixedArray; - a.length = 0; - return deletedResult; - } - - let elements: FixedArray = cast(a.elements) otherwise Bailout; - let elementsMap: Map = elements.map; - - // If the source is a COW array or the spliced array is larger then the - // source array, then allocate a new FixedArray to hold the result. 
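GenericArrayUnshift above moves existing elements right by argCount starting from the back (k = len down to 1), so every source slot is read before anything overwrites it, and only then writes the new items at indices 0..argCount-1. Compactly, in TypeScript (a sketch; absent keys model holes):

    // Generic unshift: shift right from the back, then write the new items.
    function genericUnshift(
        o: Record<number, unknown> & { length: number },
        items: unknown[]): number {
      const len = o.length;
      const argCount = items.length;
      for (let k = len; k > 0; k--) {
        const from = k - 1;
        const to = k + argCount - 1;
        if (from in o) o[to] = o[from];
        else delete o[to];
      }
      for (let j = 0; j < argCount; j++) o[j] = items[j];
      o.length = len + argCount;
      return o.length;
    }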
- let newElements: FixedArray = elements; - if ((elementsMap == kCOWMap) || (lengthDelta > 0)) { - newElements = ExtractFixedArray( - elements, 0, actualStart, newLength, kAllFixedArrays); - newElements.map = elementsMap; - a.elements = newElements; - } - - // Double check that the array is still in fast elements mode - assert(IsFastSmiElementsKind(a.map.elements_kind)); - - // Copy over inserted elements. - let k: Smi = actualStart; - if (insertCount > 0) { - for (let e: Object of args [2: ]) { - newElements[k++] = e; - } - } - - // Copy over elements after deleted elements. - let count: Smi = length - actualStart - actualDeleteCount; - while (count > 0) { - let e: Object = elements[k - lengthDelta]; - newElements[k++] = e; - count--; - } - - // Fill rest of spliced FixedArray with the hole, but only if the - // destination FixedArray is the original array's, since otherwise the array - // is pre-filled with holes. - if (elements == newElements) { - let limit: Smi = elements.length; - while (k < limit) { - newElements[k++] = Hole; - } - } - - // Update the array's length after all the FixedArray shuffling is done. - a.length = newLength; - - return deletedResult; + const length: Smi = array.length_fast; + array.elements = ExtractFixedArray( + elements, 0, length, length, kFixedArrays); + assert(array.elements.map != kCOWMap); } - // https://tc39.github.io/ecma262/#sec-array.prototype.splice - javascript builtin ArraySpliceTorque( - context: Context, receiver: Object, ...arguments): Object { - // 1. Let O be ? ToObject(this value). - let o: JSReceiver = ToObject(context, receiver); - - // 2. Let len be ? ToLength(? Get(O, "length")). - let len: Number = GetLengthProperty(context, o); - - // 3. Let relativeStart be ? ToInteger(start). - let start: Object = arguments[0]; - let relativeStart: Number = ToInteger_Inline(context, start); - - // 4. If relativeStart < 0, let actualStart be max((len + relativeStart), - // 0); - // else let actualStart be min(relativeStart, len). - let actualStart: Number = relativeStart < 0 ? - max((len + relativeStart), 0) : - min(relativeStart, len); - - let insertCount: Smi; - let actualDeleteCount: Number; - // 5. If the Number of actual arguments is 0, then - if (arguments.length == 0) { - // a. Let insertCount be 0. - insertCount = 0; - // b. Let actualDeleteCount be 0. - actualDeleteCount = 0; - // 6. Else if the Number of actual arguments is 1, then - } else if (arguments.length == 1) { - // a. Let insertCount be 0. - insertCount = 0; - // b. Let actualDeleteCount be len - actualStart. - actualDeleteCount = len - actualStart; - // 7. Else, - } else { - // a. Let insertCount be the Number of actual arguments minus 2. - insertCount = convert(arguments.length) - 2; - // b. Let dc be ? ToInteger(deleteCount). - let deleteCount: Object = arguments[1]; - let dc: Number = ToInteger_Inline(context, deleteCount); - // c. Let actualDeleteCount be min(max(dc, 0), len - actualStart). - actualDeleteCount = min(max(dc, 0), len - actualStart); - } - - // 8. If len + insertCount - actualDeleteCount > 2^53-1, throw a - // Bailout exception. 
- if (len + insertCount - actualDeleteCount > kMaxSafeInteger) { - ThrowRangeError(context, kInvalidArrayLength); - } - + macro IsJSArray(o: Object): bool { try { - return FastArraySplice( - context, arguments, o, len, actualStart, insertCount, - actualDeleteCount) otherwise Bailout; + const array: JSArray = Cast(o) otherwise NotArray; + return true; } - label Bailout {} - // If the fast case fails, just continue with the slow, correct, - // spec-compliant case. - - // 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount). - let a: Object = ArraySpeciesCreate(context, o, actualDeleteCount); - - // 10. Let k be 0. - let k: Number = 0; - - // 11. Repeat, while k < actualDeleteCount - while (k < actualDeleteCount) { - // a. Let from be ! ToString(actualStart + k). - let from: String = ToString_Inline(context, actualStart + k); - - // b. Let fromPresent be ? HasProperty(O, from). - let fromPresent: Oddball = HasProperty(context, o, from); - - // c. If fromPresent is true, then - if (fromPresent == True) { - // i. Let fromValue be ? Get(O, from). - let fromValue: Object = GetProperty(context, o, from); - - // ii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(k), fromValue). - CreateDataProperty(context, a, ToString_Inline(context, k), fromValue); - } - - // d. Increment k by 1. - k = k + 1; + label NotArray { + return false; } + } - // 12. Perform ? Set(A, "length", actualDeleteCount, true). - SetProperty(context, a, 'length', actualDeleteCount); - - // 13. Let items be a List whose elements are, in left-to-right order, - // the portion of the actual argument list starting with the third - // argument. The list is empty if fewer than three arguments were - // passed. - // 14. Let itemCount be the Number of elements in items. - let itemCount: Number = insertCount; - - // 15. If itemCount < actualDeleteCount, then - if (itemCount < actualDeleteCount) { - // a. Let k be actualStart. - let k: Number = actualStart; - - // b. Repeat, while k < (len - actualDeleteCount) - while (k < (len - actualDeleteCount)) { - // i. Let from be ! ToString(k + actualDeleteCount). - let from: String = ToString_Inline(context, k + actualDeleteCount); - // ii. Let to be ! ToString(k + itemCount). - let to: String = ToString_Inline(context, k + itemCount); - - // iii. Let fromPresent be ? HasProperty(O, from). - let fromPresent: Oddball = HasProperty(context, o, from); - - // iv. If fromPresent is true, then - if (fromPresent == True) { - // 1. Let fromValue be ? Get(O, from). - let fromValue: Object = GetProperty(context, o, from); - - // 2. Perform ? Set(O, to, fromValue, true). - SetProperty(context, o, to, fromValue); - - // v. Else fromPresent is false, - } else { - // 1. Perform ? DeletePropertyOrThrow(O, to). - DeleteProperty(context, o, to, kStrict); - } - // vi. Increase k by 1. - k = k + 1; - } - - // c. Let k be len. - k = len; - // d. Repeat, while k > (len - actualDeleteCount + itemCount) - while (k > (len - actualDeleteCount + itemCount)) { - // i. Perform ? DeletePropertyOrThrow(O, ! ToString(k - 1)). - DeleteProperty(context, o, ToString_Inline(context, k - 1), kStrict); - - // ii. Decrease k by 1. - k = k - 1; - } - // 16. Else if itemCount > actualDeleteCount, then - } else if (itemCount > actualDeleteCount) { - // a. Let k be (len - actualDeleteCount). - let k: Number = len - actualDeleteCount; - - // b. Repeat, while k > actualStart - while (k > actualStart) { - // i. Let from be ! ToString(k + actualDeleteCount - 1). 
- let from: String = ToString_Inline(context, k + actualDeleteCount - 1); - - // ii. Let to be ! ToString(k + itemCount - 1). - let to: String = ToString_Inline(context, k + itemCount - 1); - - // iii. Let fromPresent be ? HasProperty(O, from). - let fromPresent: Oddball = HasProperty(context, o, from); - - // iv. If fromPresent is true, then - if (fromPresent == True) { - // 1. Let fromValue be ? Get(O, from). - let fromValue: Object = GetProperty(context, o, from); + macro StoreArrayHole(elements: FixedDoubleArray, k: Smi): void { + StoreFixedDoubleArrayHoleSmi(elements, k); + } - // 2. Perform ? Set(O, to, fromValue, true). - SetProperty(context, o, to, fromValue); + macro StoreArrayHole(elements: FixedArray, k: Smi): void { + elements[k] = Hole; + } - // v. Else fromPresent is false, - } else { - // 1. Perform ? DeletePropertyOrThrow(O, to). - DeleteProperty(context, o, to, kStrict); - } + macro CopyArrayElement( + elements: FixedArray, newElements: FixedArray, from: Smi, to: Smi): void { + const e: Object = elements[from]; + newElements[to] = e; + } - // vi. Decrease k by 1. - k = k - 1; - } + macro CopyArrayElement( + elements: FixedDoubleArray, newElements: FixedDoubleArray, from: Smi, + to: Smi): void { + try { + const floatValue: float64 = LoadDoubleWithHoleCheck(elements, from) + otherwise FoundHole; + newElements[to] = floatValue; } - - // 17. Let k be actualStart. - k = actualStart; - - // 18. Repeat, while items is not empty - // a. Remove the first element from items and let E be the value of that - // element. - if (arguments.length > 2) { - for (let e: Object of arguments [2: ]) { - // b. Perform ? Set(O, ! ToString(k), E, true). - SetProperty(context, o, ToString_Inline(context, k), e); - - // c. Increase k by 1. - k = k + 1; - } + label FoundHole { + StoreArrayHole(newElements, to); } - - // 19. Perform ? Set(O, "length", len - actualDeleteCount + itemCount, - // true). 
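The two CopyArrayElement overloads above differ only in hole handling: the FixedDoubleArray variant must detect the hole on load (LoadDoubleWithHoleCheck) and re-store a hole in the destination rather than a garbage number. The same idea in TypeScript, with absent keys modeling holes (a loose model, not V8's representation):

    // Copy one element from elements[from] to newElements[to], preserving holes.
    function copyArrayElement(
        elements: Record<number, number>, newElements: Record<number, number>,
        from: number, to: number): void {
      if (from in elements) {
        newElements[to] = elements[from];  // the hole check passed
      } else {
        delete newElements[to];            // StoreArrayHole(newElements, to)
      }
    }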
- SetProperty(context, o, 'length', len - actualDeleteCount + itemCount); - - return a; } } diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 1b9d577f10170f..937bb8c3de380c 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -61,7 +61,7 @@ type LanguageMode generates 'TNode' constexpr 'LanguageMode'; type ExtractFixedArrayFlags generates 'TNode' constexpr 'ExtractFixedArrayFlags'; type ParameterMode generates 'TNode' constexpr 'ParameterMode'; -type RootListIndex generates 'TNode' constexpr 'Heap::RootListIndex'; +type RootIndex generates 'TNode' constexpr 'RootIndex'; type WriteBarrierMode generates 'TNode' constexpr 'WriteBarrierMode'; type MessageTemplate constexpr 'MessageTemplate::Template'; @@ -107,15 +107,17 @@ type FixedUint8ClampedArray extends FixedTypedArray; type FixedBigUint64Array extends FixedTypedArray; type FixedBigInt64Array extends FixedTypedArray; +const kFixedDoubleArrays: constexpr ExtractFixedArrayFlags generates +'ExtractFixedArrayFlag::kFixedDoubleArrays'; const kAllFixedArrays: constexpr ExtractFixedArrayFlags generates 'ExtractFixedArrayFlag::kAllFixedArrays'; const kFixedArrays: constexpr ExtractFixedArrayFlags generates 'ExtractFixedArrayFlag::kFixedArrays'; -const kFixedCOWArrayMapRootIndex: constexpr RootListIndex generates -'Heap::kFixedCOWArrayMapRootIndex'; -const kEmptyFixedArrayRootIndex: constexpr RootListIndex generates -'Heap::kEmptyFixedArrayRootIndex'; +const kFixedCOWArrayMapRootIndex: constexpr RootIndex generates +'RootIndex::kFixedCOWArrayMap'; +const kEmptyFixedArrayRootIndex: constexpr RootIndex generates +'RootIndex::kEmptyFixedArray'; const kInvalidArrayLength: constexpr MessageTemplate generates 'MessageTemplate::kInvalidArrayLength'; @@ -149,12 +151,14 @@ extern macro TrueConstant(): Boolean; extern macro FalseConstant(): Boolean; extern macro Int32TrueConstant(): bool; extern macro Int32FalseConstant(): bool; +extern macro LengthStringConstant(): String; const Hole: Oddball = TheHoleConstant(); const Null: Oddball = NullConstant(); const Undefined: Oddball = UndefinedConstant(); const True: Boolean = TrueConstant(); const False: Boolean = FalseConstant(); +const kLengthString: String = LengthStringConstant(); const true: constexpr bool generates 'true'; const false: constexpr bool generates 'false'; @@ -165,9 +169,8 @@ const kSloppy: constexpr LanguageMode generates 'LanguageMode::kSloppy'; const SMI_PARAMETERS: constexpr ParameterMode generates 'SMI_PARAMETERS'; const INTPTR_PARAMETERS: constexpr ParameterMode generates 'INTPTR_PARAMETERS'; - const SKIP_WRITE_BARRIER: constexpr WriteBarrierMode - generates 'SKIP_WRITE_BARRIER'; +generates 'SKIP_WRITE_BARRIER'; extern macro Is64(): constexpr bool; @@ -175,6 +178,7 @@ extern macro SelectBooleanConstant(bool): Boolean; extern macro Print(constexpr string); extern macro Print(constexpr string, Object); +extern macro Comment(constexpr string); extern macro Print(Object); extern macro DebugBreak(); extern macro ToInteger_Inline(Context, Object): Number; @@ -187,14 +191,17 @@ extern macro GetProperty(Context, Object, Object): Object; extern builtin SetProperty(Context, Object, Object, Object); extern builtin DeleteProperty(Context, Object, Object, LanguageMode); extern builtin HasProperty(Context, JSReceiver, Object): Boolean; +extern macro HasProperty_Inline(Context, JSReceiver, Object): Boolean; extern macro ThrowRangeError(Context, constexpr MessageTemplate): never; extern macro ThrowTypeError(Context, constexpr MessageTemplate): 
never; extern macro ThrowTypeError(Context, constexpr MessageTemplate, Object): never; extern macro ThrowTypeError( Context, constexpr MessageTemplate, Object, Object, Object): never; -extern macro ArraySpeciesCreate(Context, Object, Number): Object; +extern macro ArraySpeciesCreate(Context, Object, Number): JSReceiver; +extern macro InternalArrayCreate(Context, Number): JSArray; extern macro EnsureArrayPushable(Map): ElementsKind labels Bailout; +extern macro EnsureArrayLengthWritable(Map) labels Bailout; extern builtin ToObject(Context, Object): JSReceiver; extern macro ToObject_Inline(Context, Object): JSReceiver; @@ -203,17 +210,19 @@ extern macro IsTheHole(Object): bool; extern macro IsString(HeapObject): bool; extern builtin ToString(Context, Object): String; -extern runtime CreateDataProperty(Context, Object, Object, Object); +extern runtime NormalizeElements(Context, JSObject); +extern runtime TransitionElementsKindWithKind(Context, JSObject, Smi); +extern runtime CreateDataProperty(Context, JSReceiver, Object, Object); -extern macro LoadRoot(constexpr RootListIndex): Object; -extern macro StoreRoot(constexpr RootListIndex, Object): Object; -extern macro LoadAndUntagToWord32Root(constexpr RootListIndex): int32; +extern macro LoadRoot(constexpr RootIndex): Object; +extern macro StoreRoot(constexpr RootIndex, Object): Object; +extern macro LoadAndUntagToWord32Root(constexpr RootIndex): int32; extern runtime StringEqual(Context, String, String): Oddball; extern builtin StringLessThan(Context, String, String): Boolean; extern macro StrictEqual(Object, Object): Boolean; -extern runtime SmiLexicographicCompare(Context, Object, Object): Number; +extern macro SmiLexicographicCompare(Smi, Smi): Smi; extern operator '<' macro Int32LessThan(int32, int32): bool; extern operator '>' macro Int32GreaterThan(int32, int32): bool; @@ -230,6 +239,10 @@ extern operator '>=' macro SmiGreaterThanOrEqual(Smi, Smi): bool; extern operator '==' macro ElementsKindEqual( constexpr ElementsKind, constexpr ElementsKind): constexpr bool; extern operator '==' macro ElementsKindEqual(ElementsKind, ElementsKind): bool; +operator '!=' macro ElementsKindNotEqual( + k1: ElementsKind, k2: ElementsKind): bool { + return !ElementsKindEqual(k1, k2); +} extern macro IsFastElementsKind(constexpr ElementsKind): constexpr bool; extern macro IsDoubleElementsKind(constexpr ElementsKind): constexpr bool; @@ -243,16 +256,21 @@ extern operator '<' macro IntPtrLessThan(intptr, intptr): bool; extern operator '>' macro IntPtrGreaterThan(intptr, intptr): bool; extern operator '<=' macro IntPtrLessThanOrEqual(intptr, intptr): bool; extern operator '>=' macro IntPtrGreaterThanOrEqual(intptr, intptr): bool; +extern operator '>' macro UintPtrGreaterThan(uintptr, uintptr): bool; extern operator '>=' macro UintPtrGreaterThanOrEqual(uintptr, uintptr): bool; extern operator '==' macro Float64Equal(float64, float64): bool; extern operator '!=' macro Float64NotEqual(float64, float64): bool; +extern operator '>' macro Float64GreaterThan(float64, float64): bool; +extern operator +'==' macro BranchIfNumberEqual(Number, Number): never labels Taken, NotTaken; extern operator '<' macro BranchIfNumberLessThan(Number, Number): never labels Taken, NotTaken; extern operator '<=' macro BranchIfNumberLessThanOrEqual(Number, Number): never labels Taken, NotTaken; + extern operator '>' macro BranchIfNumberGreaterThan(Number, Number): never labels Taken, NotTaken; @@ -271,6 +289,7 @@ extern operator '>>>' macro SmiShr(Smi, constexpr int31): Smi; extern 
operator '<<' macro SmiShl(Smi, constexpr int31): Smi; extern operator '+' macro IntPtrAdd(intptr, intptr): intptr; +extern operator '+' macro UintPtrAdd(uintptr, uintptr): uintptr; extern operator '-' macro IntPtrSub(intptr, intptr): intptr; extern operator '>>>' macro WordShr(uintptr, uintptr): uintptr; extern operator '<<' macro WordShl(intptr, intptr): intptr; @@ -296,14 +315,16 @@ extern operator '<<' macro Word32Shl(uint32, uint32): uint32; extern operator '|' macro Word32Or(int32, int32): int32; extern operator '|' macro Word32Or(uint32, uint32): uint32; +extern operator '+' macro Float64Add(float64, float64): float64; + extern operator '+' macro NumberAdd(Number, Number): Number; extern operator '-' macro NumberSub(Number, Number): Number; extern macro NumberMin(Number, Number): Number; extern macro NumberMax(Number, Number): Number; -macro min(x: Number, y: Number): Number { +macro Min(x: Number, y: Number): Number { return NumberMin(x, y); } -macro max(x: Number, y: Number): Number { +macro Max(x: Number, y: Number): Number { return NumberMax(x, y); } @@ -325,8 +346,8 @@ extern operator '.length' macro GetArgumentsLength(constexpr Arguments): intptr; extern operator '[]' macro GetArgumentValue(constexpr Arguments, intptr): Object; -extern operator 'is' macro TaggedIsSmi(Object): bool; -extern operator 'isnt' macro TaggedIsNotSmi(Object): bool; +extern macro TaggedIsSmi(Object): bool; +extern macro TaggedIsNotSmi(Object): bool; extern macro TaggedIsPositiveSmi(Object): bool; extern macro HeapObjectToJSDataView(HeapObject): JSDataView labels CastError; @@ -334,50 +355,54 @@ extern macro TaggedToHeapObject(Object): HeapObject labels CastError; extern macro TaggedToSmi(Object): Smi labels CastError; extern macro HeapObjectToJSArray(HeapObject): JSArray labels CastError; extern macro HeapObjectToCallable(HeapObject): Callable labels CastError; -extern macro HeapObjectToFixedArray(HeapObject): - FixedArray labels CastError; +extern macro HeapObjectToFixedArray(HeapObject): FixedArray labels CastError; extern macro HeapObjectToFixedDoubleArray(HeapObject): FixedDoubleArray labels CastError; extern macro TaggedToNumber(Object): Number labels CastError; -macro cast_HeapObject(o : HeapObject) : A labels CastError; -cast_HeapObject(o : HeapObject) : HeapObject labels CastError { return o; } -cast_HeapObject(o: HeapObject): FixedArray labels CastError { +macro CastHeapObject(o: HeapObject): A labels CastError; +CastHeapObject(o: HeapObject): HeapObject labels CastError { + return o; +} +CastHeapObject(o: HeapObject): FixedArray labels CastError { return HeapObjectToFixedArray(o) otherwise CastError; } -cast_HeapObject(o: HeapObject): FixedDoubleArray labels CastError { +CastHeapObject(o: HeapObject): + FixedDoubleArray labels CastError { return HeapObjectToFixedDoubleArray(o) otherwise CastError; } -cast_HeapObject(o: HeapObject): JSDataView labels CastError { +CastHeapObject(o: HeapObject): JSDataView labels CastError { return HeapObjectToJSDataView(o) otherwise CastError; } -cast_HeapObject(o: HeapObject): Callable labels CastError { +CastHeapObject(o: HeapObject): Callable labels CastError { return HeapObjectToCallable(o) otherwise CastError; } -cast_HeapObject(o: HeapObject): JSArray labels CastError { +CastHeapObject(o: HeapObject): JSArray labels CastError { return HeapObjectToJSArray(o) otherwise CastError; } -macro cast(o: HeapObject): A labels CastError { - return cast_HeapObject(o) otherwise CastError; +macro Cast(o: HeapObject): A labels CastError { + return CastHeapObject(o) 
otherwise CastError; } -// cast_HeapObject allows this default-implementation to be non-recursive. +// CastHeapObject allows this default-implementation to be non-recursive. // Otherwise the generated CSA code might run into infinite recursion. -macro cast(o: Object): A labels CastError { - return cast_HeapObject( - TaggedToHeapObject(o) otherwise CastError) otherwise CastError; +macro Cast(o: Object): A labels CastError { + return CastHeapObject(TaggedToHeapObject(o) otherwise CastError) + otherwise CastError; } -cast(o: Object): Smi labels CastError { +Cast(o: Object): Smi labels CastError { return TaggedToSmi(o) otherwise CastError; } -cast(o: Object): Number labels CastError { +Cast(o: Object): Number labels CastError { return TaggedToNumber(o) otherwise CastError; } extern macro AllocateHeapNumberWithValue(float64): HeapNumber; extern macro ChangeInt32ToTagged(int32): Number; extern macro ChangeUint32ToTagged(uint32): Number; +extern macro ChangeUintPtrToFloat64(uintptr): float64; +extern macro ChangeUintPtrToTagged(uintptr): Number; extern macro Unsigned(int32): uint32; extern macro Unsigned(intptr): uintptr; extern macro Unsigned(RawPtr): uintptr; @@ -410,130 +435,143 @@ extern macro StringConstant(constexpr string): String; extern macro LanguageModeConstant(constexpr LanguageMode): LanguageMode; extern macro Int32Constant(constexpr ElementsKind): ElementsKind; -macro from_constexpr(o: constexpr int31): A; -from_constexpr(i: constexpr int31): intptr { +macro FromConstexpr(o: constexpr int31): A; +FromConstexpr(i: constexpr int31): intptr { return IntPtrConstant(i); } -from_constexpr(i: constexpr int31): int31 { +FromConstexpr(i: constexpr int31): int31 { return Int32Constant(i); } -from_constexpr(i: constexpr int31): int32 { +FromConstexpr(i: constexpr int31): int32 { return Int32Constant(i); } -from_constexpr(i: constexpr int31): uint32 { +FromConstexpr(i: constexpr int31): uint32 { return Unsigned(Int32Constant(i)); } -from_constexpr(i: constexpr int31): uintptr { +FromConstexpr(i: constexpr int31): uintptr { return ChangeUint32ToWord(i); } -from_constexpr(i: constexpr int31): Smi { +FromConstexpr(i: constexpr int31): Smi { return SmiConstant(i); } -from_constexpr(i: constexpr int31): Number { +FromConstexpr(i: constexpr int31): Number { return SmiConstant(i); } -from_constexpr(i: constexpr int31): float64 { +FromConstexpr(i: constexpr int31): float64 { return Float64Constant(i); } -macro from_constexpr(o: constexpr int32): A; -from_constexpr(i: constexpr int32): intptr { +macro FromConstexpr(o: constexpr int32): A; +FromConstexpr(i: constexpr int32): intptr { return IntPtrConstant(i); } -from_constexpr(i: constexpr int32): int32 { +FromConstexpr(i: constexpr int32): int32 { return Int32Constant(i); } -from_constexpr(i: constexpr int32): Number { +FromConstexpr(i: constexpr int32): Number { return NumberConstant(i); } -macro from_constexpr(o: constexpr float64): A; -from_constexpr(f: constexpr float64): Number { +macro FromConstexpr(o: constexpr float64): A; +FromConstexpr(f: constexpr float64): Number { return NumberConstant(f); } -macro from_constexpr(b: constexpr bool): A; -from_constexpr(b: constexpr bool): bool { +macro FromConstexpr(b: constexpr bool): A; +FromConstexpr(b: constexpr bool): bool { return BoolConstant(b); } -macro from_constexpr(l: constexpr LanguageMode): A; -from_constexpr(b: constexpr LanguageMode): LanguageMode { +macro FromConstexpr(l: constexpr LanguageMode): A; +FromConstexpr(b: constexpr LanguageMode): LanguageMode { return LanguageModeConstant(b); } 
-macro from_constexpr(e: constexpr ElementsKind): A; -from_constexpr(e: constexpr ElementsKind): ElementsKind { +macro FromConstexpr(e: constexpr ElementsKind): A; +FromConstexpr(e: constexpr ElementsKind): ElementsKind { return Int32Constant(e); } -macro from_constexpr(s: constexpr string): A; -from_constexpr(s: constexpr string): String { +macro FromConstexpr(s: constexpr string): A; +FromConstexpr(s: constexpr string): String { return StringConstant(s); } -from_constexpr(s: constexpr string): Object { +FromConstexpr(s: constexpr string): Object { return StringConstant(s); } -macro convert(i: constexpr int31): A { +macro Convert(i: constexpr int31): A { return i; } -macro convert(i: int32): A; -convert(i: int32): Number { +extern macro ConvertElementsKindToInt(ElementsKind): int32; + +macro Convert(elementsKind: ElementsKind): A; +Convert(elementsKind: ElementsKind): int32 { + return ConvertElementsKindToInt(elementsKind); +} + +macro Convert(i: int32): A; +Convert(i: int32): Number { return ChangeInt32ToTagged(i); } -convert(i: int32): intptr { +Convert(i: int32): intptr { return ChangeInt32ToIntPtr(i); } -convert(i: int32): Smi { +Convert(i: int32): Smi { return SmiFromInt32(i); } -macro convert(ui: uint32): A; -convert(ui: uint32): Number { +macro Convert(ui: uint32): A; +Convert(ui: uint32): Number { return ChangeUint32ToTagged(ui); } -convert(ui: uint32): Smi { +Convert(ui: uint32): Smi { return SmiFromInt32(Signed(ui)); } -convert(ui: uint32): uintptr { +Convert(ui: uint32): uintptr { return ChangeUint32ToWord(ui); } -macro convert(i: intptr): A; -convert(i: intptr): int32 { +macro Convert(i: intptr): A; +Convert(i: intptr): int32 { return TruncateIntPtrToInt32(i); } -convert(i: intptr): Smi { +Convert(i: intptr): Smi { return SmiTag(i); } -macro convert(ui: uintptr): A; -convert(ui: uintptr): uint32 { +macro Convert(ui: uintptr): A; +Convert(ui: uintptr): uint32 { return Unsigned(TruncateIntPtrToInt32(Signed(ui))); } -macro convert(s: Smi): A; -convert(s: Smi): intptr { +macro Convert(s: Smi): A; +Convert(s: Smi): intptr { return SmiUntag(s); } -convert(s: Smi): int32 { +Convert(s: Smi): int32 { return SmiToInt32(s); } -macro convert(h: HeapNumber): A; -convert(h: HeapNumber): float64 { +macro Convert(h: HeapNumber): A; +Convert(h: HeapNumber): float64 { return LoadHeapNumberValue(h); } -macro convert(n: Number): A; -convert(n: Number): float64 { +macro Convert(n: Number): A; +Convert(n: Number): float64 { return ChangeNumberToFloat64(n); } -macro convert(f: float32): A; -convert(f: float32): float64 { +macro Convert(f: float32): A; +Convert(f: float32): float64 { return ChangeFloat32ToFloat64(f); } -macro convert(d: float64): A; -convert(d: float64): Number { +macro Convert(d: float64): A; +Convert(d: float64): Number { return AllocateHeapNumberWithValue(d); } -convert(d: float64): uintptr { +Convert(ui: uintptr): float64 { + return ChangeUintPtrToFloat64(ui); +} +Convert(ui: uintptr): Number { + return ChangeUintPtrToTagged(ui); +} +Convert(d: float64): uintptr { return ChangeFloat64ToUintPtr(d); } -macro convert(r: RawPtr): A; -convert(r: RawPtr): uintptr { +macro Convert(r: RawPtr): A; +Convert(r: RawPtr): uintptr { return Unsigned(r); } -convert(r: RawPtr): intptr { +Convert(r: RawPtr): intptr { return Signed(r); } @@ -553,62 +591,69 @@ extern macro UnsafeCastObjectToJSReceiver(Object): JSReceiver; extern macro UnsafeCastObjectToJSObject(Object): JSObject; extern macro UnsafeCastObjectToMap(Object): Map; -macro unsafe_cast(n: Number): A; -unsafe_cast(n: Number): HeapNumber { 
+macro UnsafeCast<A: type>(n: Number): A;
+UnsafeCast<HeapNumber>(n: Number): HeapNumber {
   return UnsafeCastNumberToHeapNumber(n);
 }
-macro unsafe_cast<A: type>(o: Object): A;
-unsafe_cast<FixedArray>(o: Object): FixedArray {
+macro UnsafeCast<A: type>(o: Object): A;
+UnsafeCast<Object>(o: Object): Object {
+  return o;
+}
+UnsafeCast<FixedArray>(o: Object): FixedArray {
   return UnsafeCastObjectToFixedArray(o);
 }
-unsafe_cast<FixedDoubleArray>(o: Object): FixedDoubleArray {
+UnsafeCast<FixedDoubleArray>(o: Object): FixedDoubleArray {
   return UnsafeCastObjectToFixedDoubleArray(o);
 }
-unsafe_cast<HeapNumber>(o: Object): HeapNumber {
+UnsafeCast<HeapNumber>(o: Object): HeapNumber {
   return UnsafeCastObjectToHeapNumber(o);
 }
-unsafe_cast<Callable>(o: Object): Callable {
+UnsafeCast<Callable>(o: Object): Callable {
   return UnsafeCastObjectToCallable(o);
 }
-unsafe_cast<Smi>(o: Object): Smi {
+UnsafeCast<Smi>(o: Object): Smi {
   return UnsafeCastObjectToSmi(o);
 }
-unsafe_cast<Number>(o: Object): Number {
+UnsafeCast<Number>(o: Object): Number {
   return UnsafeCastObjectToNumber(o);
 }
-unsafe_cast<HeapObject>(o: Object): HeapObject {
+UnsafeCast<HeapObject>(o: Object): HeapObject {
   return UnsafeCastObjectToHeapObject(o);
 }
-unsafe_cast<JSArray>(o: Object): JSArray {
+UnsafeCast<JSArray>(o: Object): JSArray {
   return UnsafeCastObjectToJSArray(o);
 }
-unsafe_cast<FixedTypedArrayBase>(o: Object): FixedTypedArrayBase {
+UnsafeCast<FixedTypedArrayBase>(o: Object): FixedTypedArrayBase {
   return UnsafeCastObjectToFixedTypedArrayBase(o);
 }
-unsafe_cast<NumberDictionary>(o: Object): NumberDictionary {
+UnsafeCast<NumberDictionary>(o: Object): NumberDictionary {
   return UnsafeCastObjectToNumberDictionary(o);
 }
-unsafe_cast<JSReceiver>(o: Object): JSReceiver {
+UnsafeCast<JSReceiver>(o: Object): JSReceiver {
   return UnsafeCastObjectToJSReceiver(o);
 }
-unsafe_cast<JSObject>(o: Object): JSObject {
+UnsafeCast<JSObject>(o: Object): JSObject {
   return UnsafeCastObjectToJSObject(o);
 }
-unsafe_cast<Map>(o: Object): Map {
+UnsafeCast<Map>(o: Object): Map {
   return UnsafeCastObjectToMap(o);
 }
-unsafe_cast<FixedArrayBase>(o: Object): FixedArrayBase {
+UnsafeCast<FixedArrayBase>(o: Object): FixedArrayBase {
   return UnsafeCastObjectToFixedArrayBase(o);
 }
 
-const kCOWMap: Map = unsafe_cast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
+const kCOWMap: Map = UnsafeCast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
 const kEmptyFixedArray: FixedArrayBase =
-    unsafe_cast<FixedArrayBase>(LoadRoot(kEmptyFixedArrayRootIndex));
+    UnsafeCast<FixedArrayBase>(LoadRoot(kEmptyFixedArrayRootIndex));
 
 extern macro BranchIfFastJSArray(Object, Context): never
     labels Taken, NotTaken;
 extern macro BranchIfNotFastJSArray(Object, Context): never
     labels Taken, NotTaken;
 
+macro EnsureFastJSArray(context: Context, object: Object) labels Bailout {
+  if (BranchIfNotFastJSArray(object, context)) goto Bailout;
+}
+
 extern macro IsPrototypeInitialArrayPrototype(Context, Map): bool;
 extern macro IsNoElementsProtectorCellInvalid(): bool;
 extern macro IsArraySpeciesProtectorCellInvalid(): bool;
@@ -627,7 +672,7 @@ extern operator
 extern operator
     '.elements' macro LoadElements(JSObject): FixedArrayBase;
 extern operator '.elements=' macro StoreElements(JSObject, FixedArrayBase);
-extern operator '.length' macro LoadTypedArrayLength(JSTypedArray): Smi;
+extern operator '.length' macro LoadJSTypedArrayLength(JSTypedArray): Smi;
 extern operator '.length' macro LoadJSArrayLength(JSArray): Number;
 extern operator '.length_fast' macro LoadFastJSArrayLength(JSArray): Smi;
 extern operator '.length=' macro StoreJSArrayLength(JSArray, Smi);
@@ -640,13 +685,16 @@ extern operator
 extern operator
     '[]=' macro StoreFixedArrayElement(FixedArray, intptr, Object): void;
 extern operator
-'[]=' macro StoreFixedArrayElement(
-    FixedArray, constexpr int31, Object): void;
+'[]=' macro StoreFixedArrayElement(FixedArray, constexpr int31, Object): void;
 extern operator
 '[]=' macro StoreFixedArrayElementSmi(FixedArray,
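
The Convert<A>/FromConstexpr<A> macros earlier in this hunk, and the UnsafeCast<A> family just above, all dispatch on the *requested return type*: the caller writes Convert<intptr>(x) and Torque picks the matching specialization. Plain C++ overload resolution cannot do that, but explicit function-template specialization reproduces the pattern. A standalone C++ sketch of the same dispatch style (illustrative only, not V8 code; the double/intptr_t mapping below is an assumption, not V8's tagging scheme):

    #include <cstdint>
    #include <iostream>

    // Primary template: like "macro Convert<A: type>(i: int32): A;" it is
    // declared but never defined, so only specializations are callable.
    template <typename A>
    A Convert(int32_t i);

    // Stand-in for "Convert<Number>(i: int32): Number".
    template <>
    double Convert<double>(int32_t i) {
      return static_cast<double>(i);
    }

    // Stand-in for "Convert<intptr>(i: int32): intptr".
    template <>
    intptr_t Convert<intptr_t>(int32_t i) {
      return static_cast<intptr_t>(i);
    }

    int main() {
      // As in Torque, the call site names the requested result type.
      std::cout << Convert<double>(42) << " " << Convert<intptr_t>(42) << "\n";
      return 0;
    }
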
Smi, Object): void; +operator '[]=' macro StoreFixedDoubleArrayNumber( + a: FixedDoubleArray, index: Smi, value: Number): void { + a[index] = Convert(value); +} -extern macro StoreFixedArrayElementSmi(FixedArray, Smi, Object, - constexpr WriteBarrierMode): void; +extern macro StoreFixedArrayElementSmi( + FixedArray, Smi, Object, constexpr WriteBarrierMode): void; extern operator '.instance_type' macro LoadMapInstanceType(Map): int32; @@ -672,6 +720,28 @@ extern macro IsFastSmiOrTaggedElementsKind(ElementsKind): bool; extern macro IsFastSmiElementsKind(ElementsKind): bool; extern macro IsHoleyFastElementsKind(ElementsKind): bool; +macro AllowDoubleElements(kind: ElementsKind): ElementsKind { + if (kind == PACKED_SMI_ELEMENTS) { + return PACKED_DOUBLE_ELEMENTS; + } else if (kind == HOLEY_SMI_ELEMENTS) { + return HOLEY_DOUBLE_ELEMENTS; + } + return kind; +} + +macro AllowNonNumberElements(kind: ElementsKind): ElementsKind { + if (kind == PACKED_SMI_ELEMENTS) { + return PACKED_ELEMENTS; + } else if (kind == HOLEY_SMI_ELEMENTS) { + return HOLEY_ELEMENTS; + } else if (kind == PACKED_DOUBLE_ELEMENTS) { + return PACKED_ELEMENTS; + } else if (kind == HOLEY_DOUBLE_ELEMENTS) { + return HOLEY_ELEMENTS; + } + return kind; +} + extern macro AllocateZeroedFixedArray(intptr): FixedArray; extern macro AllocateZeroedFixedDoubleArray(intptr): FixedDoubleArray; @@ -687,8 +757,13 @@ extern macro AllocateJSArray(constexpr ElementsKind, Map, Smi, Smi): JSArray; extern macro IsElementsKindGreaterThan( ElementsKind, constexpr ElementsKind): bool; +extern operator +'[]=' macro StoreFixedDoubleArrayElementSmi( + FixedDoubleArray, Smi, float64): void; + extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64 labels IfHole; +extern macro StoreFixedDoubleArrayHoleSmi(FixedDoubleArray, Smi): void; extern macro Call(Context, Callable, Object): Object; extern macro Call(Context, Callable, Object, Object): Object; @@ -699,8 +774,9 @@ extern macro Call( extern macro Call( Context, Callable, Object, Object, Object, Object, Object, Object): Object; -extern macro ExtractFixedArray( - FixedArray, Smi, Smi, Smi, constexpr ExtractFixedArrayFlags): FixedArray; +extern macro ExtractFixedArray(FixedArrayBase, Smi, Smi, Smi): FixedArrayBase; +extern macro ExtractFixedArray(FixedArrayBase, Smi, Smi, Smi, + constexpr ExtractFixedArrayFlags): FixedArrayBase; extern builtin ExtractFastJSArray(Context, JSArray, Smi, Smi): JSArray; @@ -710,7 +786,7 @@ LoadElementNoHole(a: JSArray, index: Smi): Object labels IfHole { try { let elements: FixedArray = - cast(a.elements) otherwise Unexpected; + Cast(a.elements) otherwise Unexpected; let e: Object = elements[index]; if (e == Hole) { goto IfHole; @@ -726,7 +802,7 @@ LoadElementNoHole(a: JSArray, index: Smi): Object labels IfHole { try { let elements: FixedDoubleArray = - cast(a.elements) otherwise Unexpected; + Cast(a.elements) otherwise Unexpected; let e: float64 = LoadDoubleWithHoleCheck(elements, index) otherwise IfHole; return AllocateHeapNumberWithValue(e); } @@ -735,15 +811,22 @@ labels IfHole { } } +extern macro TransitionElementsKind( + JSObject, Map, ElementsKind, ElementsKind): void labels Bailout; + extern macro IsCallable(HeapObject): bool; extern macro IsJSArray(HeapObject): bool; +extern macro IsJSReceiver(HeapObject): bool; extern macro TaggedIsCallable(Object): bool; extern macro IsDetachedBuffer(JSArrayBuffer): bool; extern macro IsHeapNumber(HeapObject): bool; extern macro IsFixedArray(HeapObject): bool; +extern macro IsNumber(Object): bool; extern macro 
IsExtensibleMap(Map): bool; extern macro IsCustomElementsReceiverInstanceType(int32): bool; +extern macro IsFastJSArray(Object, Context): bool; extern macro Typeof(Object): Object; +extern macro LoadTargetFromFrame(): JSFunction; // Return true iff number is NaN. macro NumberIsNaN(number: Number): bool { @@ -751,9 +834,9 @@ macro NumberIsNaN(number: Number): bool { case (Smi) { return false; } case (hn : HeapNumber) { - let value: float64 = convert(hn); - return value != value; - } + let value: float64 = Convert(hn); + return value != value; + } } } diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc index 95f5f2ebbd38f5..8bc5c0b5ac200f 100644 --- a/deps/v8/src/builtins/builtins-api.cc +++ b/deps/v8/src/builtins/builtins-api.cc @@ -149,7 +149,7 @@ class RelocatableArguments : public BuiltinArguments, public Relocatable { RelocatableArguments(Isolate* isolate, int length, Object** arguments) : BuiltinArguments(length, arguments), Relocatable(isolate) {} - virtual inline void IterateInstance(RootVisitor* v) { + inline void IterateInstance(RootVisitor* v) override { if (length() == 0) return; v->VisitRootPointers(Root::kRelocatable, nullptr, lowest_address(), highest_address() + 1); diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc index c82cef391940c0..0e22db2598d0cb 100644 --- a/deps/v8/src/builtins/builtins-arguments-gen.cc +++ b/deps/v8/src/builtins/builtins-arguments-gen.cc @@ -89,7 +89,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map, Node* result = Allocate(size); Comment("Initialize arguments object"); StoreMapNoWriteBarrier(result, map); - Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex); + Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray); StoreObjectField(result, JSArray::kPropertiesOrHashOffset, empty_fixed_array); Node* smi_arguments_count = ParameterToTagged(arguments_count, mode); StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, @@ -99,7 +99,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map, arguments = InnerAllocate(result, elements_offset); StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset, smi_arguments_count); - Node* fixed_array_map = LoadRoot(Heap::kFixedArrayMapRootIndex); + Node* fixed_array_map = LoadRoot(RootIndex::kFixedArrayMap); StoreMapNoWriteBarrier(arguments, fixed_array_map); } Node* parameter_map = nullptr; @@ -110,7 +110,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map, StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, parameter_map); Node* sloppy_elements_map = - LoadRoot(Heap::kSloppyArgumentsElementsMapRootIndex); + LoadRoot(RootIndex::kSloppyArgumentsElementsMap); StoreMapNoWriteBarrier(parameter_map, sloppy_elements_map); parameter_map_count = ParameterToTagged(parameter_map_count, mode); StoreObjectFieldNoWriteBarrier(parameter_map, FixedArray::kLengthOffset, diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index d61a5164228c7a..1cc8c7ce8d977a 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -212,7 +212,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) { context(), original_array, length, method_name); // In the Spec and our current implementation, the length check is already // performed in TypedArraySpeciesCreate. 
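
The AllowDoubleElements and AllowNonNumberElements macros added to base.tq earlier in this diff only ever generalize an elements kind, never narrow it: Smi kinds widen to double kinds, and any numeric kind widens to the generic tagged kind while preserving packed-vs-holey. A self-contained C++ mirror of that lattice walk (the enum below is illustrative; V8's real ElementsKind enum has many more members):

    #include <cassert>

    // Only the six kinds the Torque macros touch.
    enum ElementsKind {
      PACKED_SMI_ELEMENTS,
      HOLEY_SMI_ELEMENTS,
      PACKED_DOUBLE_ELEMENTS,
      HOLEY_DOUBLE_ELEMENTS,
      PACKED_ELEMENTS,
      HOLEY_ELEMENTS,
    };

    // Same logic as the Torque AllowDoubleElements.
    ElementsKind AllowDoubleElements(ElementsKind kind) {
      if (kind == PACKED_SMI_ELEMENTS) return PACKED_DOUBLE_ELEMENTS;
      if (kind == HOLEY_SMI_ELEMENTS) return HOLEY_DOUBLE_ELEMENTS;
      return kind;
    }

    // Same logic as the Torque AllowNonNumberElements.
    ElementsKind AllowNonNumberElements(ElementsKind kind) {
      switch (kind) {
        case PACKED_SMI_ELEMENTS:
        case PACKED_DOUBLE_ELEMENTS:
          return PACKED_ELEMENTS;
        case HOLEY_SMI_ELEMENTS:
        case HOLEY_DOUBLE_ELEMENTS:
          return HOLEY_ELEMENTS;
        default:
          return kind;
      }
    }

    int main() {
      // Storing 1.5 into a packed-Smi array forces a double backing store;
      // storing a non-number afterwards forces tagged elements, still packed.
      assert(AllowDoubleElements(PACKED_SMI_ELEMENTS) == PACKED_DOUBLE_ELEMENTS);
      assert(AllowNonNumberElements(PACKED_DOUBLE_ELEMENTS) == PACKED_ELEMENTS);
      return 0;
    }
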
- CSA_ASSERT(this, SmiLessThanOrEqual(CAST(len_), LoadTypedArrayLength(a))); + CSA_ASSERT(this, SmiLessThanOrEqual(CAST(len_), LoadJSTypedArrayLength(a))); fast_typed_array_target_ = Word32Equal(LoadInstanceType(LoadElements(original_array)), LoadInstanceType(LoadElements(a))); @@ -530,10 +530,11 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) { TNode typed_array = CAST(receiver_); o_ = typed_array; - TNode array_buffer = LoadArrayBufferViewBuffer(typed_array); + TNode array_buffer = + LoadJSArrayBufferViewBuffer(typed_array); ThrowIfArrayBufferIsDetached(context_, array_buffer, name_); - len_ = LoadTypedArrayLength(typed_array); + len_ = LoadJSTypedArrayLength(typed_array); Label throw_not_callable(this, Label::kDeferred); Label distinguish_types(this); @@ -964,8 +965,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) { // 3) Check that the elements backing store isn't copy-on-write. Node* elements = LoadElements(array_receiver); - GotoIf(WordEqual(LoadMap(elements), - LoadRoot(Heap::kFixedCOWArrayMapRootIndex)), + GotoIf(WordEqual(LoadMap(elements), LoadRoot(RootIndex::kFixedCOWArrayMap)), &runtime); Node* new_length = IntPtrSub(length, IntPtrConstant(1)); @@ -1524,19 +1524,23 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) { { TNode array_receiver = CAST(receiver); CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver))); + + // 2) Ensure that the length is writable. + // This check needs to happen before the check for length zero. + // The spec requires a "SetProperty(array, 'length', 0)" call when + // the length is zero. This must throw an exception in the case of a + // read-only length. + EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime); + Node* length = LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset); Label return_undefined(this), fast_elements_tagged(this), fast_elements_smi(this); GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined); - // 2) Ensure that the length is writable. - EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime); - // 3) Check that the elements backing store isn't copy-on-write. Node* elements = LoadElements(array_receiver); - GotoIf(WordEqual(LoadMap(elements), - LoadRoot(Heap::kFixedCOWArrayMapRootIndex)), + GotoIf(WordEqual(LoadMap(elements), LoadRoot(RootIndex::kFixedCOWArrayMap)), &runtime); Node* new_length = IntPtrSub(length, IntPtrConstant(1)); @@ -1679,10 +1683,12 @@ TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) { TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) { TNode context = CAST(Parameter(Descriptor::kContext)); - Node* array = Parameter(Descriptor::kSource); + TNode array = CAST(Parameter(Descriptor::kSource)); - CSA_ASSERT(this, IsJSArray(array)); - CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid())); + CSA_ASSERT(this, + Word32Or(Word32BinaryNot(IsHoleyFastElementsKind( + LoadMapElementsKind(LoadMap(array)))), + Word32BinaryNot(IsNoElementsProtectorCellInvalid()))); ParameterMode mode = OptimalParameterMode(); Return(CloneFastJSArray(context, array, mode)); @@ -3654,7 +3660,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { // [[ArrayIteratorNextIndex]] anymore, since a JSTypedArray's // length cannot change anymore, so this {iterator} will never // produce values again anyways. 
- TNode length = LoadTypedArrayLength(CAST(array)); + TNode length = LoadJSTypedArrayLength(CAST(array)); GotoIfNot(SmiBelow(CAST(index), length), &allocate_iterator_result); StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset, SmiInc(CAST(index))); @@ -3701,8 +3707,6 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { } } -namespace { - class ArrayFlattenAssembler : public CodeStubAssembler { public: explicit ArrayFlattenAssembler(compiler::CodeAssemblerState* state) @@ -3843,8 +3847,6 @@ class ArrayFlattenAssembler : public CodeStubAssembler { } }; -} // namespace - // https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) { Node* const context = Parameter(Descriptor::kContext); diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc index ceeee5f37db86e..1e9de3dbe3df74 100644 --- a/deps/v8/src/builtins/builtins-array.cc +++ b/deps/v8/src/builtins/builtins-array.cc @@ -23,34 +23,6 @@ namespace internal { namespace { -inline bool ClampedToInteger(Isolate* isolate, Object* object, int* out) { - // This is an extended version of ECMA-262 7.1.11 handling signed values - // Try to convert object to a number and clamp values to [kMinInt, kMaxInt] - if (object->IsSmi()) { - *out = Smi::ToInt(object); - return true; - } else if (object->IsHeapNumber()) { - double value = HeapNumber::cast(object)->value(); - if (std::isnan(value)) { - *out = 0; - } else if (value > kMaxInt) { - *out = kMaxInt; - } else if (value < kMinInt) { - *out = kMinInt; - } else { - *out = static_cast(value); - } - return true; - } else if (object->IsNullOrUndefined(isolate)) { - *out = 0; - return true; - } else if (object->IsBoolean()) { - *out = object->IsTrue(isolate); - return true; - } - return false; -} - inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate, JSArray* receiver) { return JSObject::PrototypeHasNoElements(isolate, receiver); @@ -79,36 +51,20 @@ inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) { return true; } -// Returns |false| if not applicable. -// TODO(szuend): Refactor this function because it is getting hard to -// understand what each call-site actually checks. -V8_WARN_UNUSED_RESULT -inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate, - Handle receiver, - BuiltinArguments* args, - int first_arg_index, - int num_arguments) { - if (!receiver->IsJSArray()) return false; - Handle array = Handle::cast(receiver); - ElementsKind origin_kind = array->GetElementsKind(); - if (IsDictionaryElementsKind(origin_kind)) return false; - if (!array->map()->is_extensible()) return false; - if (args == nullptr) return true; - - // If there may be elements accessors in the prototype chain, the fast path - // cannot be used if there arguments to add to the array. - if (!IsJSArrayFastElementMovingAllowed(isolate, *array)) return false; +// This method may transition the elements kind of the JSArray once, to make +// sure that all elements provided as arguments in the specified range can be +// added without further elements kinds transitions. +void MatchArrayElementsKindToArguments(Isolate* isolate, Handle array, + BuiltinArguments* args, + int first_arg_index, int num_arguments) { + int args_length = args->length(); + if (first_arg_index >= args_length) return; - // Adding elements to the array prototype would break code that makes sure - // it has no elements. Handle that elsewhere. 
-  if (isolate->IsAnyInitialArrayPrototype(array)) return false;
+  ElementsKind origin_kind = array->GetElementsKind();
 
-  // Need to ensure that the arguments passed in args can be contained in
-  // the array.
-  int args_length = args->length();
-  if (first_arg_index >= args_length) return true;
+  // We do not need to transition for PACKED/HOLEY_ELEMENTS.
+  if (IsObjectElementsKind(origin_kind)) return;
 
-  if (IsObjectElementsKind(origin_kind)) return true;
   ElementsKind target_kind = origin_kind;
   {
     DisallowHeapAllocation no_gc;
@@ -131,20 +87,37 @@ inline bool EnsureJSArrayWithWritableFastElements(
     HandleScope scope(isolate);
     JSObject::TransitionElementsKind(array, target_kind);
   }
-  return true;
 }
 
-V8_WARN_UNUSED_RESULT static Object* CallJsIntrinsic(
-    Isolate* isolate, Handle<JSFunction> function, BuiltinArguments args) {
-  HandleScope handleScope(isolate);
-  int argc = args.length() - 1;
-  ScopedVector<Handle<Object>> argv(argc);
-  for (int i = 0; i < argc; ++i) {
-    argv[i] = args.at(i + 1);
-  }
-  RETURN_RESULT_OR_FAILURE(
-      isolate,
-      Execution::Call(isolate, function, args.receiver(), argc, argv.start()));
+// Returns |false| if not applicable.
+// TODO(szuend): Refactor this function because it is getting hard to
+// understand what each call-site actually checks.
+V8_WARN_UNUSED_RESULT
+inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
+                                                  Handle<Object> receiver,
+                                                  BuiltinArguments* args,
+                                                  int first_arg_index,
+                                                  int num_arguments) {
+  if (!receiver->IsJSArray()) return false;
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  ElementsKind origin_kind = array->GetElementsKind();
+  if (IsDictionaryElementsKind(origin_kind)) return false;
+  if (!array->map()->is_extensible()) return false;
+  if (args == nullptr) return true;
+
+  // If there may be elements accessors in the prototype chain, the fast path
+  // cannot be used if there are arguments to add to the array.
+  if (!IsJSArrayFastElementMovingAllowed(isolate, *array)) return false;
+
+  // Adding elements to the array prototype would break code that makes sure
+  // it has no elements. Handle that elsewhere.
+  if (isolate->IsAnyInitialArrayPrototype(array)) return false;
+
+  // Need to ensure that the arguments passed in args can be contained in
+  // the array.
+  MatchArrayElementsKindToArguments(isolate, array, args, first_arg_index,
                                    num_arguments);
+  return true;
 }
 
 // If |index| is Undefined, returns init_if_undefined.
@@ -189,6 +162,24 @@ V8_WARN_UNUSED_RESULT Maybe<double> GetLengthProperty(
   return Just(raw_length_number->Number());
 }
 
+// Sets the "length" property; uses the fast path for JSArrays.
+// Returns an empty MaybeHandle if something went wrong.
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> SetLengthProperty(
+    Isolate* isolate, Handle<JSReceiver> receiver, double length) {
+  if (receiver->IsJSArray()) {
+    Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+    if (!JSArray::HasReadOnlyLength(array)) {
+      DCHECK_LE(length, kMaxUInt32);
+      JSArray::SetLength(array, static_cast<uint32_t>(length));
+      return receiver;
+    }
+  }
+
+  return Object::SetProperty(
+      isolate, receiver, isolate->factory()->length_string(),
+      isolate->factory()->NewNumber(length), LanguageMode::kStrict);
+}
+
 V8_WARN_UNUSED_RESULT Object* GenericArrayFill(Isolate* isolate,
                                                Handle<JSReceiver> receiver,
                                                Handle<Object> value,
@@ -350,7 +341,7 @@ V8_WARN_UNUSED_RESULT Object* GenericArrayPush(Isolate* isolate,
     // Must succeed since we always pass a valid key.
     DCHECK(success);
     MAYBE_RETURN(Object::SetProperty(&it, element, LanguageMode::kStrict,
-                                     Object::MAY_BE_STORE_FROM_KEYED),
+                                     StoreOrigin::kMaybeKeyed),
                 ReadOnlyRoots(isolate).exception());
   }
 
@@ -485,110 +476,141 @@ BUILTIN(ArrayPop) {
   return *result;
 }
 
-BUILTIN(ArrayShift) {
-  HandleScope scope(isolate);
-  Heap* heap = isolate->heap();
-  Handle<Object> receiver = args.receiver();
+namespace {
+
+// Returns true iff we can use ElementsAccessor for shifting.
+V8_WARN_UNUSED_RESULT bool CanUseFastArrayShift(Isolate* isolate,
+                                                Handle<JSReceiver> receiver) {
   if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0,
                                              0) ||
      !IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
-    return CallJsIntrinsic(isolate, isolate->array_shift(), args);
+    return false;
  }
+
+  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  return !JSArray::HasReadOnlyLength(array);
+}
 
-  int len = Smi::ToInt(array->length());
-  if (len == 0) return ReadOnlyRoots(heap).undefined_value();
+V8_WARN_UNUSED_RESULT Object* GenericArrayShift(Isolate* isolate,
+                                                Handle<JSReceiver> receiver,
+                                                double length) {
+  // 4. Let first be ? Get(O, "0").
+  Handle<Object> first;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, first,
+                                     Object::GetElement(isolate, receiver, 0));
+
+  // 5. Let k be 1.
+  double k = 1;
+
+  // 6. Repeat, while k < len.
+  while (k < length) {
+    // a. Let from be ! ToString(k).
+    Handle<String> from =
+        isolate->factory()->NumberToString(isolate->factory()->NewNumber(k));
+
+    // b. Let to be ! ToString(k-1).
+    Handle<String> to = isolate->factory()->NumberToString(
+        isolate->factory()->NewNumber(k - 1));
+
+    // c. Let fromPresent be ? HasProperty(O, from).
+    bool from_present;
+    MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, from_present, JSReceiver::HasProperty(receiver, from));
+
+    // d. If fromPresent is true, then.
+    if (from_present) {
+      // i. Let fromVal be ? Get(O, from).
+      Handle<Object> from_val;
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, from_val,
+          Object::GetPropertyOrElement(isolate, receiver, from));
+
+      // ii. Perform ? Set(O, to, fromVal, true).
+      RETURN_FAILURE_ON_EXCEPTION(
+          isolate, Object::SetPropertyOrElement(isolate, receiver, to, from_val,
+                                                LanguageMode::kStrict));
+    } else {  // e. Else fromPresent is false,
+      // i. Perform ? DeletePropertyOrThrow(O, to).
+      MAYBE_RETURN(JSReceiver::DeletePropertyOrElement(receiver, to,
+                                                       LanguageMode::kStrict),
+                   ReadOnlyRoots(isolate).exception());
+    }
 
-  if (JSArray::HasReadOnlyLength(array)) {
-    return CallJsIntrinsic(isolate, isolate->array_shift(), args);
+    // f. Increase k by 1.
+    ++k;
   }
 
-  Handle<Object> first = array->GetElementsAccessor()->Shift(array);
+  // 7. Perform ? DeletePropertyOrThrow(O, ! ToString(len-1)).
+  Handle<String> new_length = isolate->factory()->NumberToString(
+      isolate->factory()->NewNumber(length - 1));
+  MAYBE_RETURN(JSReceiver::DeletePropertyOrElement(receiver, new_length,
+                                                   LanguageMode::kStrict),
+               ReadOnlyRoots(isolate).exception());
+
+  // 8. Perform ? Set(O, "length", len-1, true).
+  RETURN_FAILURE_ON_EXCEPTION(isolate,
+                              SetLengthProperty(isolate, receiver, length - 1));
+
+  // 9. Return first.
   return *first;
 }
+
+}  // namespace
 
-BUILTIN(ArrayUnshift) {
+BUILTIN(ArrayShift) {
   HandleScope scope(isolate);
-  Handle<Object> receiver = args.receiver();
-  if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1,
-                                             args.length() - 1)) {
-    return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
-  }
-  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
-  int to_add = args.length() - 1;
-  if (to_add == 0) return array->length();
 
-  // Currently fixed arrays cannot grow too big, so we should never hit this.
-  DCHECK_LE(to_add, Smi::kMaxValue - Smi::ToInt(array->length()));
+  // 1. Let O be ? ToObject(this value).
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, receiver, Object::ToObject(isolate, args.receiver()));
 
-  if (JSArray::HasReadOnlyLength(array)) {
-    return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
+  // 2. Let len be ? ToLength(? Get(O, "length")).
+  double length;
+  MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, length, GetLengthProperty(isolate, receiver));
+
+  // 3. If len is zero, then.
+  if (length == 0) {
+    // a. Perform ? Set(O, "length", 0, true).
+    RETURN_FAILURE_ON_EXCEPTION(isolate,
+                                SetLengthProperty(isolate, receiver, length));
+
+    // b. Return undefined.
+    return ReadOnlyRoots(isolate).undefined_value();
   }
 
-  ElementsAccessor* accessor = array->GetElementsAccessor();
-  int new_length = accessor->Unshift(array, &args, to_add);
-  return Smi::FromInt(new_length);
+  if (CanUseFastArrayShift(isolate, receiver)) {
+    Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+    return *array->GetElementsAccessor()->Shift(array);
+  }
+
+  return GenericArrayShift(isolate, receiver, length);
 }
 
-BUILTIN(ArraySplice) {
+BUILTIN(ArrayUnshift) {
   HandleScope scope(isolate);
-  Handle<Object> receiver = args.receiver();
-  if (V8_UNLIKELY(
-          !EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3,
-                                                 args.length() - 3) ||
-          // If this is a subclass of Array, then call out to JS.
-          !Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
-          // If anything with @@species has been messed with, call out to JS.
-          !isolate->IsArraySpeciesLookupChainIntact())) {
-    return CallJsIntrinsic(isolate, isolate->array_splice(), args);
-  }
-  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+  DCHECK(args.receiver()->IsJSArray());
+  Handle<JSArray> array = Handle<JSArray>::cast(args.receiver());
 
-  int argument_count = args.length() - 1;
-  int relative_start = 0;
-  if (argument_count > 0) {
-    DisallowHeapAllocation no_gc;
-    if (!ClampedToInteger(isolate, args[1], &relative_start)) {
-      AllowHeapAllocation allow_allocation;
-      return CallJsIntrinsic(isolate, isolate->array_splice(), args);
-    }
-  }
-  int len = Smi::ToInt(array->length());
-  // clip relative start to [0, len]
-  int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
-                                          : Min(relative_start, len);
-
-  int actual_delete_count;
-  if (argument_count == 1) {
-    // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
-    // given as a request to delete all the elements from the start.
-    // And it differs from the case of undefined delete count.
-    // This does not follow ECMA-262, but we do the same for compatibility.
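
A compilable miniature may help with the GenericArrayShift fallback above: the sketch below replays spec steps 4-9 of Array.prototype.shift against a std::map standing in for an arbitrary receiver, with holes modeled as missing keys (illustrative only; exceptions and the "length" property write are omitted):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    // Stand-in for an arbitrary JS receiver: a sparse index->value mapping.
    using Receiver = std::map<uint64_t, std::string>;

    // Spec steps 4-9 of Array.prototype.shift, expressed purely through
    // property reads, writes and deletes, as GenericArrayShift does.
    std::string GenericShift(Receiver& o, uint64_t len) {
      std::string first = o.count(0) ? o[0] : "undefined";  // 4. Get(O, "0")
      for (uint64_t k = 1; k < len; ++k) {                  // 5.-6. walk k
        if (o.count(k)) {
          o[k - 1] = o[k];  // d. Set(O, ToString(k-1), fromVal)
        } else {
          o.erase(k - 1);   // e. DeletePropertyOrThrow(O, to)
        }
      }
      o.erase(len - 1);     // 7. delete the trailing slot
      // 8. would be Set(O, "length", len-1); the map has no length property.
      return first;         // 9. return first
    }

    int main() {
      Receiver r{{0, "a"}, {2, "c"}};           // a hole at index 1
      std::cout << GenericShift(r, 3) << "\n";  // prints "a"; r becomes {1:"c"}
      return 0;
    }
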
- DCHECK_GE(len - actual_start, 0); - actual_delete_count = len - actual_start; - } else { - int delete_count = 0; - DisallowHeapAllocation no_gc; - if (argument_count > 1) { - if (!ClampedToInteger(isolate, args[2], &delete_count)) { - AllowHeapAllocation allow_allocation; - return CallJsIntrinsic(isolate, isolate->array_splice(), args); - } - } - actual_delete_count = Min(Max(delete_count, 0), len - actual_start); - } + // These are checked in the Torque builtin. + DCHECK(array->map()->is_extensible()); + DCHECK(!IsDictionaryElementsKind(array->GetElementsKind())); + DCHECK(IsJSArrayFastElementMovingAllowed(isolate, *array)); + DCHECK(!isolate->IsAnyInitialArrayPrototype(array)); - int add_count = (argument_count > 1) ? (argument_count - 2) : 0; - int new_length = len - actual_delete_count + add_count; + MatchArrayElementsKindToArguments(isolate, array, &args, 1, + args.length() - 1); + + int to_add = args.length() - 1; + if (to_add == 0) return array->length(); + + // Currently fixed arrays cannot grow too big, so we should never hit this. + DCHECK_LE(to_add, Smi::kMaxValue - Smi::ToInt(array->length())); + DCHECK(!JSArray::HasReadOnlyLength(array)); - if (new_length != len && JSArray::HasReadOnlyLength(array)) { - AllowHeapAllocation allow_allocation; - return CallJsIntrinsic(isolate, isolate->array_splice(), args); - } ElementsAccessor* accessor = array->GetElementsAccessor(); - Handle result_array = accessor->Splice( - array, actual_start, actual_delete_count, &args, add_count); - return *result_array; + int new_length = accessor->Unshift(array, &args, to_add); + return Smi::FromInt(new_length); } // Array Concat ------------------------------------------------------------- diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc index 808c34e43b9419..a4de98eb97b011 100644 --- a/deps/v8/src/builtins/builtins-arraybuffer.cc +++ b/deps/v8/src/builtins/builtins-arraybuffer.cc @@ -31,10 +31,12 @@ Object* ConstructBuffer(Isolate* isolate, Handle target, Handle new_target, Handle length, bool initialize) { Handle result; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, - JSObject::New(target, new_target)); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::New(target, new_target, Handle::null())); size_t byte_length; - if (!TryNumberToSize(*length, &byte_length)) { + if (!TryNumberToSize(*length, &byte_length) || + byte_length > JSArrayBuffer::kMaxByteLength) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength)); } @@ -98,7 +100,7 @@ BUILTIN(ArrayBufferPrototypeGetByteLength) { CHECK_SHARED(false, array_buffer, kMethodName); // TODO(franzih): According to the ES6 spec, we should throw a TypeError // here if the JSArrayBuffer is detached. - return array_buffer->byte_length(); + return *isolate->factory()->NewNumberFromSize(array_buffer->byte_length()); } // ES7 sharedmem 6.3.4.1 get SharedArrayBuffer.prototype.byteLength @@ -108,7 +110,7 @@ BUILTIN(SharedArrayBufferPrototypeGetByteLength) { CHECK_RECEIVER(JSArrayBuffer, array_buffer, "get SharedArrayBuffer.prototype.byteLength"); CHECK_SHARED(true, array_buffer, kMethodName); - return array_buffer->byte_length(); + return *isolate->factory()->NewNumberFromSize(array_buffer->byte_length()); } // ES6 section 24.1.3.1 ArrayBuffer.isView ( arg ) @@ -143,7 +145,7 @@ static Object* SliceHelper(BuiltinArguments args, Isolate* isolate, // * [AB] Let len be O.[[ArrayBufferByteLength]]. 
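
The builtins-arraybuffer.cc hunk above tightens ConstructBuffer: the requested length must both convert to a size_t and stay within JSArrayBuffer::kMaxByteLength. A hedged sketch of that validation, with TryNumberToSize reimplemented for plain doubles (the real one takes a tagged Object*) and a placeholder limit rather than V8's actual kMaxByteLength:

    #include <cstddef>
    #include <limits>

    // Placeholder for JSArrayBuffer::kMaxByteLength; the real value is
    // platform-dependent and not shown in this hunk.
    constexpr size_t kMaxByteLength = static_cast<size_t>(1) << 30;

    // Double -> size_t conversion in the spirit of TryNumberToSize:
    // reject NaN, negatives and anything not representable as size_t.
    bool TryNumberToSize(double number, size_t* out) {
      if (!(number >= 0)) return false;  // NaN also fails this comparison
      if (number >= static_cast<double>(std::numeric_limits<size_t>::max()))
        return false;
      *out = static_cast<size_t>(number);
      return true;
    }

    // The combined check ConstructBuffer now performs before allocating.
    bool IsValidArrayBufferLength(double requested, size_t* byte_length) {
      return TryNumberToSize(requested, byte_length) &&
             *byte_length <= kMaxByteLength;
    }

    int main() {
      size_t byte_length = 0;
      bool ok = IsValidArrayBufferLength(1024.0, &byte_length);
      return (ok && byte_length == 1024) ? 0 : 1;
    }
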
// * [SAB] Let len be O.[[ArrayBufferByteLength]]. - double const len = array_buffer->byte_length()->Number(); + double const len = array_buffer->byte_length(); // * Let relativeStart be ? ToInteger(start). Handle relative_start; @@ -242,7 +244,7 @@ static Object* SliceHelper(BuiltinArguments args, Isolate* isolate, } // * If new.[[ArrayBufferByteLength]] < newLen, throw a TypeError exception. - if (new_array_buffer->byte_length()->Number() < new_len) { + if (new_array_buffer->byte_length() < new_len) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewTypeError(is_shared ? MessageTemplate::kSharedArrayBufferTooShort @@ -264,10 +266,10 @@ static Object* SliceHelper(BuiltinArguments args, Isolate* isolate, size_t first_size = 0, new_len_size = 0; CHECK(TryNumberToSize(*first_obj, &first_size)); CHECK(TryNumberToSize(*new_len_obj, &new_len_size)); - DCHECK(NumberToSize(new_array_buffer->byte_length()) >= new_len_size); + DCHECK(new_array_buffer->byte_length() >= new_len_size); if (new_len_size != 0) { - size_t from_byte_length = NumberToSize(array_buffer->byte_length()); + size_t from_byte_length = array_buffer->byte_length(); USE(from_byte_length); DCHECK(first_size <= from_byte_length); DCHECK(from_byte_length - first_size >= new_len_size); diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc index 4568507a9f37d1..a0a0878823f979 100644 --- a/deps/v8/src/builtins/builtins-async-gen.cc +++ b/deps/v8/src/builtins/builtins-async-gen.cc @@ -275,11 +275,11 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context, STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize); StoreMapNoWriteBarrier(function, function_map); StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(function, JSObject::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(function, JSFunction::kFeedbackCellOffset, - Heap::kManyClosuresCellRootIndex); + RootIndex::kManyClosuresCell); Node* shared_info = LoadContextElement(native_context, context_index); CSA_ASSERT(this, IsSharedFunctionInfo(shared_info)); diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc index bbb25716912036..97471e038b49ed 100644 --- a/deps/v8/src/builtins/builtins-async-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc @@ -207,7 +207,7 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest( Node* promise) { CSA_SLOW_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE)); Node* request = Allocate(AsyncGeneratorRequest::kSize); - StoreMapNoWriteBarrier(request, Heap::kAsyncGeneratorRequestMapRootIndex); + StoreMapNoWriteBarrier(request, RootIndex::kAsyncGeneratorRequestMap); StoreObjectFieldNoWriteBarrier(request, AsyncGeneratorRequest::kNextOffset, UndefinedConstant()); StoreObjectFieldNoWriteBarrier(request, @@ -218,7 +218,7 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest( StoreObjectFieldNoWriteBarrier(request, AsyncGeneratorRequest::kPromiseOffset, promise); StoreObjectFieldRoot(request, AsyncGeneratorRequest::kNextOffset, - Heap::kUndefinedValueRootIndex); + RootIndex::kUndefinedValue); return request; } @@ -519,9 +519,9 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) { Context::ITERATOR_RESULT_MAP_INDEX); StoreMapNoWriteBarrier(iter_result, map); StoreObjectFieldRoot(iter_result, 
JSIteratorResult::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(iter_result, JSIteratorResult::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldNoWriteBarrier(iter_result, JSIteratorResult::kValueOffset, value); StoreObjectFieldNoWriteBarrier(iter_result, JSIteratorResult::kDoneOffset, diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc index 52645cbaa02b92..52ed0563c651ae 100644 --- a/deps/v8/src/builtins/builtins-boolean.cc +++ b/deps/v8/src/builtins/builtins-boolean.cc @@ -26,8 +26,9 @@ BUILTIN(BooleanConstructor) { Handle new_target = Handle::cast(args.new_target()); DCHECK(*target == target->native_context()->boolean_function()); Handle result; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, - JSObject::New(target, new_target)); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::New(target, new_target, Handle::null())); Handle::cast(result)->set_value( isolate->heap()->ToBoolean(value->BooleanValue(isolate))); return *result; diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc index 4defe28cb7c4ac..b51ad5c50a3242 100644 --- a/deps/v8/src/builtins/builtins-call-gen.cc +++ b/deps/v8/src/builtins/builtins-call-gen.cc @@ -17,45 +17,75 @@ namespace internal { void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined( MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined); } void Builtins::Generate_CallFunction_ReceiverIsNotNullOrUndefined( MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined); } void Builtins::Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_CallFunction(masm, ConvertReceiverMode::kAny); } void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_CallBoundFunctionImpl(masm); } void Builtins::Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined); } void Builtins::Generate_Call_ReceiverIsNotNullOrUndefined( MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined); } void Builtins::Generate_Call_ReceiverIsAny(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_Call(masm, ConvertReceiverMode::kAny); } void Builtins::Generate_CallVarargs(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_CallOrConstructVarargs(masm, masm->isolate()->builtins()->Call()); } void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif 
Generate_CallOrConstructForwardVarargs(masm, CallOrConstructMode::kCall, masm->isolate()->builtins()->Call()); } void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_CallOrConstructForwardVarargs( masm, CallOrConstructMode::kCall, masm->isolate()->builtins()->CallFunction()); @@ -211,11 +241,11 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike( { if (new_target == nullptr) { Callable callable = CodeFactory::CallVarargs(isolate()); - TailCallStub(callable, context, target, args_count, elements, length); + TailCallStub(callable, context, target, args_count, length, elements); } else { Callable callable = CodeFactory::ConstructVarargs(isolate()); - TailCallStub(callable, context, target, new_target, args_count, - elements, length); + TailCallStub(callable, context, target, new_target, args_count, length, + elements); } } @@ -266,11 +296,11 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs( { if (new_target == nullptr) { Callable callable = CodeFactory::CallVarargs(isolate()); - TailCallStub(callable, context, target, args_count, new_elements, length); + TailCallStub(callable, context, target, args_count, length, new_elements); } else { Callable callable = CodeFactory::ConstructVarargs(isolate()); - TailCallStub(callable, context, target, new_target, args_count, - new_elements, length); + TailCallStub(callable, context, target, new_target, args_count, length, + new_elements); } } } @@ -299,7 +329,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread( // Check that the Array.prototype hasn't been modified in a way that would // affect iteration. TNode protector_cell = - CAST(LoadRoot(Heap::kArrayIteratorProtectorRootIndex)); + CAST(LoadRoot(RootIndex::kArrayIteratorProtector)); GotoIf(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), SmiConstant(Isolate::kProtectorInvalid)), &if_generic); @@ -325,8 +355,9 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread( TNode iterator_fn = GetProperty(context, spread, IteratorSymbolConstant()); GotoIfNot(TaggedIsCallable(iterator_fn), &if_iterator_fn_not_callable); - TNode list = CAST( - CallBuiltin(Builtins::kIterableToList, context, spread, iterator_fn)); + TNode list = + CAST(CallBuiltin(Builtins::kIterableToListMayPreserveHoles, context, + spread, iterator_fn)); var_length = LoadAndUntagToWord32ObjectField(list, JSArray::kLengthOffset); var_elements = LoadElements(list); @@ -346,11 +377,11 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread( if (new_target == nullptr) { Callable callable = CodeFactory::CallVarargs(isolate()); - TailCallStub(callable, context, target, args_count, elements, length); + TailCallStub(callable, context, target, args_count, length, elements); } else { Callable callable = CodeFactory::ConstructVarargs(isolate()); - TailCallStub(callable, context, target, new_target, args_count, elements, - length); + TailCallStub(callable, context, target, new_target, args_count, length, + elements); } } diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index 5808d2a98caacd..33aecc0c453f5d 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -24,7 +24,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler { explicit 
BaseCollectionsAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - virtual ~BaseCollectionsAssembler() {} + virtual ~BaseCollectionsAssembler() = default; protected: enum Variant { kMap, kSet, kWeakMap, kWeakSet }; @@ -628,7 +628,7 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { Node* AllocateJSCollectionIterator(Node* context, int map_index, Node* collection); TNode AllocateTable(Variant variant, TNode context, - TNode at_least_space_for); + TNode at_least_space_for) override; Node* GetHash(Node* const key); Node* CallGetHashRaw(Node* const key); Node* CallGetOrCreateHashRaw(Node* const key); @@ -689,7 +689,7 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { Node* key_tagged, Variable* result, Label* entry_found, Label* not_found); - Node* ComputeIntegerHashForString(Node* context, Node* string_key); + Node* ComputeStringHash(Node* context, Node* string_key); void SameValueZeroString(Node* context, Node* key_string, Node* candidate_key, Label* if_same, Label* if_not_same); @@ -731,9 +731,9 @@ Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator( Node* const iterator = AllocateInNewSpace(IteratorType::kSize); StoreMapNoWriteBarrier(iterator, iterator_map); StoreObjectFieldRoot(iterator, IteratorType::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(iterator, IteratorType::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kTableOffset, table); StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kIndexOffset, SmiConstant(0)); @@ -846,8 +846,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey( Node* table, Node* smi_key, Variable* result, Label* entry_found, Label* not_found) { Node* const key_untagged = SmiUntag(smi_key); - Node* const hash = - ChangeInt32ToIntPtr(ComputeIntegerHash(key_untagged, Int32Constant(0))); + Node* const hash = ChangeInt32ToIntPtr(ComputeUnseededHash(key_untagged)); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); result->Bind(hash); FindOrderedHashTableEntry( @@ -862,7 +861,7 @@ template void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey( Node* context, Node* table, Node* key_tagged, Variable* result, Label* entry_found, Label* not_found) { - Node* const hash = ComputeIntegerHashForString(context, key_tagged); + Node* const hash = ComputeStringHash(context, key_tagged); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); result->Bind(hash); FindOrderedHashTableEntry( @@ -920,8 +919,8 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey( result, entry_found, not_found); } -Node* CollectionsBuiltinsAssembler::ComputeIntegerHashForString( - Node* context, Node* string_key) { +Node* CollectionsBuiltinsAssembler::ComputeStringHash(Node* context, + Node* string_key) { VARIABLE(var_result, MachineType::PointerRepresentation()); Label hash_not_computed(this), done(this, &var_result); @@ -1700,7 +1699,7 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) { BIND(&return_end); { StoreObjectFieldRoot(receiver, JSMapIterator::kTableOffset, - Heap::kEmptyOrderedHashMapRootIndex); + RootIndex::kEmptyOrderedHashMap); Goto(&return_value); } } @@ -1908,7 +1907,7 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { BIND(&return_end); { StoreObjectFieldRoot(receiver, JSSetIterator::kTableOffset, - 
Heap::kEmptyOrderedHashSetRootIndex); + RootIndex::kEmptyOrderedHashSet); Goto(&return_value); } } @@ -1987,7 +1986,7 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler { TNode number_of_elements); TNode AllocateTable(Variant variant, TNode context, - TNode at_least_space_for); + TNode at_least_space_for) override; // Generates and sets the identity for a JSRececiver. TNode CreateIdentityHash(TNode receiver); @@ -2063,8 +2062,8 @@ TNode WeakCollectionsBuiltinsAssembler::AllocateTable( TNode table = CAST( AllocateFixedArray(HOLEY_ELEMENTS, length, kAllowLargeObjectAllocation)); - Heap::RootListIndex map_root_index = static_cast( - EphemeronHashTableShape::GetMapRootIndex()); + RootIndex map_root_index = + static_cast(EphemeronHashTableShape::GetMapRootIndex()); StoreMapNoWriteBarrier(table, map_root_index); StoreFixedArrayElement(table, EphemeronHashTable::kNumberOfElementsIndex, SmiConstant(0), SKIP_WRITE_BARRIER); @@ -2076,7 +2075,7 @@ TNode WeakCollectionsBuiltinsAssembler::AllocateTable( TNode start = KeyIndexFromEntry(IntPtrConstant(0)); FillFixedArrayWithValue(HOLEY_ELEMENTS, table, start, length, - Heap::kUndefinedValueRootIndex); + RootIndex::kUndefinedValue); return table; } diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index 8e54c4c36961a7..26c97ba6813f69 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -19,17 +19,26 @@ namespace v8 { namespace internal { void Builtins::Generate_ConstructVarargs(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_CallOrConstructVarargs(masm, BUILTIN_CODE(masm->isolate(), Construct)); } void Builtins::Generate_ConstructForwardVarargs(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_CallOrConstructForwardVarargs( masm, CallOrConstructMode::kConstruct, BUILTIN_CODE(masm->isolate(), Construct)); } void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_CallOrConstructForwardVarargs( masm, CallOrConstructMode::kConstruct, BUILTIN_CODE(masm->isolate(), ConstructFunction)); @@ -77,11 +86,11 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { Goto(&cell_done); BIND(&no_closures); - StoreMapNoWriteBarrier(feedback_cell, Heap::kOneClosureCellMapRootIndex); + StoreMapNoWriteBarrier(feedback_cell, RootIndex::kOneClosureCellMap); Goto(&cell_done); BIND(&one_closure); - StoreMapNoWriteBarrier(feedback_cell, Heap::kManyClosuresCellMapRootIndex); + StoreMapNoWriteBarrier(feedback_cell, RootIndex::kManyClosuresCellMap); Goto(&cell_done); BIND(&cell_done); @@ -116,9 +125,9 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { // Initialize the rest of the function. StoreObjectFieldRoot(result, JSObject::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(result, JSObject::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); { // Set function prototype if necessary. 
Label done(this), init_prototype(this); @@ -127,7 +136,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { BIND(&init_prototype); StoreObjectFieldRoot(result, JSFunction::kPrototypeOrInitialMapOffset, - Heap::kTheHoleValueRootIndex); + RootIndex::kTheHoleValue); Goto(&done); BIND(&done); } @@ -236,13 +245,13 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext( TNode function_context = UncheckedCast(AllocateInNewSpace(size)); - Heap::RootListIndex context_type; + RootIndex context_type; switch (scope_type) { case EVAL_SCOPE: - context_type = Heap::kEvalContextMapRootIndex; + context_type = RootIndex::kEvalContextMap; break; case FUNCTION_SCOPE: - context_type = Heap::kFunctionContextMapRootIndex; + context_type = RootIndex::kFunctionContextMap; break; default: UNREACHABLE(); diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc index 7bdc2759c4ed5f..b8980566583391 100644 --- a/deps/v8/src/builtins/builtins-conversion-gen.cc +++ b/deps/v8/src/builtins/builtins-conversion-gen.cc @@ -48,10 +48,8 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive( if_resultisnotprimitive(this, Label::kDeferred); GotoIf(TaggedIsSmi(result), &if_resultisprimitive); Node* result_instance_type = LoadInstanceType(result); - STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE); - Branch(Int32LessThanOrEqual(result_instance_type, - Int32Constant(LAST_PRIMITIVE_TYPE)), - &if_resultisprimitive, &if_resultisnotprimitive); + Branch(IsPrimitiveInstanceType(result_instance_type), &if_resultisprimitive, + &if_resultisnotprimitive); BIND(&if_resultisprimitive); { @@ -108,7 +106,62 @@ TF_BUILTIN(ToName, CodeStubAssembler) { Node* context = Parameter(Descriptor::kContext); Node* input = Parameter(Descriptor::kArgument); - Return(ToName(context, input)); + VARIABLE(var_input, MachineRepresentation::kTagged, input); + Label loop(this, &var_input); + Goto(&loop); + BIND(&loop); + { + // Load the current {input} value. + Node* input = var_input.value(); + + // Dispatch based on the type of the {input.} + Label if_inputisbigint(this), if_inputisname(this), if_inputisnumber(this), + if_inputisoddball(this), if_inputisreceiver(this, Label::kDeferred); + GotoIf(TaggedIsSmi(input), &if_inputisnumber); + Node* input_instance_type = LoadInstanceType(input); + STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE); + GotoIf(IsNameInstanceType(input_instance_type), &if_inputisname); + GotoIf(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver); + GotoIf(IsHeapNumberInstanceType(input_instance_type), &if_inputisnumber); + Branch(IsBigIntInstanceType(input_instance_type), &if_inputisbigint, + &if_inputisoddball); + + BIND(&if_inputisbigint); + { + // We don't have a fast-path for BigInt currently, so just + // tail call to the %ToString runtime function here for now. + TailCallRuntime(Runtime::kToString, context, input); + } + + BIND(&if_inputisname); + { + // The {input} is already a Name. + Return(input); + } + + BIND(&if_inputisnumber); + { + // Convert the String {input} to a Number. + TailCallBuiltin(Builtins::kNumberToString, context, input); + } + + BIND(&if_inputisoddball); + { + // Just return the {input}'s string representation. 
+ CSA_ASSERT(this, IsOddballInstanceType(input_instance_type)); + Return(LoadObjectField(input, Oddball::kToStringOffset)); + } + + BIND(&if_inputisreceiver); + { + // Convert the JSReceiver {input} to a primitive first, + // and then run the loop again with the new {input}, + // which is then a primitive value. + var_input.Bind(CallBuiltin(Builtins::kNonPrimitiveToPrimitive_String, + context, input)); + Goto(&loop); + } + } } TF_BUILTIN(NonNumberToNumber, CodeStubAssembler) { @@ -205,10 +258,7 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive( // Return the {result} if it is a primitive. GotoIf(TaggedIsSmi(result), &return_result); Node* result_instance_type = LoadInstanceType(result); - STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE); - GotoIf(Int32LessThanOrEqual(result_instance_type, - Int32Constant(LAST_PRIMITIVE_TYPE)), - &return_result); + GotoIf(IsPrimitiveInstanceType(result_instance_type), &return_result); } // Just continue with the next {name} if the {method} is not callable. @@ -384,9 +434,9 @@ TF_BUILTIN(ToObject, CodeStubAssembler) { Node* js_value = Allocate(JSValue::kSize); StoreMapNoWriteBarrier(js_value, initial_map); StoreObjectFieldRoot(js_value, JSValue::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(js_value, JSObject::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectField(js_value, JSValue::kValueOffset, object); Return(js_value); diff --git a/deps/v8/src/builtins/builtins-data-view-gen.h b/deps/v8/src/builtins/builtins-data-view-gen.h index 6c755c4d08b319..4a55a90eef3eec 100644 --- a/deps/v8/src/builtins/builtins-data-view-gen.h +++ b/deps/v8/src/builtins/builtins-data-view-gen.h @@ -17,25 +17,17 @@ class DataViewBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler { explicit DataViewBuiltinsAssembler(compiler::CodeAssemblerState* state) : BaseBuiltinsFromDSLAssembler(state) {} - TNode LoadDataViewByteOffset(TNode data_view) { - return CAST(LoadObjectField(data_view, JSDataView::kByteOffsetOffset)); - } - - TNode LoadDataViewByteLength(TNode data_view) { - return CAST(LoadObjectField(data_view, JSDataView::kByteLengthOffset)); - } - - TNode LoadUint8(TNode data_pointer, TNode offset) { + TNode LoadUint8(TNode data_pointer, TNode offset) { return UncheckedCast( Load(MachineType::Uint8(), data_pointer, offset)); } - TNode LoadInt8(TNode data_pointer, TNode offset) { + TNode LoadInt8(TNode data_pointer, TNode offset) { return UncheckedCast( Load(MachineType::Int8(), data_pointer, offset)); } - void StoreWord8(TNode data_pointer, TNode offset, + void StoreWord8(TNode data_pointer, TNode offset, TNode value) { StoreNoWriteBarrier(MachineRepresentation::kWord8, data_pointer, offset, value); diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc index 72ea68598257d2..f40cd0f68e2f6f 100644 --- a/deps/v8/src/builtins/builtins-dataview.cc +++ b/deps/v8/src/builtins/builtins-dataview.cc @@ -43,51 +43,52 @@ BUILTIN(DataViewConstructor) { Handle array_buffer = Handle::cast(buffer); // 4. Let offset be ? ToIndex(byteOffset). - Handle offset; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, offset, + isolate, byte_offset, Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset)); + size_t view_byte_offset = byte_offset->Number(); // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. // We currently violate the specification at this point. TODO: Fix that. // 6. 
Let bufferByteLength be the value of buffer's // [[ArrayBufferByteLength]] internal slot. - double const buffer_byte_length = array_buffer->byte_length()->Number(); + size_t const buffer_byte_length = array_buffer->byte_length(); // 7. If offset > bufferByteLength, throw a RangeError exception. - if (offset->Number() > buffer_byte_length) { + if (view_byte_offset > buffer_byte_length) { THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewRangeError(MessageTemplate::kInvalidOffset, offset)); + isolate, NewRangeError(MessageTemplate::kInvalidOffset, byte_offset)); } - Handle view_byte_length; + size_t view_byte_length; if (byte_length->IsUndefined(isolate)) { // 8. If byteLength is either not present or undefined, then // a. Let viewByteLength be bufferByteLength - offset. - view_byte_length = - isolate->factory()->NewNumber(buffer_byte_length - offset->Number()); + view_byte_length = buffer_byte_length - view_byte_offset; } else { // 9. Else, // a. Let viewByteLength be ? ToIndex(byteLength). // b. If offset+viewByteLength > bufferByteLength, throw a // RangeError exception. ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, view_byte_length, + isolate, byte_length, Object::ToIndex(isolate, byte_length, MessageTemplate::kInvalidDataViewLength)); - if (offset->Number() + view_byte_length->Number() > buffer_byte_length) { + if (view_byte_offset + byte_length->Number() > buffer_byte_length) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength)); } + view_byte_length = byte_length->Number(); } // 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget, // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]], // [[ByteLength]], [[ByteOffset]]»). Handle result; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, - JSObject::New(target, new_target)); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::New(target, new_target, Handle::null())); for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) { Handle::cast(result)->SetEmbedderField(i, Smi::kZero); } @@ -96,10 +97,10 @@ BUILTIN(DataViewConstructor) { Handle::cast(result)->set_buffer(*array_buffer); // 12. Set O's [[ByteLength]] internal slot to viewByteLength. - Handle::cast(result)->set_byte_length(*view_byte_length); + Handle::cast(result)->set_byte_length(view_byte_length); // 13. Set O's [[ByteOffset]] internal slot to offset. - Handle::cast(result)->set_byte_offset(*offset); + Handle::cast(result)->set_byte_offset(view_byte_offset); // 14. Return O. return *result; diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc index 4d3c7faa530211..e0cb199920bfb8 100644 --- a/deps/v8/src/builtins/builtins-date-gen.cc +++ b/deps/v8/src/builtins/builtins-date-gen.cc @@ -193,11 +193,11 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) { hint_is_invalid(this, Label::kDeferred); // Fast cases for internalized strings. - Node* number_string = LoadRoot(Heap::knumber_stringRootIndex); + Node* number_string = LoadRoot(RootIndex::knumber_string); GotoIf(WordEqual(hint, number_string), &hint_is_number); - Node* default_string = LoadRoot(Heap::kdefault_stringRootIndex); + Node* default_string = LoadRoot(RootIndex::kdefault_string); GotoIf(WordEqual(hint, default_string), &hint_is_string); - Node* string_string = LoadRoot(Heap::kstring_stringRootIndex); + Node* string_string = LoadRoot(RootIndex::kstring_string); GotoIf(WordEqual(hint, string_string), &hint_is_string); // Slow-case with actual string comparisons. 
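
Stepping back to the DataViewConstructor hunk above: it replaces Handle<Object> arithmetic with plain size_t arithmetic for the view's offset and length. A standalone sketch of that computation (std::nullopt stands in for throwing a RangeError; per spec, ToIndex has already capped both inputs at 2^53 - 1, so the addition below cannot wrap a 64-bit size_t):

    #include <cstddef>
    #include <iostream>
    #include <optional>

    struct ViewLayout {
      size_t byte_offset;
      size_t byte_length;
    };

    // Steps 7-9 of the DataView constructor, in size_t space.
    std::optional<ViewLayout> ComputeViewLayout(
        size_t buffer_byte_length, size_t view_byte_offset,
        std::optional<size_t> requested_byte_length) {
      // 7. If offset > bufferByteLength, throw a RangeError exception.
      if (view_byte_offset > buffer_byte_length) return std::nullopt;
      size_t view_byte_length;
      if (!requested_byte_length) {
        // 8. byteLength absent: viewByteLength = bufferByteLength - offset.
        view_byte_length = buffer_byte_length - view_byte_offset;
      } else {
        // 9. If offset + viewByteLength > bufferByteLength, throw a RangeError.
        view_byte_length = *requested_byte_length;
        if (view_byte_offset + view_byte_length > buffer_byte_length)
          return std::nullopt;
      }
      return ViewLayout{view_byte_offset, view_byte_length};
    }

    int main() {
      // new DataView(new ArrayBuffer(16), 4): offset 4, length 12.
      auto layout = ComputeViewLayout(16, 4, std::nullopt);
      std::cout << layout->byte_offset << " " << layout->byte_length << "\n";
      return 0;
    }
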
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc index 569a5807e22c2a..e59daeab2cd570 100644 --- a/deps/v8/src/builtins/builtins-date.cc +++ b/deps/v8/src/builtins/builtins-date.cc @@ -10,6 +10,10 @@ #include "src/counters.h" #include "src/dateparser-inl.h" #include "src/objects-inl.h" +#ifdef V8_INTL_SUPPORT +#include "src/objects/intl-objects.h" +#include "src/objects/js-date-time-format.h" +#endif namespace v8 { namespace internal { @@ -835,6 +839,53 @@ BUILTIN(DatePrototypeToTimeString) { isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer))); } +#ifdef V8_INTL_SUPPORT +// ecma402 #sup-date.prototype.tolocaledatestring +BUILTIN(DatePrototypeToLocaleDateString) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleDateString"); + RETURN_RESULT_OR_FAILURE( + isolate, JSDateTimeFormat::ToLocaleDateTime( + isolate, + date, // date + args.atOrUndefined(isolate, 1), // locales + args.atOrUndefined(isolate, 2), // options + JSDateTimeFormat::RequiredOption::kDate, // required + JSDateTimeFormat::DefaultsOption::kDate, // defaults + "dateformatdate")); // service +} + +// ecma402 #sup-date.prototype.tolocalestring +BUILTIN(DatePrototypeToLocaleString) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleString"); + RETURN_RESULT_OR_FAILURE( + isolate, JSDateTimeFormat::ToLocaleDateTime( + isolate, + date, // date + args.atOrUndefined(isolate, 1), // locales + args.atOrUndefined(isolate, 2), // options + JSDateTimeFormat::RequiredOption::kAny, // required + JSDateTimeFormat::DefaultsOption::kAll, // defaults + "dateformatall")); // service +} + +// ecma402 #sup-date.prototype.tolocaletimestring +BUILTIN(DatePrototypeToLocaleTimeString) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleTimeString"); + RETURN_RESULT_OR_FAILURE( + isolate, JSDateTimeFormat::ToLocaleDateTime( + isolate, + date, // date + args.atOrUndefined(isolate, 1), // locales + args.atOrUndefined(isolate, 2), // options + JSDateTimeFormat::RequiredOption::kTime, // required + JSDateTimeFormat::DefaultsOption::kTime, // defaults + "dateformattime")); // service +} +#endif // V8_INTL_SUPPORT + // ES6 section 20.3.4.43 Date.prototype.toUTCString ( ) BUILTIN(DatePrototypeToUTCString) { HandleScope scope(isolate); diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 62765b802fd17e..fc53bcdef5caff 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -5,7 +5,7 @@ #ifndef V8_BUILTINS_BUILTINS_DEFINITIONS_H_ #define V8_BUILTINS_BUILTINS_DEFINITIONS_H_ -#include "src/interpreter/bytecodes.h" +#include "builtins-generated/bytecodes-builtins-list.h" // include generated header #include "torque-generated/builtin-definitions-from-dsl.h" @@ -26,11 +26,13 @@ namespace internal { // TFH: Handlers in Turbofan, with CodeStub linkage. // Args: name, interface descriptor // BCH: Bytecode Handlers, with bytecode dispatch linkage. -// Args: name +// Args: name, OperandScale, Bytecode +// DLH: Deserialize Lazy Handlers, with bytecode dispatch linkage. +// Args: name, OperandScale // ASM: Builtin in platform-dependent assembly. 
// Args: name -#define BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \ +#define BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, DLH, ASM) \ /* GC write barrirer */ \ TFC(RecordWrite, RecordWrite, 1) \ \ @@ -127,6 +129,10 @@ namespace internal { TFC(CompileLazy, JSTrampoline, 1) \ TFC(CompileLazyDeoptimizedCode, JSTrampoline, 1) \ TFC(DeserializeLazy, JSTrampoline, 1) \ + /* The three lazy bytecode handlers do not declare a bytecode. */ \ + DLH(DeserializeLazyHandler, interpreter::OperandScale::kSingle) \ + DLH(DeserializeLazyWideHandler, interpreter::OperandScale::kDouble) \ + DLH(DeserializeLazyExtraWideHandler, interpreter::OperandScale::kQuadruple) \ ASM(InstantiateAsmJs) \ ASM(NotifyDeoptimized) \ \ @@ -155,8 +161,6 @@ namespace internal { ASM(ContinueToJavaScriptBuiltin) \ ASM(ContinueToJavaScriptBuiltinWithResult) \ \ - ASM(OnStackReplacement) \ - \ /* API callback handling */ \ API(HandleApiCall) \ API(HandleApiCallAsFunction) \ @@ -204,7 +208,6 @@ namespace internal { TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter, 1) \ \ /* Handlers */ \ - TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \ TFH(KeyedLoadIC_PolymorphicName, LoadWithVector) \ TFH(KeyedLoadIC_Slow, LoadWithVector) \ TFH(KeyedStoreIC_Megamorphic, StoreWithVector) \ @@ -319,8 +322,6 @@ namespace internal { TFJ(ArrayPrototypeShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.slice */ \ TFJ(ArrayPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ - /* ES6 #sec-array.prototype.splice */ \ - CPP(ArraySplice) \ /* ES6 #sec-array.prototype.unshift */ \ CPP(ArrayUnshift) \ /* Support for Array.from and other array-copying idioms */ \ @@ -629,10 +630,14 @@ namespace internal { \ /* ICs */ \ TFH(LoadIC, LoadWithVector) \ + TFH(LoadIC_Megamorphic, LoadWithVector) \ TFH(LoadIC_Noninlined, LoadWithVector) \ TFH(LoadICTrampoline, Load) \ + TFH(LoadICTrampoline_Megamorphic, Load) \ TFH(KeyedLoadIC, LoadWithVector) \ + TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \ TFH(KeyedLoadICTrampoline, Load) \ + TFH(KeyedLoadICTrampoline_Megamorphic, Load) \ TFH(StoreGlobalIC, StoreGlobalWithVector) \ TFH(StoreGlobalICTrampoline, StoreGlobal) \ TFH(StoreIC, StoreWithVector) \ @@ -645,6 +650,13 @@ namespace internal { TFH(LoadGlobalICTrampoline, LoadGlobal) \ TFH(LoadGlobalICInsideTypeofTrampoline, LoadGlobal) \ TFH(CloneObjectIC, CloneObjectWithVector) \ + TFH(CloneObjectIC_Slow, CloneObjectWithVector) \ + \ + /* IterableToList */ \ + /* ES #sec-iterabletolist */ \ + TFS(IterableToList, kIterable, kIteratorFn) \ + TFS(IterableToListWithSymbolLookup, kIterable) \ + TFS(IterableToListMayPreserveHoles, kIterable, kIteratorFn) \ \ /* Map */ \ TFS(FindOrderedHashMapEntry, kTable, kKey) \ @@ -1013,6 +1025,7 @@ namespace internal { TFJ(AtomicsAnd, 3, kReceiver, kArray, kIndex, kValue) \ TFJ(AtomicsOr, 3, kReceiver, kArray, kIndex, kValue) \ TFJ(AtomicsXor, 3, kReceiver, kArray, kIndex, kValue) \ + CPP(AtomicsNotify) \ CPP(AtomicsIsLockFree) \ CPP(AtomicsWait) \ CPP(AtomicsWake) \ @@ -1129,7 +1142,6 @@ namespace internal { TFJ(SymbolPrototypeValueOf, 0, kReceiver) \ \ /* TypedArray */ \ - TFS(IterableToList, kIterable, kIteratorFn) \ TFS(TypedArrayInitialize, kHolder, kLength, kElementSize, kInitialize, \ kBufferConstructor) \ TFS(TypedArrayInitializeWithBuffer, kHolder, kLength, kBuffer, kElementSize, \ @@ -1208,7 +1220,6 @@ namespace internal { /* Wasm */ \ ASM(WasmCompileLazy) \ TFC(WasmAllocateHeapNumber, AllocateHeapNumber, 1) \ - TFC(WasmArgumentsAdaptor, ArgumentAdaptor, 1) \ 
TFC(WasmCallJavaScript, CallTrampoline, 1) \ TFC(WasmGrowMemory, WasmGrowMemory, 1) \ TFC(WasmStackGuard, NoContext, 1) \ @@ -1305,10 +1316,9 @@ namespace internal { ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit) \ \ /* String helpers */ \ - TFS(StringAdd_CheckNone_NotTenured, kLeft, kRight) \ - TFS(StringAdd_CheckNone_Tenured, kLeft, kRight) \ - TFS(StringAdd_ConvertLeft_NotTenured, kLeft, kRight) \ - TFS(StringAdd_ConvertRight_NotTenured, kLeft, kRight) \ + TFS(StringAdd_CheckNone, kLeft, kRight) \ + TFS(StringAdd_ConvertLeft, kLeft, kRight) \ + TFS(StringAdd_ConvertRight, kLeft, kRight) \ TFS(SubString, kString, kFrom, kTo) \ \ /* Miscellaneous */ \ @@ -1328,73 +1338,111 @@ namespace internal { #define BUILTIN_LIST_INTL(CPP, TFJ, TFS) \ /* ecma402 #sec-intl.collator */ \ CPP(CollatorConstructor) \ - TFS(StringToLowerCaseIntl, kString) \ - /* ES #sec-string.prototype.tolowercase */ \ - TFJ(StringPrototypeToLowerCaseIntl, 0, kReceiver) \ - /* ES #sec-string.prototype.touppercase */ \ - CPP(StringPrototypeToUpperCaseIntl) \ - /* ES #sec-string.prototype.normalize */ \ - CPP(StringPrototypeNormalizeIntl) \ - /* ecma402 #sec-intl.numberformat.prototype.formattoparts */ \ - CPP(NumberFormatPrototypeFormatToParts) \ + /* ecma 402 #sec-collator-compare-functions*/ \ + CPP(CollatorInternalCompare) \ + /* ecma402 #sec-intl.collator.prototype.compare */ \ + CPP(CollatorPrototypeCompare) \ + /* ecma402 #sec-intl.collator.supportedlocalesof */ \ + CPP(CollatorSupportedLocalesOf) \ + CPP(CollatorPrototypeResolvedOptions) \ + /* ecma402 #sup-date.prototype.tolocaledatestring */ \ + CPP(DatePrototypeToLocaleDateString) \ + /* ecma402 #sup-date.prototype.tolocalestring */ \ + CPP(DatePrototypeToLocaleString) \ + /* ecma402 #sup-date.prototype.tolocaletimestring */ \ + CPP(DatePrototypeToLocaleTimeString) \ + /* ecma402 #sec-intl.datetimeformat */ \ + CPP(DateTimeFormatConstructor) \ + /* ecma402 #sec-datetime-format-functions */ \ + CPP(DateTimeFormatInternalFormat) \ + /* ecma402 #sec-intl.datetimeformat.prototype.format */ \ + CPP(DateTimeFormatPrototypeFormat) \ /* ecma402 #sec-intl.datetimeformat.prototype.formattoparts */ \ CPP(DateTimeFormatPrototypeFormatToParts) \ - /* ecma402 #new proposal */ \ + /* ecma402 #sec-intl.datetimeformat.supportedlocalesof */ \ + CPP(DateTimeFormatSupportedLocalesOf) \ /* ecma402 #sec-intl-listformat-constructor */ \ CPP(ListFormatConstructor) \ - /* ecma402 #sec-intl.listformat.prototype.resolvedoptions */ \ - CPP(ListFormatPrototypeResolvedOptions) \ /* ecma402 #sec-intl-list-format.prototype.format */ \ TFJ(ListFormatPrototypeFormat, \ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ecma402 #sec-intl-list-format.prototype.formattoparts */ \ TFJ(ListFormatPrototypeFormatToParts, \ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ + /* ecma402 #sec-intl.listformat.prototype.resolvedoptions */ \ + CPP(ListFormatPrototypeResolvedOptions) \ + /* ecma402 #sec-intl.ListFormat.supportedlocalesof */ \ + CPP(ListFormatSupportedLocalesOf) \ /* ecma402 #sec-intl-locale-constructor */ \ CPP(LocaleConstructor) \ - CPP(LocalePrototypeLanguage) \ - CPP(LocalePrototypeScript) \ - CPP(LocalePrototypeRegion) \ CPP(LocalePrototypeBaseName) \ CPP(LocalePrototypeCalendar) \ CPP(LocalePrototypeCaseFirst) \ CPP(LocalePrototypeCollation) \ CPP(LocalePrototypeHourCycle) \ - CPP(LocalePrototypeNumeric) \ - CPP(LocalePrototypeNumberingSystem) \ - CPP(LocalePrototypeToString) \ + CPP(LocalePrototypeLanguage) \ /* ecma402 #sec-Intl.Locale.prototype.maximize */ \ 
CPP(LocalePrototypeMaximize) \ /* ecma402 #sec-Intl.Locale.prototype.minimize */ \ CPP(LocalePrototypeMinimize) \ + CPP(LocalePrototypeNumeric) \ + CPP(LocalePrototypeNumberingSystem) \ + CPP(LocalePrototypeRegion) \ + CPP(LocalePrototypeScript) \ + CPP(LocalePrototypeToString) \ + /* ecma402 #sec-intl.numberformat */ \ + CPP(NumberFormatConstructor) \ /* ecma402 #sec-number-format-functions */ \ CPP(NumberFormatInternalFormatNumber) \ /* ecma402 #sec-intl.numberformat.prototype.format */ \ CPP(NumberFormatPrototypeFormatNumber) \ - /* ecma402 #sec-datetime-format-functions */ \ - CPP(DateTimeFormatInternalFormat) \ - /* ecma402 #sec-intl.datetimeformat.prototype.format */ \ - CPP(DateTimeFormatPrototypeFormat) \ + /* ecma402 #sec-intl.numberformat.prototype.formattoparts */ \ + CPP(NumberFormatPrototypeFormatToParts) \ + /* ecma402 #sec-intl.numberformat.prototype.resolvedoptions */ \ + CPP(NumberFormatPrototypeResolvedOptions) \ + /* ecma402 #sec-intl.numberformat.supportedlocalesof */ \ + CPP(NumberFormatSupportedLocalesOf) \ /* ecma402 #sec-intl.pluralrules */ \ CPP(PluralRulesConstructor) \ + CPP(PluralRulesPrototypeResolvedOptions) \ + /* ecma402 #sec-intl.pluralrules.prototype.select */ \ + CPP(PluralRulesPrototypeSelect) \ + /* ecma402 #sec-intl.pluralrules.supportedlocalesof */ \ + CPP(PluralRulesSupportedLocalesOf) \ /* ecma402 #sec-intl.RelativeTimeFormat.constructor */ \ CPP(RelativeTimeFormatConstructor) \ - /* ecma402 #sec-intl.RelativeTimeFormat.prototype.resolvedOptions */ \ - CPP(RelativeTimeFormatPrototypeResolvedOptions) \ /* ecma402 #sec-intl.RelativeTimeFormat.prototype.format */ \ CPP(RelativeTimeFormatPrototypeFormat) \ /* ecma402 #sec-intl.RelativeTimeFormat.prototype.formatToParts */ \ CPP(RelativeTimeFormatPrototypeFormatToParts) \ + /* ecma402 #sec-intl.RelativeTimeFormat.prototype.resolvedOptions */ \ + CPP(RelativeTimeFormatPrototypeResolvedOptions) \ + /* ecma402 #sec-intl.RelativeTimeFormat.supportedlocalesof */ \ + CPP(RelativeTimeFormatSupportedLocalesOf) \ + /* ES #sec-string.prototype.normalize */ \ + CPP(StringPrototypeNormalizeIntl) \ /* ecma402 #sup-string.prototype.tolocalelowercase */ \ CPP(StringPrototypeToLocaleLowerCase) \ /* ecma402 #sup-string.prototype.tolocaleuppercase */ \ CPP(StringPrototypeToLocaleUpperCase) \ - /* ecma402 #sec-intl.collator.prototype.compare */ \ - CPP(CollatorPrototypeCompare) \ - /* ecma 402 #sec-collator-compare-functions*/ \ - CPP(CollatorInternalCompare) \ - CPP(BreakIteratorInternalAdoptText) \ - CPP(BreakIteratorPrototypeAdoptText) + /* ES #sec-string.prototype.tolowercase */ \ + TFJ(StringPrototypeToLowerCaseIntl, 0, kReceiver) \ + /* ES #sec-string.prototype.touppercase */ \ + CPP(StringPrototypeToUpperCaseIntl) \ + TFS(StringToLowerCaseIntl, kString) \ + CPP(V8BreakIteratorConstructor) \ + CPP(V8BreakIteratorInternalAdoptText) \ + CPP(V8BreakIteratorInternalBreakType) \ + CPP(V8BreakIteratorInternalCurrent) \ + CPP(V8BreakIteratorInternalFirst) \ + CPP(V8BreakIteratorInternalNext) \ + CPP(V8BreakIteratorPrototypeAdoptText) \ + CPP(V8BreakIteratorPrototypeBreakType) \ + CPP(V8BreakIteratorPrototypeCurrent) \ + CPP(V8BreakIteratorPrototypeFirst) \ + CPP(V8BreakIteratorPrototypeNext) \ + CPP(V8BreakIteratorPrototypeResolvedOptions) \ + CPP(V8BreakIteratorSupportedLocalesOf) #else #define BUILTIN_LIST_INTL(CPP, TFJ, TFS) \ /* no-op fallback version */ \ @@ -1409,16 +1457,10 @@ namespace internal { CPP(StringPrototypeToUpperCase) #endif // V8_INTL_SUPPORT -#ifdef V8_EMBEDDED_BYTECODE_HANDLERS -#define 
BUILTIN_LIST_BYTECODE_HANDLERS(BCH) BYTECODE_LIST(BCH) -#else -#define BUILTIN_LIST_BYTECODE_HANDLERS(BCH) -#endif // V8_EMBEDDED_BYTECODE_HANDLERS - -#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM) \ - BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \ - BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \ - BUILTIN_LIST_INTL(CPP, TFJ, TFS) \ +#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, BCH, DLH, ASM) \ + BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, DLH, ASM) \ + BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \ + BUILTIN_LIST_INTL(CPP, TFJ, TFS) \ BUILTIN_LIST_BYTECODE_HANDLERS(BCH) // The exception thrown in the following builtins are caught @@ -1449,7 +1491,6 @@ namespace internal { #define WASM_RUNTIME_STUB_LIST(V, VTRAP) \ FOREACH_WASM_TRAPREASON(VTRAP) \ V(WasmAllocateHeapNumber) \ - V(WasmArgumentsAdaptor) \ V(WasmCallJavaScript) \ V(WasmGrowMemory) \ V(WasmStackGuard) \ @@ -1464,23 +1505,27 @@ namespace internal { #define BUILTIN_LIST_C(V) \ BUILTIN_LIST(V, V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ - IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN) + IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN) #define BUILTIN_LIST_A(V) \ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ - IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V) + IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ + V) #define BUILTIN_LIST_TFS(V) \ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ - V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN) + V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ + IGNORE_BUILTIN) -#define BUILTIN_LIST_TFJ(V) \ - BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \ - IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN) +#define BUILTIN_LIST_TFJ(V) \ + BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \ + IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ + IGNORE_BUILTIN) -#define BUILTIN_LIST_TFC(V) \ - BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \ - IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN) +#define BUILTIN_LIST_TFC(V) \ + BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \ + IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ + IGNORE_BUILTIN) } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h index 97b85bc2952aff..2961a61f6356a8 100644 --- a/deps/v8/src/builtins/builtins-descriptors.h +++ b/deps/v8/src/builtins/builtins-descriptors.h @@ -43,7 +43,8 @@ namespace internal { BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DEFINE_TFJ_INTERFACE_DESCRIPTOR, DEFINE_TFC_INTERFACE_DESCRIPTOR, DEFINE_TFS_INTERFACE_DESCRIPTOR, - DEFINE_TFH_INTERFACE_DESCRIPTOR, IGNORE_BUILTIN, IGNORE_BUILTIN) + DEFINE_TFH_INTERFACE_DESCRIPTOR, IGNORE_BUILTIN, IGNORE_BUILTIN, + IGNORE_BUILTIN) #undef DEFINE_TFJ_INTERFACE_DESCRIPTOR #undef DEFINE_TFC_INTERFACE_DESCRIPTOR diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc index eb4ace31e4c1a2..2f3c876852ddba 100644 --- a/deps/v8/src/builtins/builtins-function-gen.cc +++ b/deps/v8/src/builtins/builtins-function-gen.cc @@ -62,7 +62,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { const int length_index = JSFunction::kLengthDescriptorIndex; TNode maybe_length = CAST(LoadWeakFixedArrayElement( descriptors, 
DescriptorArray::ToKeyIndex(length_index))); - GotoIf(WordNotEqual(maybe_length, LoadRoot(Heap::klength_stringRootIndex)), + GotoIf(WordNotEqual(maybe_length, LoadRoot(RootIndex::klength_string)), &slow); TNode maybe_length_accessor = CAST(LoadWeakFixedArrayElement( @@ -74,8 +74,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { const int name_index = JSFunction::kNameDescriptorIndex; TNode maybe_name = CAST(LoadWeakFixedArrayElement( descriptors, DescriptorArray::ToKeyIndex(name_index))); - GotoIf(WordNotEqual(maybe_name, LoadRoot(Heap::kname_stringRootIndex)), - &slow); + GotoIf(WordNotEqual(maybe_name, LoadRoot(RootIndex::kname_string)), &slow); TNode maybe_name_accessor = CAST(LoadWeakFixedArrayElement( descriptors, DescriptorArray::ToValueIndex(name_index))); diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc index 29422ab72c277a..8b5dc182cf1fa1 100644 --- a/deps/v8/src/builtins/builtins-handler-gen.cc +++ b/deps/v8/src/builtins/builtins-handler-gen.cc @@ -28,7 +28,7 @@ TF_BUILTIN(KeyedLoadIC_Slow, CodeStubAssembler) { Node* name = Parameter(Descriptor::kName); Node* context = Parameter(Descriptor::kContext); - TailCallRuntime(Runtime::kKeyedGetProperty, context, receiver, name); + TailCallRuntime(Runtime::kGetProperty, context, receiver, name); } void Builtins::Generate_KeyedStoreIC_Megamorphic( diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc index bbfabc7a0dd540..94d75a8f327eca 100644 --- a/deps/v8/src/builtins/builtins-ic-gen.cc +++ b/deps/v8/src/builtins/builtins-ic-gen.cc @@ -21,13 +21,16 @@ namespace internal { } IC_BUILTIN(LoadIC) +IC_BUILTIN(LoadIC_Megamorphic) IC_BUILTIN(LoadIC_Noninlined) IC_BUILTIN(LoadIC_Uninitialized) -IC_BUILTIN(KeyedLoadIC) IC_BUILTIN(LoadICTrampoline) -IC_BUILTIN(KeyedLoadICTrampoline) +IC_BUILTIN(LoadICTrampoline_Megamorphic) +IC_BUILTIN(KeyedLoadIC) IC_BUILTIN(KeyedLoadIC_Megamorphic) IC_BUILTIN(KeyedLoadIC_PolymorphicName) +IC_BUILTIN(KeyedLoadICTrampoline) +IC_BUILTIN(KeyedLoadICTrampoline_Megamorphic) IC_BUILTIN(StoreGlobalIC) IC_BUILTIN(StoreGlobalICTrampoline) IC_BUILTIN(StoreIC) @@ -36,6 +39,7 @@ IC_BUILTIN(KeyedStoreIC) IC_BUILTIN(KeyedStoreICTrampoline) IC_BUILTIN(StoreInArrayLiteralIC) IC_BUILTIN(CloneObjectIC) +IC_BUILTIN(CloneObjectIC_Slow) IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF) IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF) diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index 7ff88c5a538519..44a18099bf4c00 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -24,10 +24,16 @@ using TNode = compiler::TNode; // Interrupt and stack checks. 
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif masm->TailCallRuntime(Runtime::kInterrupt); } void Builtins::Generate_StackCheck(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif masm->TailCallRuntime(Runtime::kStackGuard); } @@ -350,18 +356,11 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { }; TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) { - Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject)); - Node* slot = Parameter(Descriptor::kSlot); - Node* isolate = Parameter(Descriptor::kIsolate); - Node* remembered_set = Parameter(Descriptor::kRememberedSet); - Node* fp_mode = Parameter(Descriptor::kFPMode); - - Node* value = Load(MachineType::Pointer(), slot); - Label generational_wb(this); Label incremental_wb(this); Label exit(this); + Node* remembered_set = Parameter(Descriptor::kRememberedSet); Branch(ShouldEmitRememberSet(remembered_set), &generational_wb, &incremental_wb); @@ -369,40 +368,58 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) { { Label test_old_to_new_flags(this); Label store_buffer_exit(this), store_buffer_incremental_wb(this); + // When incremental marking is not on, we skip cross generation pointer // checking here, because there are checks for // `kPointersFromHereAreInterestingMask` and // `kPointersToHereAreInterestingMask` in // `src/compiler/<arch>/code-generator-<arch>.cc` before calling this stub, // which serves as the cross generation checking. + Node* slot = Parameter(Descriptor::kSlot); Branch(IsMarking(), &test_old_to_new_flags, &store_buffer_exit); BIND(&test_old_to_new_flags); { + Node* value = Load(MachineType::Pointer(), slot); + // TODO(albertnetymk): Try to cache the page flag for value and object, // instead of calling IsPageFlagSet each time. Node* value_in_new_space = IsPageFlagSet(value, MemoryChunk::kIsInNewSpaceMask); GotoIfNot(value_in_new_space, &incremental_wb); + Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject)); Node* object_in_new_space = IsPageFlagSet(object, MemoryChunk::kIsInNewSpaceMask); - GotoIf(object_in_new_space, &incremental_wb); - - Goto(&store_buffer_incremental_wb); + Branch(object_in_new_space, &incremental_wb, + &store_buffer_incremental_wb); } BIND(&store_buffer_exit); - { InsertToStoreBufferAndGoto(isolate, slot, fp_mode, &exit); } + { + Node* isolate_constant = + ExternalConstant(ExternalReference::isolate_address(isolate())); + Node* fp_mode = Parameter(Descriptor::kFPMode); + InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, &exit); + } BIND(&store_buffer_incremental_wb); - { InsertToStoreBufferAndGoto(isolate, slot, fp_mode, &incremental_wb); } + { + Node* isolate_constant = + ExternalConstant(ExternalReference::isolate_address(isolate())); + Node* fp_mode = Parameter(Descriptor::kFPMode); + InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, + &incremental_wb); + } } BIND(&incremental_wb); { Label call_incremental_wb(this); + Node* slot = Parameter(Descriptor::kSlot); + Node* value = Load(MachineType::Pointer(), slot); + // There are two cases where we need to call the incremental write barrier.
// 1) value_is_white GotoIf(IsWhite(value), &call_incremental_wb); @@ -411,20 +428,23 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) { // is_compacting = true when is_marking = true GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask), &exit); - GotoIf( - IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask), - &exit); - Goto(&call_incremental_wb); + Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject)); + Branch( + IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask), + &exit, &call_incremental_wb); BIND(&call_incremental_wb); { Node* function = ExternalConstant( ExternalReference::incremental_marking_record_write_function()); + Node* isolate_constant = + ExternalConstant(ExternalReference::isolate_address(isolate())); + Node* fp_mode = Parameter(Descriptor::kFPMode); CallCFunction3WithCallerSavedRegistersMode( MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(), - MachineType::Pointer(), function, object, slot, isolate, fp_mode, - &exit); + MachineType::Pointer(), function, object, slot, isolate_constant, + fp_mode, &exit); } } @@ -454,7 +474,7 @@ class DeletePropertyBaseAssembler : public AccessorAssembler { dont_delete); // Overwrite the entry itself (see NameDictionary::SetEntry). TNode filler = TheHoleConstant(); - DCHECK(Heap::RootIsImmortalImmovable(Heap::kTheHoleValueRootIndex)); + DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kTheHoleValue)); StoreFixedArrayElement(properties, key_index, filler, SKIP_WRITE_BARRIER); StoreValueByKeyIndex(properties, key_index, filler, SKIP_WRITE_BARRIER); @@ -609,11 +629,14 @@ class InternalBuiltinsAssembler : public CodeStubAssembler { explicit InternalBuiltinsAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - TNode GetPendingMicrotaskCount(); - void SetPendingMicrotaskCount(TNode count); - - TNode GetMicrotaskQueue(); - void SetMicrotaskQueue(TNode queue); + TNode GetDefaultMicrotaskQueue(); + TNode GetPendingMicrotaskCount( + TNode microtask_queue); + void SetPendingMicrotaskCount(TNode microtask_queue, + TNode new_num_tasks); + TNode GetQueuedMicrotasks(TNode microtask_queue); + void SetQueuedMicrotasks(TNode microtask_queue, + TNode new_queue); TNode GetCurrentContext(); void SetCurrentContext(TNode context); @@ -700,37 +723,34 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, InternalBuiltinsAssembler) { GenerateAdaptorWithExitFrameType(Builtins::BUILTIN_EXIT); } -TNode InternalBuiltinsAssembler::GetPendingMicrotaskCount() { - auto ref = ExternalReference::pending_microtask_count_address(isolate()); - if (kIntSize == 8) { - return TNode::UncheckedCast( - Load(MachineType::Int64(), ExternalConstant(ref))); - } else { - Node* const value = Load(MachineType::Int32(), ExternalConstant(ref)); - return ChangeInt32ToIntPtr(value); - } +TNode InternalBuiltinsAssembler::GetDefaultMicrotaskQueue() { + return TNode::UncheckedCast( + LoadRoot(RootIndex::kDefaultMicrotaskQueue)); } -void InternalBuiltinsAssembler::SetPendingMicrotaskCount(TNode count) { - auto ref = ExternalReference::pending_microtask_count_address(isolate()); - auto rep = kIntSize == 8 ? 
MachineRepresentation::kWord64 - : MachineRepresentation::kWord32; - if (kIntSize == 4 && kPointerSize == 8) { - Node* const truncated_count = - TruncateInt64ToInt32(TNode::UncheckedCast(count)); - StoreNoWriteBarrier(rep, ExternalConstant(ref), truncated_count); - } else { - StoreNoWriteBarrier(rep, ExternalConstant(ref), count); - } +TNode InternalBuiltinsAssembler::GetPendingMicrotaskCount( + TNode microtask_queue) { + TNode result = LoadAndUntagObjectField( + microtask_queue, MicrotaskQueue::kPendingMicrotaskCountOffset); + return result; } -TNode InternalBuiltinsAssembler::GetMicrotaskQueue() { - return TNode::UncheckedCast( - LoadRoot(Heap::kMicrotaskQueueRootIndex)); +void InternalBuiltinsAssembler::SetPendingMicrotaskCount( + TNode microtask_queue, TNode new_num_tasks) { + StoreObjectField(microtask_queue, + MicrotaskQueue::kPendingMicrotaskCountOffset, + SmiFromIntPtr(new_num_tasks)); } -void InternalBuiltinsAssembler::SetMicrotaskQueue(TNode queue) { - StoreRoot(Heap::kMicrotaskQueueRootIndex, queue); +TNode InternalBuiltinsAssembler::GetQueuedMicrotasks( + TNode microtask_queue) { + return LoadObjectField(microtask_queue, + MicrotaskQueue::kQueueOffset); +} + +void InternalBuiltinsAssembler::SetQueuedMicrotasks( + TNode microtask_queue, TNode new_queue) { + StoreObjectField(microtask_queue, MicrotaskQueue::kQueueOffset, new_queue); } TNode InternalBuiltinsAssembler::GetCurrentContext() { @@ -819,9 +839,10 @@ void InternalBuiltinsAssembler::RunPromiseHook( TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) { Node* microtask = Parameter(Descriptor::kMicrotask); - TNode num_tasks = GetPendingMicrotaskCount(); + TNode microtask_queue = GetDefaultMicrotaskQueue(); + TNode num_tasks = GetPendingMicrotaskCount(microtask_queue); TNode new_num_tasks = IntPtrAdd(num_tasks, IntPtrConstant(1)); - TNode queue = GetMicrotaskQueue(); + TNode queue = GetQueuedMicrotasks(microtask_queue); TNode queue_length = LoadAndUntagFixedArrayBaseLength(queue); Label if_append(this), if_grow(this), done(this); @@ -851,8 +872,8 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) { StoreFixedArrayElement(new_queue, num_tasks, microtask, SKIP_WRITE_BARRIER); FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks, - new_queue_length, Heap::kUndefinedValueRootIndex); - SetMicrotaskQueue(new_queue); + new_queue_length, RootIndex::kUndefinedValue); + SetQueuedMicrotasks(microtask_queue, new_queue); Goto(&done); } @@ -865,8 +886,8 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) { CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks); StoreFixedArrayElement(new_queue, num_tasks, microtask); FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks, - new_queue_length, Heap::kUndefinedValueRootIndex); - SetMicrotaskQueue(new_queue); + new_queue_length, RootIndex::kUndefinedValue); + SetQueuedMicrotasks(microtask_queue, new_queue); Goto(&done); } } @@ -878,13 +899,14 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) { } BIND(&done); - SetPendingMicrotaskCount(new_num_tasks); + SetPendingMicrotaskCount(microtask_queue, new_num_tasks); Return(UndefinedConstant()); } TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) { // Load the current context from the isolate. 
TNode current_context = GetCurrentContext(); + TNode microtask_queue = GetDefaultMicrotaskQueue(); Label init_queue_loop(this); Goto(&init_queue_loop); @@ -893,17 +915,17 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) { TVARIABLE(IntPtrT, index, IntPtrConstant(0)); Label loop(this, &index), loop_next(this); - TNode num_tasks = GetPendingMicrotaskCount(); + TNode num_tasks = GetPendingMicrotaskCount(microtask_queue); ReturnIf(IntPtrEqual(num_tasks, IntPtrConstant(0)), UndefinedConstant()); - TNode queue = GetMicrotaskQueue(); + TNode queue = GetQueuedMicrotasks(microtask_queue); CSA_ASSERT(this, IntPtrGreaterThanOrEqual( LoadAndUntagFixedArrayBaseLength(queue), num_tasks)); CSA_ASSERT(this, IntPtrGreaterThan(num_tasks, IntPtrConstant(0))); - SetPendingMicrotaskCount(IntPtrConstant(0)); - SetMicrotaskQueue(EmptyFixedArrayConstant()); + SetQueuedMicrotasks(microtask_queue, EmptyFixedArrayConstant()); + SetPendingMicrotaskCount(microtask_queue, IntPtrConstant(0)); Goto(&loop); BIND(&loop); @@ -1099,20 +1121,20 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) { } TF_BUILTIN(AllocateInNewSpace, CodeStubAssembler) { - TNode requested_size = - UncheckedCast(Parameter(Descriptor::kRequestedSize)); + TNode requested_size = + UncheckedCast(Parameter(Descriptor::kRequestedSize)); TailCallRuntime(Runtime::kAllocateInNewSpace, NoContextConstant(), - SmiFromInt32(requested_size)); + SmiFromIntPtr(requested_size)); } TF_BUILTIN(AllocateInOldSpace, CodeStubAssembler) { - TNode requested_size = - UncheckedCast(Parameter(Descriptor::kRequestedSize)); + TNode requested_size = + UncheckedCast(Parameter(Descriptor::kRequestedSize)); int flags = AllocateTargetSpace::encode(OLD_SPACE); TailCallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(), - SmiFromInt32(requested_size), SmiConstant(flags)); + SmiFromIntPtr(requested_size), SmiConstant(flags)); } TF_BUILTIN(Abort, CodeStubAssembler) { @@ -1178,6 +1200,9 @@ void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit( } void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif // CallApiGetterStub only exists as a stub to avoid duplicating code between // here and code-stubs-.cc. For example, see CallApiFunctionAndReturn. // Here we abuse the instantiated stub to generate code. @@ -1186,6 +1211,9 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { } void Builtins::Generate_CallApiCallback_Argc0(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif // The common variants of CallApiCallbackStub (i.e. all that are embedded into // the snapshot) are generated as builtins. The rest remain available as code // stubs. Here we abuse the instantiated stub to generate code and avoid @@ -1196,6 +1224,9 @@ void Builtins::Generate_CallApiCallback_Argc0(MacroAssembler* masm) { } void Builtins::Generate_CallApiCallback_Argc1(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif // The common variants of CallApiCallbackStub (i.e. all that are embedded into // the snapshot) are generated as builtins. The rest remain available as code // stubs. Here we abuse the instantiated stub to generate code and avoid @@ -1207,27 +1238,23 @@ void Builtins::Generate_CallApiCallback_Argc1(MacroAssembler* masm) { // ES6 [[Get]] operation. 
TF_BUILTIN(GetProperty, CodeStubAssembler) { - Label call_runtime(this, Label::kDeferred), return_undefined(this), end(this); - Node* object = Parameter(Descriptor::kObject); Node* key = Parameter(Descriptor::kKey); Node* context = Parameter(Descriptor::kContext); - VARIABLE(var_result, MachineRepresentation::kTagged); + Label if_notfound(this), if_proxy(this, Label::kDeferred), + if_slow(this, Label::kDeferred); CodeStubAssembler::LookupInHolder lookup_property_in_holder = - [=, &var_result, &end](Node* receiver, Node* holder, Node* holder_map, - Node* holder_instance_type, Node* unique_name, - Label* next_holder, Label* if_bailout) { + [=](Node* receiver, Node* holder, Node* holder_map, + Node* holder_instance_type, Node* unique_name, Label* next_holder, + Label* if_bailout) { VARIABLE(var_value, MachineRepresentation::kTagged); Label if_found(this); TryGetOwnProperty(context, receiver, holder, holder_map, holder_instance_type, unique_name, &if_found, &var_value, next_holder, if_bailout); BIND(&if_found); - { - var_result.Bind(var_value.value()); - Goto(&end); - } + Return(var_value.value()); }; CodeStubAssembler::LookupInHolder lookup_element_in_holder = @@ -1240,23 +1267,26 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) { }; TryPrototypeChainLookup(object, key, lookup_property_in_holder, - lookup_element_in_holder, &return_undefined, - &call_runtime); + lookup_element_in_holder, &if_notfound, &if_slow, + &if_proxy); - BIND(&return_undefined); - { - var_result.Bind(UndefinedConstant()); - Goto(&end); - } + BIND(&if_notfound); + Return(UndefinedConstant()); + + BIND(&if_slow); + TailCallRuntime(Runtime::kGetProperty, context, object, key); - BIND(&call_runtime); + BIND(&if_proxy); { - var_result.Bind(CallRuntime(Runtime::kGetProperty, context, object, key)); - Goto(&end); + // Convert the {key} to a Name first. + Node* name = CallBuiltin(Builtins::kToName, context, key); + + // The {object} is a JSProxy instance, look up the {name} on it, passing + // {object} both as receiver and holder. If {name} is absent we can safely + // return undefined from here. + TailCallBuiltin(Builtins::kProxyGetProperty, context, object, name, object, + SmiConstant(OnNonExistent::kReturnUndefined)); } - - BIND(&end); - Return(var_result.value()); } // ES6 [[Set]] operation. 
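The GetProperty rewrite at the end of the builtins-internal-gen.cc diff above drops the shared var_result merge point: each label now exits directly, with the not-found case returning undefined, the proxy case converting the key with ToName and tail-calling ProxyGetProperty, and the remaining slow case tail-calling the GetProperty runtime function. A plain C++ model of that dispatch follows; all types and helper names here are stand-ins for illustration, not V8 API:

#include <string>

// Stand-in outcome classification for TryPrototypeChainLookup; the real
// builtin expresses these as labels (if_found / if_notfound / if_proxy /
// if_slow) rather than a return value.
enum class Lookup { kFound, kNotFound, kProxy, kSlow };

// Hypothetical helpers modelling the two tail-call targets.
std::string ProxyGetProperty(const std::string& name) {
  // Models Builtins::kProxyGetProperty with OnNonExistent::kReturnUndefined.
  return "<proxy [[Get]] of " + name + ">";
}
std::string RuntimeGetProperty(const std::string& key) {
  // Models Runtime::kGetProperty, the generic slow path.
  return "<runtime [[Get]] of " + key + ">";
}

// Mirrors the new control flow: every label exits directly instead of
// binding a shared result variable and jumping to a common end label.
std::string GetProperty(Lookup outcome, const std::string& key,
                        const std::string& found_value) {
  switch (outcome) {
    case Lookup::kFound:
      return found_value;              // Return(var_value.value())
    case Lookup::kNotFound:
      return "undefined";              // Return(UndefinedConstant())
    case Lookup::kProxy:
      // The real code first converts the key with ToName, then tail-calls
      // ProxyGetProperty with the proxy as both receiver and holder.
      return ProxyGetProperty(key);
    case Lookup::kSlow:
      return RuntimeGetProperty(key);  // TailCallRuntime(Runtime::kGetProperty)
  }
  return "undefined";  // unreachable; satisfies compilers that warn otherwise
}

Tail-calling out of each label removes the Phi node that the old end-label merge created, which is presumably why the rewrite eliminates var_result entirely.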
diff --git a/deps/v8/src/builtins/builtins-interpreter-gen.cc b/deps/v8/src/builtins/builtins-interpreter-gen.cc index f0d5160330e8da..fa1684c54b677f 100644 --- a/deps/v8/src/builtins/builtins-interpreter-gen.cc +++ b/deps/v8/src/builtins/builtins-interpreter-gen.cc @@ -10,12 +10,18 @@ namespace v8 { namespace internal { void Builtins::Generate_InterpreterPushArgsThenCall(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif return Generate_InterpreterPushArgsThenCallImpl( masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kOther); } void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall( MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif return Generate_InterpreterPushArgsThenCallImpl( masm, ConvertReceiverMode::kNullOrUndefined, InterpreterPushArgsMode::kOther); @@ -23,24 +29,36 @@ void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall( void Builtins::Generate_InterpreterPushArgsThenCallWithFinalSpread( MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif return Generate_InterpreterPushArgsThenCallImpl( masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kWithFinalSpread); } void Builtins::Generate_InterpreterPushArgsThenConstruct(MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif return Generate_InterpreterPushArgsThenConstructImpl( masm, InterpreterPushArgsMode::kOther); } void Builtins::Generate_InterpreterPushArgsThenConstructWithFinalSpread( MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif return Generate_InterpreterPushArgsThenConstructImpl( masm, InterpreterPushArgsMode::kWithFinalSpread); } void Builtins::Generate_InterpreterPushArgsThenConstructArrayFunction( MacroAssembler* masm) { +#ifdef V8_TARGET_ARCH_IA32 + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif return Generate_InterpreterPushArgsThenConstructImpl( masm, InterpreterPushArgsMode::kArrayFunction); } diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc index 77e2e81a6c9196..49405141c1086c 100644 --- a/deps/v8/src/builtins/builtins-intl-gen.cc +++ b/deps/v8/src/builtins/builtins-intl-gen.cc @@ -41,8 +41,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) { Label call_c(this), return_string(this), runtime(this, Label::kDeferred); // Early exit on empty strings. - TNode const length = LoadStringLengthAsSmi(string); - GotoIf(SmiEqual(length, SmiConstant(0)), &return_string); + TNode const length = LoadStringLengthAsWord32(string); + GotoIf(Word32Equal(length, Uint32Constant(0)), &return_string); // Unpack strings if possible, and bail to runtime unless we get a one-byte // flat string. @@ -60,7 +60,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) { Node* const dst = AllocateSeqOneByteString(context, length); const int kMaxShortStringLength = 24; // Determined empirically. 
- GotoIf(SmiGreaterThan(length, SmiConstant(kMaxShortStringLength)), &call_c); + GotoIf(Uint32GreaterThan(length, Uint32Constant(kMaxShortStringLength)), + &call_c); { Node* const dst_ptr = PointerToSeqStringData(dst); @@ -69,7 +70,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) { Node* const start_address = to_direct.PointerToData(&call_c); TNode const end_address = - Signed(IntPtrAdd(start_address, SmiUntag(length))); + Signed(IntPtrAdd(start_address, ChangeUint32ToWord(length))); Node* const to_lower_table_addr = ExternalConstant(ExternalReference::intl_to_latin1_lower_table()); @@ -177,10 +178,8 @@ void IntlBuiltinsAssembler::ListFormatCommon(TNode context, BIND(&has_list); { // 5. Let x be ? IterableToList(list). - IteratorBuiltinsAssembler iterator_assembler(state()); - // TODO(adamk): Consider exposing IterableToList as a buitin and calling - // it from here instead of inlining the operation. - TNode x = iterator_assembler.IterableToList(context, list); + TNode x = + CallBuiltin(Builtins::kIterableToListWithSymbolLookup, context, list); // 6. Return ? FormatList(lf, x). args.PopAndReturn(CallRuntime(format_func_id, context, list_format, x)); diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc index 1d54d0da80d18a..4f7b0b1848df13 100644 --- a/deps/v8/src/builtins/builtins-intl.cc +++ b/deps/v8/src/builtins/builtins-intl.cc @@ -10,7 +10,6 @@ #include #include -#include "src/builtins/builtins-intl.h" #include "src/builtins/builtins-utils-inl.h" #include "src/builtins/builtins.h" #include "src/date.h" @@ -19,11 +18,15 @@ #include "src/objects-inl.h" #include "src/objects/intl-objects.h" #include "src/objects/js-array-inl.h" +#include "src/objects/js-break-iterator-inl.h" #include "src/objects/js-collator-inl.h" +#include "src/objects/js-date-time-format-inl.h" #include "src/objects/js-list-format-inl.h" #include "src/objects/js-locale-inl.h" +#include "src/objects/js-number-format-inl.h" #include "src/objects/js-plural-rules-inl.h" #include "src/objects/js-relative-time-format-inl.h" +#include "src/property-descriptor.h" #include "unicode/datefmt.h" #include "unicode/decimfmt.h" @@ -32,12 +35,10 @@ #include "unicode/listformatter.h" #include "unicode/normalizer2.h" #include "unicode/numfmt.h" -#include "unicode/reldatefmt.h" #include "unicode/smpdtfmt.h" #include "unicode/udat.h" #include "unicode/ufieldpositer.h" #include "unicode/unistr.h" -#include "unicode/ureldatefmt.h" #include "unicode/ustring.h" namespace v8 { @@ -130,327 +131,48 @@ BUILTIN(StringPrototypeNormalizeIntl) { namespace { -// The list comes from third_party/icu/source/i18n/unicode/unum.h. -// They're mapped to NumberFormat part types mentioned throughout -// https://tc39.github.io/ecma402/#sec-partitionnumberpattern . 
-Handle IcuNumberFieldIdToNumberType(int32_t field_id, double number, - Isolate* isolate) { - switch (static_cast(field_id)) { - case UNUM_INTEGER_FIELD: - if (std::isfinite(number)) return isolate->factory()->integer_string(); - if (std::isnan(number)) return isolate->factory()->nan_string(); - return isolate->factory()->infinity_string(); - case UNUM_FRACTION_FIELD: - return isolate->factory()->fraction_string(); - case UNUM_DECIMAL_SEPARATOR_FIELD: - return isolate->factory()->decimal_string(); - case UNUM_GROUPING_SEPARATOR_FIELD: - return isolate->factory()->group_string(); - case UNUM_CURRENCY_FIELD: - return isolate->factory()->currency_string(); - case UNUM_PERCENT_FIELD: - return isolate->factory()->percentSign_string(); - case UNUM_SIGN_FIELD: - return number < 0 ? isolate->factory()->minusSign_string() - : isolate->factory()->plusSign_string(); - - case UNUM_EXPONENT_SYMBOL_FIELD: - case UNUM_EXPONENT_SIGN_FIELD: - case UNUM_EXPONENT_FIELD: - // We should never get these because we're not using any scientific - // formatter. - UNREACHABLE(); - return Handle(); - - case UNUM_PERMILL_FIELD: - // We're not creating any permill formatter, and it's not even clear how - // that would be possible with the ICU API. - UNREACHABLE(); - return Handle(); - - default: - UNREACHABLE(); - return Handle(); - } -} - -// The list comes from third_party/icu/source/i18n/unicode/udat.h. -// They're mapped to DateTimeFormat components listed at -// https://tc39.github.io/ecma402/#sec-datetimeformat-abstracts . - -Handle IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) { - switch (field_id) { - case -1: - return isolate->factory()->literal_string(); - case UDAT_YEAR_FIELD: - case UDAT_EXTENDED_YEAR_FIELD: - case UDAT_YEAR_NAME_FIELD: - return isolate->factory()->year_string(); - case UDAT_MONTH_FIELD: - case UDAT_STANDALONE_MONTH_FIELD: - return isolate->factory()->month_string(); - case UDAT_DATE_FIELD: - return isolate->factory()->day_string(); - case UDAT_HOUR_OF_DAY1_FIELD: - case UDAT_HOUR_OF_DAY0_FIELD: - case UDAT_HOUR1_FIELD: - case UDAT_HOUR0_FIELD: - return isolate->factory()->hour_string(); - case UDAT_MINUTE_FIELD: - return isolate->factory()->minute_string(); - case UDAT_SECOND_FIELD: - return isolate->factory()->second_string(); - case UDAT_DAY_OF_WEEK_FIELD: - case UDAT_DOW_LOCAL_FIELD: - case UDAT_STANDALONE_DAY_FIELD: - return isolate->factory()->weekday_string(); - case UDAT_AM_PM_FIELD: - return isolate->factory()->dayperiod_string(); - case UDAT_TIMEZONE_FIELD: - case UDAT_TIMEZONE_RFC_FIELD: - case UDAT_TIMEZONE_GENERIC_FIELD: - case UDAT_TIMEZONE_SPECIAL_FIELD: - case UDAT_TIMEZONE_LOCALIZED_GMT_OFFSET_FIELD: - case UDAT_TIMEZONE_ISO_FIELD: - case UDAT_TIMEZONE_ISO_LOCAL_FIELD: - return isolate->factory()->timeZoneName_string(); - case UDAT_ERA_FIELD: - return isolate->factory()->era_string(); - default: - // Other UDAT_*_FIELD's cannot show up because there is no way to specify - // them via options of Intl.DateTimeFormat. - UNREACHABLE(); - // To prevent MSVC from issuing C4715 warning. - return Handle(); - } -} - -bool cmp_NumberFormatSpan(const NumberFormatSpan& a, - const NumberFormatSpan& b) { - // Regions that start earlier should be encountered earlier. - if (a.begin_pos < b.begin_pos) return true; - if (a.begin_pos > b.begin_pos) return false; - // For regions that start in the same place, regions that last longer should - // be encountered earlier. 
- if (a.end_pos < b.end_pos) return false; - if (a.end_pos > b.end_pos) return true; - // For regions that are exactly the same, one of them must be the "literal" - // backdrop we added, which has a field_id of -1, so consider higher field_ids - // to be later. - return a.field_id < b.field_id; -} - -MaybeHandle FormatNumberToParts(Isolate* isolate, - icu::NumberFormat* fmt, double number) { +MaybeHandle SupportedLocalesOfCommon(Isolate* isolate, + const char* service_in, + BuiltinArguments args) { Factory* factory = isolate->factory(); + Handle service = factory->NewStringFromAsciiChecked(service_in); + Handle locales = args.atOrUndefined(isolate, 1); + Handle options = args.atOrUndefined(isolate, 2); - icu::UnicodeString formatted; - icu::FieldPositionIterator fp_iter; - UErrorCode status = U_ZERO_ERROR; - fmt->format(number, formatted, &fp_iter, status); - if (U_FAILURE(status)) { - THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object); - } - - Handle result = factory->NewJSArray(0); - int32_t length = formatted.length(); - if (length == 0) return result; - - std::vector regions; - // Add a "literal" backdrop for the entire string. This will be used if no - // other region covers some part of the formatted string. It's possible - // there's another field with exactly the same begin and end as this backdrop, - // in which case the backdrop's field_id of -1 will give it lower priority. - regions.push_back(NumberFormatSpan(-1, 0, formatted.length())); - - { - icu::FieldPosition fp; - while (fp_iter.next(fp)) { - regions.push_back(NumberFormatSpan(fp.getField(), fp.getBeginIndex(), - fp.getEndIndex())); - } - } - - std::vector parts = FlattenRegionsToParts(®ions); - - int index = 0; - for (auto it = parts.begin(); it < parts.end(); it++) { - NumberFormatSpan part = *it; - Handle field_type_string = - part.field_id == -1 - ? isolate->factory()->literal_string() - : IcuNumberFieldIdToNumberType(part.field_id, number, isolate); - Handle substring; - ASSIGN_RETURN_ON_EXCEPTION( - isolate, substring, - Intl::ToString(isolate, formatted, part.begin_pos, part.end_pos), - Object); - Intl::AddElement(isolate, result, index, field_type_string, substring); - ++index; - } - JSObject::ValidateElements(*result); - - return result; + MaybeHandle result = + Intl::SupportedLocalesOf(isolate, service, locales, options); + Handle elements; + ASSIGN_RETURN_ON_EXCEPTION(isolate, elements, result, JSObject); + return elements; } -MaybeHandle FormatDateToParts(Isolate* isolate, icu::DateFormat* format, - double date_value) { - Factory* factory = isolate->factory(); +} // namespace - icu::UnicodeString formatted; - icu::FieldPositionIterator fp_iter; - icu::FieldPosition fp; - UErrorCode status = U_ZERO_ERROR; - format->format(date_value, formatted, &fp_iter, status); - if (U_FAILURE(status)) { - THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object); +BUILTIN(V8BreakIteratorSupportedLocalesOf) { + HandleScope scope(isolate); + // 1. If NewTarget is defined, throw a TypeError exception. 
+ if (!args.new_target()->IsUndefined(isolate)) { // [[Call]] + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewTypeError(MessageTemplate::kOrdinaryFunctionCalledAsConstructor, + isolate->factory()->NewStringFromStaticChars( + "Intl.v8BreakIterator"))); } - Handle result = factory->NewJSArray(0); - int32_t length = formatted.length(); - if (length == 0) return result; - - int index = 0; - int32_t previous_end_pos = 0; - Handle substring; - while (fp_iter.next(fp)) { - int32_t begin_pos = fp.getBeginIndex(); - int32_t end_pos = fp.getEndIndex(); - - if (previous_end_pos < begin_pos) { - ASSIGN_RETURN_ON_EXCEPTION( - isolate, substring, - Intl::ToString(isolate, formatted, previous_end_pos, begin_pos), - Object); - Intl::AddElement(isolate, result, index, - IcuDateFieldIdToDateType(-1, isolate), substring); - ++index; - } - ASSIGN_RETURN_ON_EXCEPTION( - isolate, substring, - Intl::ToString(isolate, formatted, begin_pos, end_pos), Object); - Intl::AddElement(isolate, result, index, - IcuDateFieldIdToDateType(fp.getField(), isolate), - substring); - previous_end_pos = end_pos; - ++index; - } - if (previous_end_pos < length) { - ASSIGN_RETURN_ON_EXCEPTION( - isolate, substring, - Intl::ToString(isolate, formatted, previous_end_pos, length), Object); - Intl::AddElement(isolate, result, index, - IcuDateFieldIdToDateType(-1, isolate), substring); - } - JSObject::ValidateElements(*result); - return result; + RETURN_RESULT_OR_FAILURE( + isolate, SupportedLocalesOfCommon(isolate, "breakiterator", args)); } -} // namespace - -// Flattens a list of possibly-overlapping "regions" to a list of -// non-overlapping "parts". At least one of the input regions must span the -// entire space of possible indexes. The regions parameter will sorted in-place -// according to some criteria; this is done for performance to avoid copying the -// input. -std::vector FlattenRegionsToParts( - std::vector* regions) { - // The intention of this algorithm is that it's used to translate ICU "fields" - // to JavaScript "parts" of a formatted string. Each ICU field and JavaScript - // part has an integer field_id, which corresponds to something like "grouping - // separator", "fraction", or "percent sign", and has a begin and end - // position. Here's a diagram of: - - // var nf = new Intl.NumberFormat(['de'], {style:'currency',currency:'EUR'}); - // nf.formatToParts(123456.78); - - // : 6 - // input regions: 0000000211 7 - // ('-' means -1): ------------ - // formatted string: "123.456,78 €" - // output parts: 0006000211-7 - - // To illustrate the requirements of this algorithm, here's a contrived and - // convoluted example of inputs and expected outputs: - - // : 4 - // : 22 33 3 - // : 11111 22 - // input regions: 0000000 111 - // : ------------ - // formatted string: "abcdefghijkl" - // output parts: 0221340--231 - // (The characters in the formatted string are irrelevant to this function.) - - // We arrange the overlapping input regions like a mountain range where - // smaller regions are "on top" of larger regions, and we output a birds-eye - // view of the mountains, so that smaller regions take priority over larger - // regions. - std::sort(regions->begin(), regions->end(), cmp_NumberFormatSpan); - std::vector overlapping_region_index_stack; - // At least one item in regions must be a region spanning the entire string. - // Due to the sorting above, the first item in the vector will be one of them. 
- overlapping_region_index_stack.push_back(0); - NumberFormatSpan top_region = regions->at(0); - size_t region_iterator = 1; - int32_t entire_size = top_region.end_pos; - - std::vector out_parts; - - // The "climber" is a cursor that advances from left to right climbing "up" - // and "down" the mountains. Whenever the climber moves to the right, that - // represents an item of output. - int32_t climber = 0; - while (climber < entire_size) { - int32_t next_region_begin_pos; - if (region_iterator < regions->size()) { - next_region_begin_pos = regions->at(region_iterator).begin_pos; - } else { - // finish off the rest of the input by proceeding to the end. - next_region_begin_pos = entire_size; - } - - if (climber < next_region_begin_pos) { - while (top_region.end_pos < next_region_begin_pos) { - if (climber < top_region.end_pos) { - // step down - out_parts.push_back(NumberFormatSpan(top_region.field_id, climber, - top_region.end_pos)); - climber = top_region.end_pos; - } else { - // drop down - } - overlapping_region_index_stack.pop_back(); - top_region = regions->at(overlapping_region_index_stack.back()); - } - if (climber < next_region_begin_pos) { - // cross a plateau/mesa/valley - out_parts.push_back(NumberFormatSpan(top_region.field_id, climber, - next_region_begin_pos)); - climber = next_region_begin_pos; - } - } - if (region_iterator < regions->size()) { - overlapping_region_index_stack.push_back(region_iterator++); - top_region = regions->at(overlapping_region_index_stack.back()); - } - } - return out_parts; +BUILTIN(NumberFormatSupportedLocalesOf) { + HandleScope scope(isolate); + RETURN_RESULT_OR_FAILURE( + isolate, SupportedLocalesOfCommon(isolate, "numberformat", args)); } BUILTIN(NumberFormatPrototypeFormatToParts) { const char* const method = "Intl.NumberFormat.prototype.formatToParts"; HandleScope handle_scope(isolate); - CHECK_RECEIVER(JSObject, number_format_holder, method); - - if (!Intl::IsObjectOfType(isolate, number_format_holder, - Intl::Type::kNumberFormat)) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, - NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, - isolate->factory()->NewStringFromAsciiChecked(method), - number_format_holder)); - } + CHECK_RECEIVER(JSNumberFormat, number_format, method); Handle x; if (args.length() >= 2) { @@ -460,12 +182,14 @@ BUILTIN(NumberFormatPrototypeFormatToParts) { x = isolate->factory()->nan_value(); } - icu::DecimalFormat* number_format = - NumberFormat::UnpackNumberFormat(number_format_holder); - CHECK_NOT_NULL(number_format); + RETURN_RESULT_OR_FAILURE(isolate, JSNumberFormat::FormatToParts( + isolate, number_format, x->Number())); +} +BUILTIN(DateTimeFormatSupportedLocalesOf) { + HandleScope scope(isolate); RETURN_RESULT_OR_FAILURE( - isolate, FormatNumberToParts(isolate, number_format, x->Number())); + isolate, SupportedLocalesOfCommon(isolate, "dateformat", args)); } BUILTIN(DateTimeFormatPrototypeFormatToParts) { @@ -474,13 +198,14 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) { CHECK_RECEIVER(JSObject, date_format_holder, method); Factory* factory = isolate->factory(); - if (!Intl::IsObjectOfType(isolate, date_format_holder, - Intl::Type::kDateTimeFormat)) { + if (!date_format_holder->IsJSDateTimeFormat()) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, factory->NewStringFromAsciiChecked(method), date_format_holder)); } + Handle dtf = + Handle::cast(date_format_holder); Handle x = args.atOrUndefined(isolate, 1); if (x->IsUndefined(isolate)) { @@ -496,12 
+221,139 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) { isolate, NewRangeError(MessageTemplate::kInvalidTimeValue)); } - icu::SimpleDateFormat* date_format = - DateFormat::UnpackDateFormat(date_format_holder); - CHECK_NOT_NULL(date_format); + RETURN_RESULT_OR_FAILURE( + isolate, JSDateTimeFormat::FormatToParts(isolate, dtf, date_value)); +} + +namespace { +Handle CreateBoundFunction(Isolate* isolate, + Handle object, + Builtins::Name builtin_id, int len) { + Handle native_context(isolate->context()->native_context(), + isolate); + Handle context = isolate->factory()->NewBuiltinContext( + native_context, + static_cast(Intl::BoundFunctionContextSlot::kLength)); + + context->set(static_cast(Intl::BoundFunctionContextSlot::kBoundFunction), + *object); - RETURN_RESULT_OR_FAILURE(isolate, - FormatDateToParts(isolate, date_format, date_value)); + Handle info = + isolate->factory()->NewSharedFunctionInfoForBuiltin( + isolate->factory()->empty_string(), builtin_id, kNormalFunction); + info->set_internal_formal_parameter_count(len); + info->set_length(len); + + Handle map = isolate->strict_function_without_prototype_map(); + + Handle new_bound_function = + isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context); + return new_bound_function; +} + +/** + * Common code shared between DateTimeFormatConstructor and + * NumberFormatConstrutor + */ +template +Object* FormatConstructor(BuiltinArguments args, Isolate* isolate, + Handle constructor, const char* method) { + Handle new_target; + // 1. If NewTarget is undefined, let newTarget be the active + // function object, else let newTarget be NewTarget. + if (args.new_target()->IsUndefined(isolate)) { + new_target = args.target(); + } else { + new_target = Handle::cast(args.new_target()); + } + + // [[Construct]] + Handle target = args.target(); + + Handle locales = args.atOrUndefined(isolate, 1); + Handle options = args.atOrUndefined(isolate, 2); + + // 2. Let format be ? OrdinaryCreateFromConstructor(newTarget, + // "%Prototype%", ...). + + Handle format_obj; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, format_obj, + JSObject::New(target, new_target, Handle::null())); + Handle format = Handle::cast(format_obj); + + // 3. Perform ? Initialize(Format, locales, options). + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, format, T::Initialize(isolate, format, locales, options)); + // 4. Let this be the this value. + Handle receiver = args.receiver(); + + // 5. If NewTarget is undefined and ? InstanceofOperator(this, %%) + // is true, then + // + // Look up the intrinsic value that has been stored on the context. + // Call the instanceof function + Handle is_instance_of_obj; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, is_instance_of_obj, + Object::InstanceOf(isolate, receiver, constructor)); + + // Get the boolean value of the result + bool is_instance_of = is_instance_of_obj->BooleanValue(isolate); + + if (args.new_target()->IsUndefined(isolate) && is_instance_of) { + if (!receiver->IsJSReceiver()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, + isolate->factory()->NewStringFromAsciiChecked(method), + receiver)); + } + Handle rec = Handle::cast(receiver); + // a. Perform ? DefinePropertyOrThrow(this, + // %Intl%.[[FallbackSymbol]], PropertyDescriptor{ [[Value]]: format, + // [[Writable]]: false, [[Enumerable]]: false, [[Configurable]]: false }). 
+    PropertyDescriptor desc;
+    desc.set_value(format);
+    desc.set_writable(false);
+    desc.set_enumerable(false);
+    desc.set_configurable(false);
+    Maybe<bool> success = JSReceiver::DefineOwnProperty(
+        isolate, rec, isolate->factory()->intl_fallback_symbol(), &desc,
+        kThrowOnError);
+    MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception());
+    CHECK(success.FromJust());
+    // b. Return this.
+    return *receiver;
+  }
+  // 6. Return format.
+  return *format;
+}
+
+}  // namespace
+
+BUILTIN(NumberFormatConstructor) {
+  HandleScope scope(isolate);
+  return FormatConstructor<JSNumberFormat>(
+      args, isolate, isolate->intl_number_format_function(),
+      "Intl.NumberFormat");
+}
+
+BUILTIN(NumberFormatPrototypeResolvedOptions) {
+  HandleScope scope(isolate);
+  const char* const method = "Intl.NumberFormat.prototype.resolvedOptions";
+
+  // 1. Let nf be the this value.
+  // 2. If Type(nf) is not Object, throw a TypeError exception.
+  CHECK_RECEIVER(JSReceiver, number_format_holder, method);
+
+  // 3. Let nf be ? UnwrapNumberFormat(nf)
+  Handle<JSNumberFormat> number_format;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, number_format,
+      JSNumberFormat::UnwrapNumberFormat(isolate, number_format_holder));
+
+  return *JSNumberFormat::ResolvedOptions(isolate, number_format);
 }
 
 BUILTIN(NumberFormatPrototypeFormatNumber) {
@@ -513,17 +365,12 @@ BUILTIN(NumberFormatPrototypeFormatNumber) {
   CHECK_RECEIVER(JSReceiver, receiver, method);
 
   // 3. Let nf be ? UnwrapNumberFormat(nf).
-  Handle<JSObject> number_format_holder;
+  Handle<JSNumberFormat> number_format;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, number_format_holder,
-      NumberFormat::Unwrap(isolate, receiver, method));
+      isolate, number_format,
+      JSNumberFormat::UnwrapNumberFormat(isolate, receiver));
 
-  DCHECK(Intl::IsObjectOfType(isolate, number_format_holder,
-                              Intl::Type::kNumberFormat));
-
-  Handle<Object> bound_format = Handle<Object>(
-      number_format_holder->GetEmbedderField(NumberFormat::kBoundFormatIndex),
-      isolate);
+  Handle<Object> bound_format(number_format->bound_format(), isolate);
 
   // 4. If nf.[[BoundFormat]] is undefined, then
   if (!bound_format->IsUndefined(isolate)) {
@@ -532,29 +379,11 @@ BUILTIN(NumberFormatPrototypeFormatNumber) {
     return *bound_format;
   }
 
-  Handle<Context> native_context(isolate->context()->native_context(),
-                                 isolate);
-
-  Handle<Context> context = isolate->factory()->NewBuiltinContext(
-      native_context, NumberFormat::ContextSlot::kLength);
-
-  // 4. b. Set F.[[NumberFormat]] to nf.
-  context->set(NumberFormat::ContextSlot::kNumberFormat, *number_format_holder);
-
-  Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
-      native_context->number_format_internal_format_number_shared_fun(),
-      isolate);
-
-  Handle<Map> map = isolate->strict_function_without_prototype_map();
-
-  // 4. a. Let F be a new built-in function object as defined in
-  // Number Format Functions (11.1.4).
-  Handle<JSFunction> new_bound_format_function =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+  Handle<JSFunction> new_bound_format_function = CreateBoundFunction(
+      isolate, number_format, Builtins::kNumberFormatInternalFormatNumber, 1);
 
   // 4. c. Set nf.[[BoundFormat]] to F.
-  number_format_holder->SetEmbedderField(NumberFormat::kBoundFormatIndex,
-                                         *new_bound_format_function);
+  number_format->set_bound_format(*new_bound_format_function);
 
   // 5. Return nf.[[BoundFormat]].
   return *new_bound_format_function;
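
For orientation, the JS-observable behavior that the shared FormatConstructor template and the new resolvedOptions path implement can be sketched in TypeScript. This is a hedged illustration of standard ECMA-402 behavior, not code from the patch; the exact part values printed are locale-dependent.

    const nf = new Intl.NumberFormat("en", { style: "percent" });
    console.log(nf.formatToParts(0.5));
    // e.g. [ { type: "integer", value: "50" }, { type: "percentSign", value: "%" } ]

    // FormatConstructor step 5: calling the constructor without `new` on an
    // object inheriting from the prototype stores the real format object under
    // the Intl fallback symbol and returns the receiver unchanged.
    const obj = Object.create(Intl.NumberFormat.prototype);
    console.log(Intl.NumberFormat.call(obj) === obj);  // true
    // UnwrapNumberFormat then finds the hidden instance via that symbol:
    console.log(Intl.NumberFormat.prototype.resolvedOptions.call(obj).locale);
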
@@ -566,14 +395,12 @@ BUILTIN(NumberFormatInternalFormatNumber) {
   HandleScope scope(isolate);
 
   Handle<Context> context = Handle<Context>(isolate->context(), isolate);
 
   // 1. Let nf be F.[[NumberFormat]].
-  Handle<JSObject> number_format_holder = Handle<JSObject>(
-      JSObject::cast(context->get(NumberFormat::ContextSlot::kNumberFormat)),
-      isolate);
-
   // 2. Assert: Type(nf) is Object and nf has an
   //    [[InitializedNumberFormat]] internal slot.
-  DCHECK(Intl::IsObjectOfType(isolate, number_format_holder,
-                              Intl::Type::kNumberFormat));
+  Handle<JSNumberFormat> number_format = Handle<JSNumberFormat>(
+      JSNumberFormat::cast(context->get(
+          static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+      isolate);
 
   // 3. If value is not provided, let value be undefined.
   Handle<Object> value = args.atOrUndefined(isolate, 1);
@@ -590,8 +417,15 @@ BUILTIN(NumberFormatInternalFormatNumber) {
   double number = number_obj->Number();
 
   // Return FormatNumber(nf, x).
-  RETURN_RESULT_OR_FAILURE(isolate, NumberFormat::FormatNumber(
-                                        isolate, number_format_holder, number));
+  RETURN_RESULT_OR_FAILURE(
+      isolate, JSNumberFormat::FormatNumber(isolate, number_format, number));
+}
+
+BUILTIN(DateTimeFormatConstructor) {
+  HandleScope scope(isolate);
+  return FormatConstructor<JSDateTimeFormat>(
+      args, isolate, isolate->intl_date_time_format_function(),
+      "Intl.DateTimeFormat");
 }
 
 BUILTIN(DateTimeFormatPrototypeFormat) {
@@ -603,16 +437,12 @@ BUILTIN(DateTimeFormatPrototypeFormat) {
   CHECK_RECEIVER(JSReceiver, receiver, method);
 
   // 3. Let dtf be ? UnwrapDateTimeFormat(dtf).
-  Handle<JSObject> date_format_holder;
+  Handle<JSDateTimeFormat> format;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, date_format_holder,
-      DateFormat::Unwrap(isolate, receiver, method));
-  DCHECK(Intl::IsObjectOfType(isolate, date_format_holder,
-                              Intl::Type::kDateTimeFormat));
+      isolate, format,
+      JSDateTimeFormat::UnwrapDateTimeFormat(isolate, receiver));
 
-  Handle<Object> bound_format = Handle<Object>(
-      date_format_holder->GetEmbedderField(DateFormat::kBoundFormatIndex),
-      isolate);
+  Handle<Object> bound_format = Handle<Object>(format->bound_format(), isolate);
 
   // 4. If dtf.[[BoundFormat]] is undefined, then
   if (!bound_format->IsUndefined(isolate)) {
@@ -621,26 +451,11 @@ BUILTIN(DateTimeFormatPrototypeFormat) {
     return *bound_format;
   }
 
-  Handle<Context> native_context(isolate->context()->native_context(),
-                                 isolate);
-  Handle<Context> context = isolate->factory()->NewBuiltinContext(
-      native_context, DateFormat::ContextSlot::kLength);
-
-  // 4.b. Set F.[[DateTimeFormat]] to dtf.
-  context->set(DateFormat::ContextSlot::kDateFormat, *date_format_holder);
-
-  Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
-      native_context->date_format_internal_format_shared_fun(), isolate);
-  Handle<Map> map = isolate->strict_function_without_prototype_map();
-
-  // 4.a. Let F be a new built-in function object as defined in DateTime Format
-  // Functions (12.1.5).
-  Handle<JSFunction> new_bound_format_function =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+  Handle<JSFunction> new_bound_format_function = CreateBoundFunction(
+      isolate, format, Builtins::kDateTimeFormatInternalFormat, 1);
 
   // 4.c. Set dtf.[[BoundFormat]] to F.
-  date_format_holder->SetEmbedderField(DateFormat::kBoundFormatIndex,
-                                       *new_bound_format_function);
+  format->set_bound_format(*new_bound_format_function);
 
   // 5. Return dtf.[[BoundFormat]].
   return *new_bound_format_function;
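
The [[BoundFormat]] caching above has a simple observable consequence, sketched here in TypeScript (standard API usage, not patch code; the printed strings depend on the host's locale data):

    const dtf = new Intl.DateTimeFormat("en", { month: "long" });
    // The getter creates the bound function once, then returns the cached one:
    console.log(dtf.format === dtf.format);  // true
    // Because it is bound to the format object, it survives detachment:
    const f = dtf.format;
    console.log([new Date(2018, 0, 1), new Date(2018, 5, 1)].map(f));
    // e.g. [ "January", "June" ]
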
@@ -651,19 +466,17 @@ BUILTIN(DateTimeFormatInternalFormat) {
   Handle<Context> context = Handle<Context>(isolate->context(), isolate);
 
   // 1. Let dtf be F.[[DateTimeFormat]].
-  Handle<JSObject> date_format_holder = Handle<JSObject>(
-      JSObject::cast(context->get(DateFormat::ContextSlot::kDateFormat)),
-      isolate);
-
   // 2. Assert: Type(dtf) is Object and dtf has an [[InitializedDateTimeFormat]]
   //    internal slot.
-  DCHECK(Intl::IsObjectOfType(isolate, date_format_holder,
-                              Intl::Type::kDateTimeFormat));
+  Handle<JSDateTimeFormat> date_format_holder = Handle<JSDateTimeFormat>(
+      JSDateTimeFormat::cast(context->get(
+          static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+      isolate);
 
   Handle<Object> date = args.atOrUndefined(isolate, 1);
 
-  RETURN_RESULT_OR_FAILURE(
-      isolate, DateFormat::DateTimeFormat(isolate, date_format_holder, date));
+  RETURN_RESULT_OR_FAILURE(isolate, JSDateTimeFormat::DateTimeFormat(
+                                        isolate, date_format_holder, date));
 }
 
 BUILTIN(ListFormatConstructor) {
@@ -682,8 +495,9 @@ BUILTIN(ListFormatConstructor) {
   Handle<JSObject> result;
   // 2. Let listFormat be OrdinaryCreateFromConstructor(NewTarget,
   //    "%ListFormatPrototype%").
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     JSObject::New(target, new_target));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      JSObject::New(target, new_target, Handle<AllocationSite>::null()));
   Handle<JSListFormat> format = Handle<JSListFormat>::cast(result);
   format->set_flags(0);
 
@@ -702,6 +516,12 @@ BUILTIN(ListFormatPrototypeResolvedOptions) {
   return *JSListFormat::ResolvedOptions(isolate, format_holder);
 }
 
+BUILTIN(ListFormatSupportedLocalesOf) {
+  HandleScope scope(isolate);
+  RETURN_RESULT_OR_FAILURE(
+      isolate, SupportedLocalesOfCommon(isolate, "listformat", args));
+}
+
 namespace {
 
 MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
@@ -709,8 +529,10 @@ MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
                                    Handle<JSReceiver> new_target,
                                    Handle<Object> tag, Handle<Object> options) {
   Handle<JSObject> result;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
-                             JSObject::New(constructor, new_target), JSLocale);
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, result,
+      JSObject::New(constructor, new_target, Handle<AllocationSite>::null()),
+      JSLocale);
 
   // First parameter is a locale, as a string/object. Can't be empty.
   if (!tag->IsString() && !tag->IsJSReceiver()) {
@@ -786,186 +608,12 @@ BUILTIN(LocalePrototypeMinimize) {
                                isolate->factory()->NewJSObjectWithNullProto()));
 }
 
-namespace {
-
-MaybeHandle<JSArray> GenerateRelativeTimeFormatParts(
-    Isolate* isolate, icu::UnicodeString formatted,
-    icu::UnicodeString integer_part, Handle<String> unit) {
-  Factory* factory = isolate->factory();
-  Handle<JSArray> array = factory->NewJSArray(0);
-  int32_t found = formatted.indexOf(integer_part);
-
-  Handle<String> substring;
-  if (found < 0) {
-    // Cannot find the integer_part in the formatted.
-    // Return [{'type': 'literal', 'value': formatted}]
-    ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
-                               Intl::ToString(isolate, formatted), JSArray);
-    Intl::AddElement(isolate, array,
-                     0,                          // index
-                     factory->literal_string(),  // field_type_string
-                     substring);
-  } else {
-    // Found the formatted integer in the result.
-    int index = 0;
-
-    // array.push({
-    //     'type': 'literal',
-    //     'value': formatted.substring(0, found)})
-    if (found > 0) {
-      ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
-                                 Intl::ToString(isolate, formatted, 0, found),
-                                 JSArray);
-      Intl::AddElement(isolate, array, index++,
-                       factory->literal_string(),  // field_type_string
-                       substring);
-    }
-
-    // array.push({
-    //     'type': 'integer',
-    //     'value': formatted.substring(found, found + integer_part.length),
-    //     'unit': unit})
-    ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
-                               Intl::ToString(isolate, formatted, found,
-                                              found + integer_part.length()),
-                               JSArray);
-    Intl::AddElement(isolate, array, index++,
-                     factory->integer_string(),  // field_type_string
-                     substring, factory->unit_string(), unit);
-
-    // array.push({
-    //     'type': 'literal',
-    //     'value': formatted.substring(
-    //         found + integer_part.length, formatted.length)})
-    if (found + integer_part.length() < formatted.length()) {
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, substring,
-          Intl::ToString(isolate, formatted, found + integer_part.length(),
-                         formatted.length()),
-          JSArray);
-      Intl::AddElement(isolate, array, index,
-                       factory->literal_string(),  // field_type_string
-                       substring);
-    }
-  }
-  return array;
-}
-
-bool GetURelativeDateTimeUnit(Handle<String> unit,
-                              URelativeDateTimeUnit* unit_enum) {
-  std::unique_ptr<char[]> unit_str = unit->ToCString();
-  if ((strcmp("second", unit_str.get()) == 0) ||
-      (strcmp("seconds", unit_str.get()) == 0)) {
-    *unit_enum = UDAT_REL_UNIT_SECOND;
-  } else if ((strcmp("minute", unit_str.get()) == 0) ||
-             (strcmp("minutes", unit_str.get()) == 0)) {
-    *unit_enum = UDAT_REL_UNIT_MINUTE;
-  } else if ((strcmp("hour", unit_str.get()) == 0) ||
-             (strcmp("hours", unit_str.get()) == 0)) {
-    *unit_enum = UDAT_REL_UNIT_HOUR;
-  } else if ((strcmp("day", unit_str.get()) == 0) ||
-             (strcmp("days", unit_str.get()) == 0)) {
-    *unit_enum = UDAT_REL_UNIT_DAY;
-  } else if ((strcmp("week", unit_str.get()) == 0) ||
-             (strcmp("weeks", unit_str.get()) == 0)) {
-    *unit_enum = UDAT_REL_UNIT_WEEK;
-  } else if ((strcmp("month", unit_str.get()) == 0) ||
-             (strcmp("months", unit_str.get()) == 0)) {
-    *unit_enum = UDAT_REL_UNIT_MONTH;
-  } else if ((strcmp("quarter", unit_str.get()) == 0) ||
-             (strcmp("quarters", unit_str.get()) == 0)) {
-    *unit_enum = UDAT_REL_UNIT_QUARTER;
-  } else if ((strcmp("year", unit_str.get()) == 0) ||
-             (strcmp("years", unit_str.get()) == 0)) {
-    *unit_enum = UDAT_REL_UNIT_YEAR;
-  } else {
-    return false;
-  }
-  return true;
-}
-
-MaybeHandle<Object> RelativeTimeFormatPrototypeFormatCommon(
-    BuiltinArguments args, Isolate* isolate,
-    Handle<JSRelativeTimeFormat> format_holder, const char* func_name,
-    bool to_parts) {
-  Factory* factory = isolate->factory();
-  Handle<Object> value_obj = args.atOrUndefined(isolate, 1);
-  Handle<Object> unit_obj = args.atOrUndefined(isolate, 2);
-
-  // 3. Let value be ? ToNumber(value).
-  Handle<Object> value;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
-                             Object::ToNumber(isolate, value_obj), Object);
-  double number = value->Number();
-  // 4. Let unit be ? ToString(unit).
-  Handle<String> unit;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, unit, Object::ToString(isolate, unit_obj),
-                             Object);
-
-  // 4. If isFinite(value) is false, then throw a RangeError exception.
-  if (!std::isfinite(number)) {
-    THROW_NEW_ERROR(
-        isolate,
-        NewRangeError(MessageTemplate::kNotFiniteNumber,
-                      isolate->factory()->NewStringFromAsciiChecked(func_name)),
-        Object);
-  }
-
-  icu::RelativeDateTimeFormatter* formatter =
-      JSRelativeTimeFormat::UnpackFormatter(format_holder);
-  CHECK_NOT_NULL(formatter);
-
-  URelativeDateTimeUnit unit_enum;
-  if (!GetURelativeDateTimeUnit(unit, &unit_enum)) {
-    THROW_NEW_ERROR(
-        isolate,
-        NewRangeError(MessageTemplate::kInvalidUnit,
-                      isolate->factory()->NewStringFromAsciiChecked(func_name),
-                      unit),
-        Object);
-  }
-
-  UErrorCode status = U_ZERO_ERROR;
-  icu::UnicodeString formatted;
-  if (unit_enum == UDAT_REL_UNIT_QUARTER) {
-    // ICU have not yet implement UDAT_REL_UNIT_QUARTER.
-  } else {
-    if (format_holder->numeric() == JSRelativeTimeFormat::Numeric::ALWAYS) {
-      formatter->formatNumeric(number, unit_enum, formatted, status);
-    } else {
-      DCHECK_EQ(JSRelativeTimeFormat::Numeric::AUTO, format_holder->numeric());
-      formatter->format(number, unit_enum, formatted, status);
-    }
-  }
-
-  if (U_FAILURE(status)) {
-    THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object);
-  }
-
-  if (to_parts) {
-    icu::UnicodeString integer;
-    icu::FieldPosition pos;
-    formatter->getNumberFormat().format(std::abs(number), integer, pos, status);
-    if (U_FAILURE(status)) {
-      THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError),
-                      Object);
-    }
-
-    Handle<JSArray> elements;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, elements,
-        GenerateRelativeTimeFormatParts(isolate, formatted, integer, unit),
-        Object);
-    return elements;
-  }
-
-  return factory->NewStringFromTwoByte(Vector<const uint16_t>(
-      reinterpret_cast<const uint16_t*>(formatted.getBuffer()),
-      formatted.length()));
+BUILTIN(RelativeTimeFormatSupportedLocalesOf) {
+  HandleScope scope(isolate);
+  RETURN_RESULT_OR_FAILURE(
+      isolate, SupportedLocalesOfCommon(isolate, "relativetimeformat", args));
 }
 
-}  // namespace
-
 BUILTIN(RelativeTimeFormatPrototypeFormat) {
   HandleScope scope(isolate);
   // 1. Let relativeTimeFormat be the this value.
@@ -974,9 +622,12 @@ BUILTIN(RelativeTimeFormatPrototypeFormat) {
   //    true, throw a TypeError exception.
   CHECK_RECEIVER(JSRelativeTimeFormat, format_holder,
                  "Intl.RelativeTimeFormat.prototype.format");
-  RETURN_RESULT_OR_FAILURE(isolate,
-                           RelativeTimeFormatPrototypeFormatCommon(
-                               args, isolate, format_holder, "format", false));
+  Handle<Object> value_obj = args.atOrUndefined(isolate, 1);
+  Handle<Object> unit_obj = args.atOrUndefined(isolate, 2);
+
+  RETURN_RESULT_OR_FAILURE(
+      isolate, JSRelativeTimeFormat::Format(isolate, value_obj, unit_obj,
+                                            format_holder, "format", false));
 }
 
 BUILTIN(RelativeTimeFormatPrototypeFormatToParts) {
@@ -987,9 +638,11 @@ BUILTIN(RelativeTimeFormatPrototypeFormatToParts) {
   //    true, throw a TypeError exception.
   CHECK_RECEIVER(JSRelativeTimeFormat, format_holder,
                  "Intl.RelativeTimeFormat.prototype.formatToParts");
-  RETURN_RESULT_OR_FAILURE(
-      isolate, RelativeTimeFormatPrototypeFormatCommon(
-                   args, isolate, format_holder, "formatToParts", true));
+  Handle<Object> value_obj = args.atOrUndefined(isolate, 1);
+  Handle<Object> unit_obj = args.atOrUndefined(isolate, 2);
+  RETURN_RESULT_OR_FAILURE(isolate, JSRelativeTimeFormat::Format(
+                                        isolate, value_obj, unit_obj,
+                                        format_holder, "formatToParts", true));
 }
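
The behavior now delegated to JSRelativeTimeFormat::Format is the standard one, sketched here in TypeScript (illustrative output for the "en" locale; not code from the patch):

    const rtf = new Intl.RelativeTimeFormat("en", { numeric: "auto" });
    console.log(rtf.format(-1, "day"));  // "yesterday" (numeric: "auto")
    console.log(rtf.format(3, "week"));  // "in 3 weeks"
    // formatToParts() splits the output around the integer, which is what the
    // deleted GenerateRelativeTimeFormatParts() helper used to compute:
    console.log(new Intl.RelativeTimeFormat("en").formatToParts(3, "day"));
    // e.g. [ { type: "literal", value: "in " },
    //        { type: "integer", value: "3", unit: "day" },
    //        { type: "literal", value: " days" } ]
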
 
 // Locale getters.
@@ -1089,8 +742,9 @@ BUILTIN(RelativeTimeFormatConstructor) {
   // 2. Let relativeTimeFormat be
   //    ! OrdinaryCreateFromConstructor(NewTarget,
   //    "%RelativeTimeFormatPrototype%").
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     JSObject::New(target, new_target));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      JSObject::New(target, new_target, Handle<AllocationSite>::null()));
   Handle<JSRelativeTimeFormat> format =
       Handle<JSRelativeTimeFormat>::cast(result);
   format->set_flags(0);
@@ -1152,8 +806,9 @@ BUILTIN(PluralRulesConstructor) {
   //    [[MinimumFractionDigits]], [[MaximumFractionDigits]],
   //    [[MinimumSignificantDigits]], [[MaximumSignificantDigits]] »).
   Handle<JSObject> plural_rules_obj;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, plural_rules_obj,
-                                     JSObject::New(target, new_target));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, plural_rules_obj,
+      JSObject::New(target, new_target, Handle<AllocationSite>::null()));
   Handle<JSPluralRules> plural_rules =
       Handle<JSPluralRules>::cast(plural_rules_obj);
 
@@ -1163,6 +818,40 @@ BUILTIN(PluralRulesConstructor) {
                                         locales, options));
 }
 
+BUILTIN(PluralRulesPrototypeResolvedOptions) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSPluralRules, plural_rules_holder,
+                 "Intl.PluralRules.prototype.resolvedOptions");
+  return *JSPluralRules::ResolvedOptions(isolate, plural_rules_holder);
+}
+
+BUILTIN(PluralRulesPrototypeSelect) {
+  HandleScope scope(isolate);
+
+  // 1. Let pr be the this value.
+  // 2. If Type(pr) is not Object, throw a TypeError exception.
+  // 3. If pr does not have an [[InitializedPluralRules]] internal slot, throw a
+  //    TypeError exception.
+  CHECK_RECEIVER(JSPluralRules, plural_rules,
+                 "Intl.PluralRules.prototype.select");
+
+  // 4. Let n be ? ToNumber(value).
+  Handle<Object> number = args.atOrUndefined(isolate, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number,
+                                     Object::ToNumber(isolate, number));
+  double number_double = number->Number();
+
+  // 5. Return ? ResolvePlural(pr, n).
+  RETURN_RESULT_OR_FAILURE(isolate, JSPluralRules::ResolvePlural(
+                                        isolate, plural_rules, number_double));
+}
+
+BUILTIN(PluralRulesSupportedLocalesOf) {
+  HandleScope scope(isolate);
+  RETURN_RESULT_OR_FAILURE(
+      isolate, SupportedLocalesOfCommon(isolate, "pluralrules", args));
+}
+
 BUILTIN(CollatorConstructor) {
   HandleScope scope(isolate);
   Handle<JSReceiver> new_target;
@@ -1183,16 +872,29 @@ BUILTIN(CollatorConstructor) {
   // 5. Let collator be ? OrdinaryCreateFromConstructor(newTarget,
   //    "%CollatorPrototype%", internalSlotsList).
   Handle<JSObject> collator_obj;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, collator_obj,
-                                     JSObject::New(target, new_target));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, collator_obj,
+      JSObject::New(target, new_target, Handle<AllocationSite>::null()));
   Handle<JSCollator> collator = Handle<JSCollator>::cast(collator_obj);
-  collator->set_flags(0);
 
   // 6. Return ? InitializeCollator(collator, locales, options).
   RETURN_RESULT_OR_FAILURE(isolate, JSCollator::InitializeCollator(
                                         isolate, collator, locales, options));
 }
 
+BUILTIN(CollatorPrototypeResolvedOptions) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSCollator, collator_holder,
+                 "Intl.Collator.prototype.resolvedOptions");
+  return *JSCollator::ResolvedOptions(isolate, collator_holder);
+}
+
+BUILTIN(CollatorSupportedLocalesOf) {
+  HandleScope scope(isolate);
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           SupportedLocalesOfCommon(isolate, "collator", args));
+}
+
 BUILTIN(CollatorPrototypeCompare) {
   const char* const method = "get Intl.Collator.prototype.compare";
   HandleScope scope(isolate);
@@ -1211,21 +913,8 @@ BUILTIN(CollatorPrototypeCompare) {
     return *bound_compare;
   }
 
-  Handle<Context> native_context(isolate->context()->native_context(),
-                                 isolate);
-  Handle<Context> context = isolate->factory()->NewBuiltinContext(
-      native_context, JSCollator::ContextSlot::kLength);
-
-  // 4.b. Set F.[[Collator]] to collator.
-  context->set(JSCollator::ContextSlot::kCollator, *collator);
-
-  Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
-      native_context->collator_internal_compare_shared_fun(), isolate);
-  Handle<Map> map = isolate->strict_function_without_prototype_map();
-
-  // 4.a. Let F be a new built-in function object as defined in 10.3.3.1.
-  Handle<JSFunction> new_bound_compare_function =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+  Handle<JSFunction> new_bound_compare_function = CreateBoundFunction(
+      isolate, collator, Builtins::kCollatorInternalCompare, 2);
 
   // 4.c. Set collator.[[BoundCompare]] to F.
   collator->set_bound_compare(*new_bound_compare_function);
@@ -1242,7 +931,8 @@ BUILTIN(CollatorInternalCompare) {
   // 2. Assert: Type(collator) is Object and collator has an
   //    [[InitializedCollator]] internal slot.
   Handle<JSCollator> collator_holder = Handle<JSCollator>(
-      JSCollator::cast(context->get(JSCollator::ContextSlot::kCollator)),
+      JSCollator::cast(context->get(
+          static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
       isolate);
 
   // 3. If x is not provided, let x be undefined.
@@ -1263,71 +953,231 @@ BUILTIN(CollatorInternalCompare) {
   return *Intl::CompareStrings(isolate, collator_holder, string_x, string_y);
 }
 
-BUILTIN(BreakIteratorPrototypeAdoptText) {
-  const char* const method = "get Intl.v8BreakIterator.prototype.adoptText";
+BUILTIN(V8BreakIteratorConstructor) {
   HandleScope scope(isolate);
+  Handle<JSReceiver> new_target;
 
-  CHECK_RECEIVER(JSObject, break_iterator_holder, method);
-  if (!Intl::IsObjectOfType(isolate, break_iterator_holder,
-                            Intl::Type::kBreakIterator)) {
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate,
-        NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
-                     isolate->factory()->NewStringFromAsciiChecked(method),
-                     break_iterator_holder));
+  if (args.new_target()->IsUndefined(isolate)) {
+    new_target = args.target();
+  } else {
+    new_target = Handle<JSReceiver>::cast(args.new_target());
   }
 
-  Handle<Object> bound_adopt_text =
-      Handle<Object>(break_iterator_holder->GetEmbedderField(
-                         V8BreakIterator::kBoundAdoptTextIndex),
-                     isolate);
+  // [[Construct]]
+  Handle<JSFunction> target = args.target();
 
-  if (!bound_adopt_text->IsUndefined(isolate)) {
-    DCHECK(bound_adopt_text->IsJSFunction());
-    return *bound_adopt_text;
-  }
+  Handle<Object> locales = args.atOrUndefined(isolate, 1);
+  Handle<Object> options = args.atOrUndefined(isolate, 2);
 
-  Handle<Context> native_context(isolate->context()->native_context(),
-                                 isolate);
-  Handle<Context> context = isolate->factory()->NewBuiltinContext(
-      native_context, static_cast<int>(V8BreakIterator::ContextSlot::kLength));
+  Handle<JSObject> break_iterator_obj;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, break_iterator_obj,
+      JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+  Handle<JSV8BreakIterator> break_iterator =
+      Handle<JSV8BreakIterator>::cast(break_iterator_obj);
 
-  context->set(static_cast<int>(V8BreakIterator::ContextSlot::kV8BreakIterator),
-               *break_iterator_holder);
+  RETURN_RESULT_OR_FAILURE(isolate,
+                           JSV8BreakIterator::InitializeV8BreakIterator(
+                               isolate, break_iterator, locales, options));
+}
 
+BUILTIN(V8BreakIteratorPrototypeResolvedOptions) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSV8BreakIterator, break_iterator,
+                 "Intl.v8BreakIterator.prototype.resolvedOptions");
+  return *JSV8BreakIterator::ResolvedOptions(isolate, break_iterator);
+}
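
The bound-compare caching and the plural-rules builtins above behave as sketched below (TypeScript, standard API usage only; collation output shown for "de" is illustrative):

    const collator = new Intl.Collator("de");
    console.log(collator.compare === collator.compare);   // true ([[BoundCompare]])
    console.log(["ö", "z", "a"].sort(collator.compare));  // [ "a", "ö", "z" ]

    const pr = new Intl.PluralRules("en");
    console.log(pr.select(1), pr.select(2));  // "one" "other"
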
-  Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
-      native_context->break_iterator_internal_adopt_text_shared_fun(), isolate);
-  Handle<Map> map = isolate->strict_function_without_prototype_map();
-
-  Handle<JSFunction> new_bound_adopt_text_function =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+BUILTIN(V8BreakIteratorPrototypeAdoptText) {
+  const char* const method = "get Intl.v8BreakIterator.prototype.adoptText";
+  HandleScope scope(isolate);
+
+  CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
 
-  break_iterator_holder->SetEmbedderField(V8BreakIterator::kBoundAdoptTextIndex,
-                                          *new_bound_adopt_text_function);
+  Handle<Object> bound_adopt_text(break_iterator->bound_adopt_text(), isolate);
+  if (!bound_adopt_text->IsUndefined(isolate)) {
+    DCHECK(bound_adopt_text->IsJSFunction());
+    return *bound_adopt_text;
+  }
 
+  Handle<JSFunction> new_bound_adopt_text_function = CreateBoundFunction(
+      isolate, break_iterator, Builtins::kV8BreakIteratorInternalAdoptText, 1);
+  break_iterator->set_bound_adopt_text(*new_bound_adopt_text_function);
   return *new_bound_adopt_text_function;
 }
 
-BUILTIN(BreakIteratorInternalAdoptText) {
+BUILTIN(V8BreakIteratorInternalAdoptText) {
   HandleScope scope(isolate);
   Handle<Context> context = Handle<Context>(isolate->context(), isolate);
 
-  Handle<JSObject> break_iterator_holder = Handle<JSObject>(
-      JSObject::cast(context->get(
-          static_cast<int>(V8BreakIterator::ContextSlot::kV8BreakIterator))),
+  Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+      JSV8BreakIterator::cast(context->get(
+          static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
       isolate);
 
-  DCHECK(Intl::IsObjectOfType(isolate, break_iterator_holder,
-                              Intl::Type::kBreakIterator));
-
   Handle<Object> input_text = args.atOrUndefined(isolate, 1);
   Handle<String> text;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, text,
                                      Object::ToString(isolate, input_text));
 
-  V8BreakIterator::AdoptText(isolate, break_iterator_holder, text);
+  JSV8BreakIterator::AdoptText(isolate, break_iterator_holder, text);
   return ReadOnlyRoots(isolate).undefined_value();
 }
 
+BUILTIN(V8BreakIteratorPrototypeFirst) {
+  const char* const method = "get Intl.v8BreakIterator.prototype.first";
+  HandleScope scope(isolate);
+
+  CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+
+  Handle<Object> bound_first(break_iterator_holder->bound_first(), isolate);
+  if (!bound_first->IsUndefined(isolate)) {
+    DCHECK(bound_first->IsJSFunction());
+    return *bound_first;
+  }
+
+  Handle<JSFunction> new_bound_first_function =
+      CreateBoundFunction(isolate, break_iterator_holder,
+                          Builtins::kV8BreakIteratorInternalFirst, 0);
+  break_iterator_holder->set_bound_first(*new_bound_first_function);
+  return *new_bound_first_function;
+}
+
+BUILTIN(V8BreakIteratorInternalFirst) {
+  HandleScope scope(isolate);
+  Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+  Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+      JSV8BreakIterator::cast(context->get(
+          static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+      isolate);
+
+  icu::BreakIterator* break_iterator =
+      JSV8BreakIterator::UnpackBreakIterator(break_iterator_holder);
+  CHECK_NOT_NULL(break_iterator);
+
+  return *isolate->factory()->NewNumberFromInt(break_iterator->first());
+}
+
+BUILTIN(V8BreakIteratorPrototypeNext) {
+  const char* const method = "get Intl.v8BreakIterator.prototype.next";
+  HandleScope scope(isolate);
+
+  CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+
+  Handle<Object> bound_next(break_iterator_holder->bound_next(), isolate);
+  if (!bound_next->IsUndefined(isolate)) {
+    DCHECK(bound_next->IsJSFunction());
+    return *bound_next;
+  }
+
+  Handle<JSFunction> new_bound_next_function =
+      CreateBoundFunction(isolate, break_iterator_holder,
+                          Builtins::kV8BreakIteratorInternalNext, 0);
+  break_iterator_holder->set_bound_next(*new_bound_next_function);
+  return *new_bound_next_function;
+}
+
+BUILTIN(V8BreakIteratorInternalNext) {
+  HandleScope scope(isolate);
+  Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+  Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+      JSV8BreakIterator::cast(context->get(
+          static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+      isolate);
+
+  icu::BreakIterator* break_iterator =
+      JSV8BreakIterator::UnpackBreakIterator(break_iterator_holder);
+  CHECK_NOT_NULL(break_iterator);
+
+  return *isolate->factory()->NewNumberFromInt(break_iterator->next());
+}
+
+BUILTIN(V8BreakIteratorPrototypeCurrent) {
+  const char* const method = "get Intl.v8BreakIterator.prototype.current";
+  HandleScope scope(isolate);
+
+  CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+
+  Handle<Object> bound_current(break_iterator_holder->bound_current(), isolate);
+  if (!bound_current->IsUndefined(isolate)) {
+    DCHECK(bound_current->IsJSFunction());
+    return *bound_current;
+  }
+
+  Handle<JSFunction> new_bound_current_function =
+      CreateBoundFunction(isolate, break_iterator_holder,
+                          Builtins::kV8BreakIteratorInternalCurrent, 0);
+  break_iterator_holder->set_bound_current(*new_bound_current_function);
+  return *new_bound_current_function;
+}
+
+BUILTIN(V8BreakIteratorInternalCurrent) {
+  HandleScope scope(isolate);
+  Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+  Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+      JSV8BreakIterator::cast(context->get(
+          static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+      isolate);
+
+  icu::BreakIterator* break_iterator =
+      JSV8BreakIterator::UnpackBreakIterator(break_iterator_holder);
+  CHECK_NOT_NULL(break_iterator);
+
+  return *isolate->factory()->NewNumberFromInt(break_iterator->current());
+}
+
+BUILTIN(V8BreakIteratorPrototypeBreakType) {
+  const char* const method = "get Intl.v8BreakIterator.prototype.breakType";
+  HandleScope scope(isolate);
+
+  CHECK_RECEIVER(JSV8BreakIterator, break_iterator_holder, method);
+
+  Handle<Object> bound_break_type(break_iterator_holder->bound_break_type(),
+                                  isolate);
+  if (!bound_break_type->IsUndefined(isolate)) {
+    DCHECK(bound_break_type->IsJSFunction());
+    return *bound_break_type;
+  }
+
+  Handle<JSFunction> new_bound_break_type_function =
+      CreateBoundFunction(isolate, break_iterator_holder,
+                          Builtins::kV8BreakIteratorInternalBreakType, 0);
+  break_iterator_holder->set_bound_break_type(*new_bound_break_type_function);
+  return *new_bound_break_type_function;
+}
+
+BUILTIN(V8BreakIteratorInternalBreakType) {
+  HandleScope scope(isolate);
+  Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+  Handle<JSV8BreakIterator> break_iterator_holder = Handle<JSV8BreakIterator>(
+      JSV8BreakIterator::cast(context->get(
+          static_cast<int>(Intl::BoundFunctionContextSlot::kBoundFunction))),
+      isolate);
+
+  icu::BreakIterator* break_iterator =
+      JSV8BreakIterator::UnpackBreakIterator(break_iterator_holder);
+  CHECK_NOT_NULL(break_iterator);
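
These builtins back Intl.v8BreakIterator, a non-standard, V8-only API; the sketch below assumes that API surface and uses a cast because the TypeScript lib has no declaration for it. Positions shown are for the sample string only.

    const it = new (Intl as any).v8BreakIterator("en", { type: "word" });
    it.adoptText("Aloha 42");
    it.first();                 // 0
    while (it.next() !== -1) {  // icu::BreakIterator::DONE is -1
      // breakType() reports the rule status of the segment that just ended.
      console.log(it.current(), it.breakType());
    }
    // e.g. 5 "letter", 6 "none", 8 "number"
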
 
+  int32_t status = break_iterator->getRuleStatus();
+  // Keep return values in sync with JavaScript BreakType enum.
+  if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
+    return *isolate->factory()->NewStringFromStaticChars("none");
+  } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
+    return ReadOnlyRoots(isolate).number_string();
+  } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
+    return *isolate->factory()->NewStringFromStaticChars("letter");
+  } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
+    return *isolate->factory()->NewStringFromStaticChars("kana");
+  } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
+    return *isolate->factory()->NewStringFromStaticChars("ideo");
+  } else {
+    return *isolate->factory()->NewStringFromStaticChars("unknown");
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/builtins/builtins-intl.h b/deps/v8/src/builtins/builtins-intl.h
deleted file mode 100644
index 419ff14db10cf9..00000000000000
--- a/deps/v8/src/builtins/builtins-intl.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BUILTINS_BUILTINS_INTL_H_
-#define V8_BUILTINS_BUILTINS_INTL_H_
-
-#include <stdint.h>
-#include <vector>
-
-namespace v8 {
-namespace internal {
-
-struct NumberFormatSpan {
-  int32_t field_id;
-  int32_t begin_pos;
-  int32_t end_pos;
-
-  NumberFormatSpan() {}
-  NumberFormatSpan(int32_t field_id, int32_t begin_pos, int32_t end_pos)
-      : field_id(field_id), begin_pos(begin_pos), end_pos(end_pos) {}
-};
-
-std::vector<NumberFormatSpan> FlattenRegionsToParts(
-    std::vector<NumberFormatSpan>* regions);
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_BUILTINS_BUILTINS_INTL_H_
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 1e16a6b1ded3ca..7915f9b7f4ad9e 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -5,6 +5,9 @@
 #include "src/builtins/builtins-iterator-gen.h"
 #include "src/builtins/growable-fixed-array-gen.h"
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
 #include "src/heap/factory-inl.h"
 
 namespace v8 {
@@ -38,8 +41,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
 
   BIND(&if_not_callable);
   {
-    Node* ret = CallRuntime(Runtime::kThrowTypeError, context,
-                            SmiConstant(MessageTemplate::kNotIterable), object);
+    Node* ret = CallRuntime(Runtime::kThrowIteratorError, context, object);
     GotoIfException(ret, if_exception, exception);
     Unreachable();
   }
@@ -197,62 +199,91 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(
 
 TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
     TNode<Context> context, TNode<Object> iterable, TNode<Object> iterator_fn) {
-  Label fast_path(this), slow_path(this), done(this);
+  // 1. Let iteratorRecord be ? GetIterator(items, method).
+  IteratorRecord iterator_record = GetIterator(context, iterable, iterator_fn);
+
+  // 2. Let values be a new empty List.
+  GrowableFixedArray values(state());
+
+  Variable* vars[] = {values.var_array(), values.var_length(),
+                      values.var_capacity()};
+  Label loop_start(this, 3, vars), done(this);
+  Goto(&loop_start);
+  // 3. Let next be true.
+  // 4. Repeat, while next is not false
+  BIND(&loop_start);
+  {
+    //  a. Set next to ? IteratorStep(iteratorRecord).
+    TNode<JSReceiver> next = CAST(IteratorStep(context, iterator_record, &done));
+    //  b. If next is not false, then
+    //    i. Let nextValue be ? IteratorValue(next).
+    TNode<Object> next_value = CAST(IteratorValue(context, next));
+    //    ii. Append nextValue to the end of the List values.
+    values.Push(next_value);
+    Goto(&loop_start);
+  }
 
-  TVARIABLE(JSArray, created_list);
+  BIND(&done);
+  return values.ToJSArray(context);
+}
 
-  Branch(IsFastJSArrayWithNoCustomIteration(iterable, context), &fast_path,
-         &slow_path);
+TF_BUILTIN(IterableToList, IteratorBuiltinsAssembler) {
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+  TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+  TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
 
-  // This is a fast-path for ignoring the iterator.
-  BIND(&fast_path);
-  {
-    TNode<JSArray> input_array = CAST(iterable);
-    created_list = CAST(CloneFastJSArray(context, input_array));
-    Goto(&done);
-  }
+  Return(IterableToList(context, iterable, iterator_fn));
+}
 
-  BIND(&slow_path);
-  {
-    // 1. Let iteratorRecord be ? GetIterator(items, method).
-    IteratorRecord iterator_record =
-        GetIterator(context, iterable, iterator_fn);
+// This builtin always returns a new JSArray and is thus safe to use even in the
+// presence of code that may call back into user-JS. This builtin will take the
+// fast path if the iterable is a fast array and the Array prototype and the
+// Symbol.iterator is untouched. The fast path skips the iterator and copies the
+// backing store to the new array. Note that if the array has holes, the holes
+// will be copied to the new array, which is inconsistent with the behavior of
+// an actual iteration, where holes should be replaced with undefined (if the
+// prototype has no elements). To maintain the correct behavior for holey
+// arrays, use the builtins IterableToList or IterableToListWithSymbolLookup.
+TF_BUILTIN(IterableToListMayPreserveHoles, IteratorBuiltinsAssembler) {
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+  TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+  TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
 
-    // 2. Let values be a new empty List.
-    GrowableFixedArray values(state());
+  Label slow_path(this);
 
-    Variable* vars[] = {values.var_array(), values.var_length(),
-                        values.var_capacity()};
-    Label loop_start(this, 3, vars), loop_end(this);
-    Goto(&loop_start);
-    // 3. Let next be true.
-    // 4. Repeat, while next is not false
-    BIND(&loop_start);
-    {
-      //  a. Set next to ? IteratorStep(iteratorRecord).
-      TNode<JSReceiver> next =
-          CAST(IteratorStep(context, iterator_record, &loop_end));
-      //  b. If next is not false, then
-      //    i. Let nextValue be ? IteratorValue(next).
-      TNode<Object> next_value = CAST(IteratorValue(context, next));
-      //    ii. Append nextValue to the end of the List values.
-      values.Push(next_value);
-      Goto(&loop_start);
-    }
-    BIND(&loop_end);
+  GotoIfNot(IsFastJSArrayWithNoCustomIteration(iterable, context), &slow_path);
 
-    created_list = values.ToJSArray(context);
-    Goto(&done);
-  }
+  // The fast path will copy holes to the new array.
+  TailCallBuiltin(Builtins::kCloneFastJSArray, context, iterable);
 
-  BIND(&done);
-  return created_list.value();
+  BIND(&slow_path);
+  TailCallBuiltin(Builtins::kIterableToList, context, iterable, iterator_fn);
 }
 
-TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
-    TNode<Context> context, TNode<Object> iterable) {
-  TNode<Object> method = GetIteratorMethod(context, iterable);
-  return IterableToList(context, iterable, method);
+// This builtin uses the default Symbol.iterator for the iterator, and takes
+// the fast path only if the iterable is a fast _packed_ array.
+TF_BUILTIN(IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) {
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+  TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+
+  Label slow_path(this);
+
+  GotoIfNot(IsFastJSArrayWithNoCustomIteration(iterable, context), &slow_path);
+  // Here we are guaranteed that iterable is a fast JSArray with an original
+  // iterator.
+  Node* elements_kind = LoadMapElementsKind(LoadMap(CAST(iterable)));
+  // Take the slow path if the array is holey.
+  GotoIf(IsHoleyFastElementsKind(elements_kind), &slow_path);
+
+  // This is a fast-path for ignoring the iterator. Here we are guaranteed that
+  // {iterable} is a fast _packed_ JSArray.
+  TailCallBuiltin(Builtins::kCloneFastJSArray, context, iterable);
+
+  BIND(&slow_path);
+  {
+    TNode<Object> iterator_fn = GetIteratorMethod(context, iterable);
+    TailCallBuiltin(Builtins::kIterableToList, context, iterable, iterator_fn);
+  }
 }
 
 }  // namespace internal
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 71d4b9753cc1ad..f61f7f52c0ffd8 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -54,10 +54,11 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
   void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
                                 Variable* exception);
 
-  // /#sec-iterabletolist
+  // #sec-iterabletolist
+  // Build a JSArray by iterating over {iterable} using {iterator_fn},
+  // following the ECMAScript operation with the same name.
   TNode<JSArray> IterableToList(TNode<Context> context, TNode<Object> iterable,
                                 TNode<Object> iterator_fn);
-  TNode<JSArray> IterableToList(TNode<Context> context, TNode<Object> iterable);
 };
 
 }  // namespace internal
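
The holey-array caveat in the comments above is easy to observe from JS; this TypeScript sketch (standard semantics, not patch code) shows why only packed arrays may skip the iterator:

    const holey = [, 1];      // hole at index 0
    console.log([...holey]);  // [ undefined, 1 ]: iteration converts holes
    // A raw backing-store copy (CloneFastJSArray) would keep the hole, so
    // IterableToListWithSymbolLookup falls back to the iterator for holey
    // arrays, and only IterableToListMayPreserveHoles skips that check.
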
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index cfc81612f2d4c2..582f6242ad56c3 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -525,17 +525,15 @@ TF_BUILTIN(Add, AddStubAssembler) {
   BIND(&string_add_convert_left);
   {
     // Convert {left} to a String and concatenate it with the String {right}.
-    Callable callable =
-        CodeFactory::StringAdd(isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
-    Return(CallStub(callable, context, var_left.value(), var_right.value()));
+    TailCallBuiltin(Builtins::kStringAdd_ConvertLeft, context, var_left.value(),
+                    var_right.value());
   }
 
   BIND(&string_add_convert_right);
   {
     // Convert {right} to a String and concatenate it with the String {left}.
-    Callable callable = CodeFactory::StringAdd(
-        isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
-    Return(CallStub(callable, context, var_left.value(), var_right.value()));
+    TailCallBuiltin(Builtins::kStringAdd_ConvertRight, context,
+                    var_left.value(), var_right.value());
   }
 
   BIND(&do_bigint_add);
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index a8d83e641ffb9a..52b486e60bda73 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -46,11 +46,6 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler {
 
   TNode<BoolT> IsStringWrapperElementsKind(TNode<Map> map);
 
-  // Checks that |map| has only simple properties, returns bitfield3.
-  TNode<Uint32T> EnsureOnlyHasSimpleProperties(TNode<Map> map,
-                                               TNode<Int32T> instance_type,
-                                               Label* bailout);
-
   void ObjectAssignFast(TNode<Context> context, TNode<JSReceiver> to,
                         TNode<Object> from, Label* slow);
 };
@@ -96,8 +91,7 @@ void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
   Node* lhs = StringConstant("[object ");
   Node* rhs = StringConstant("]");
 
-  Callable callable =
-      CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+  Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE);
 
   Return(CallStub(callable, context, CallStub(callable, context, lhs, string),
                   rhs));
@@ -304,7 +298,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
     // So the array filled by the-hole even if enum_cache exists.
     FillFixedArrayWithValue(PACKED_ELEMENTS, values_or_entries,
                             IntPtrConstant(0), object_enum_length,
-                            Heap::kTheHoleValueRootIndex);
+                            RootIndex::kTheHoleValue);
 
     TVARIABLE(IntPtrT, var_result_index, IntPtrConstant(0));
     TVARIABLE(IntPtrT, var_descriptor_number, IntPtrConstant(0));
@@ -524,18 +518,6 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
   args.PopAndReturn(to);
 }
 
-TNode<Uint32T> ObjectBuiltinsAssembler::EnsureOnlyHasSimpleProperties(
-    TNode<Map> map, TNode<Int32T> instance_type, Label* bailout) {
-  GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout);
-
-  TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
-  GotoIf(IsSetWord32(bit_field3, Map::IsDictionaryMapBit::kMask |
-                                     Map::HasHiddenPrototypeBit::kMask),
-         bailout);
-
-  return bit_field3;
-}
-
 // This function mimics what FastAssign() function does for C++ implementation.
 void ObjectBuiltinsAssembler::ObjectAssignFast(TNode<Context> context,
                                                TNode<JSReceiver> to,
@@ -553,8 +535,9 @@ void ObjectBuiltinsAssembler::ObjectAssignFast(TNode<Context> context,
     GotoIf(IsJSReceiverInstanceType(from_instance_type), &cont);
    GotoIfNot(IsStringInstanceType(from_instance_type), &done);
    {
-      Branch(SmiEqual(LoadStringLengthAsSmi(CAST(from)), SmiConstant(0)), &done,
-             slow);
+      Branch(
+          Word32Equal(LoadStringLengthAsWord32(CAST(from)), Int32Constant(0)),
+          &done, slow);
    }
     BIND(&cont);
   }
@@ -567,132 +550,18 @@ void ObjectBuiltinsAssembler::ObjectAssignFast(TNode<Context> context,
   TNode<BoolT> to_is_simple_receiver = IsSimpleObjectMap(to_map);
 
   GotoIfNot(IsJSObjectInstanceType(from_instance_type), slow);
-  TNode<Uint32T> from_bit_field3 =
-      EnsureOnlyHasSimpleProperties(from_map, from_instance_type, slow);
-
   GotoIfNot(IsEmptyFixedArray(LoadElements(CAST(from))), slow);
 
-  TNode<DescriptorArray> from_descriptors = LoadMapDescriptors(from_map);
-  TNode<Uint32T> nof_descriptors =
-      DecodeWord32<Map::NumberOfOwnDescriptorsBits>(from_bit_field3);
-
-  TVARIABLE(BoolT, var_stable, Int32TrueConstant());
-  VariableList list({&var_stable}, zone());
-
-  DescriptorArrayForEach(
-      list, Unsigned(Int32Constant(0)), nof_descriptors,
-      [=, &var_stable](TNode<UintPtrT> descriptor_key_index) {
-        TNode<Name> next_key = CAST(
-            LoadWeakFixedArrayElement(from_descriptors, descriptor_key_index));
-
-        TVARIABLE(Object, var_value, SmiConstant(0));
-        Label do_store(this), next_iteration(this);
-
-        {
-          TVARIABLE(Map, var_from_map);
-          TVARIABLE(HeapObject, var_meta_storage);
-          TVARIABLE(IntPtrT, var_entry);
-          TVARIABLE(Uint32T, var_details);
-          Label if_found(this);
-
-          Label if_found_fast(this), if_found_dict(this);
-
-          Label if_stable(this), if_not_stable(this);
-          Branch(var_stable.value(), &if_stable, &if_not_stable);
-          BIND(&if_stable);
-          {
-            // Directly decode from the descriptor array if |from| did not
-            // change shape.
-            var_from_map = from_map;
-            var_meta_storage = from_descriptors;
-            var_entry = Signed(descriptor_key_index);
-            Goto(&if_found_fast);
-          }
-          BIND(&if_not_stable);
-          {
-            // If the map did change, do a slower lookup. We are still
-            // guaranteed that the object has a simple shape, and that the key
-            // is a name.
-            var_from_map = LoadMap(CAST(from));
-            TryLookupPropertyInSimpleObject(
-                CAST(from), var_from_map.value(), next_key, &if_found_fast,
-                &if_found_dict, &var_meta_storage, &var_entry, &next_iteration);
-          }
-
-          BIND(&if_found_fast);
-          {
-            TNode<DescriptorArray> descriptors = CAST(var_meta_storage.value());
-            TNode<IntPtrT> name_index = var_entry.value();
-
-            // Skip non-enumerable properties.
-            var_details = LoadDetailsByKeyIndex(descriptors, name_index);
-            GotoIf(IsSetWord32(var_details.value(),
-                               PropertyDetails::kAttributesDontEnumMask),
-                   &next_iteration);
-
-            LoadPropertyFromFastObject(from, var_from_map.value(), descriptors,
-                                       name_index, var_details.value(),
-                                       &var_value);
-            Goto(&if_found);
-          }
-          BIND(&if_found_dict);
-          {
-            Node* dictionary = var_meta_storage.value();
-            Node* entry = var_entry.value();
-
-            TNode<Uint32T> details =
-                LoadDetailsByKeyIndex<NameDictionary>(dictionary, entry);
-            // Skip non-enumerable properties.
-            GotoIf(
-                IsSetWord32(details, PropertyDetails::kAttributesDontEnumMask),
-                &next_iteration);
-
-            var_details = details;
-            var_value = LoadValueByKeyIndex<NameDictionary>(dictionary, entry);
-            Goto(&if_found);
-          }
-
-          // Here we have details and value which could be an accessor.
-          BIND(&if_found);
-          {
-            Label slow_load(this, Label::kDeferred);
-
-            var_value =
-                CallGetterIfAccessor(var_value.value(), var_details.value(),
-                                     context, from, &slow_load, kCallJSGetter);
-            Goto(&do_store);
-
-            BIND(&slow_load);
-            {
-              var_value =
-                  CallRuntime(Runtime::kGetProperty, context, from, next_key);
-              Goto(&do_store);
-            }
-          }
-        }
-
-        // Store property to target object.
-        BIND(&do_store);
-        {
-          KeyedStoreGenericGenerator::SetProperty(
-              state(), context, to, to_is_simple_receiver, next_key,
-              var_value.value(), LanguageMode::kStrict);
-
-          // Check if the |from| object is still stable, i.e. we can proceed
-          // using property details from preloaded |from_descriptors|.
-          var_stable = Select<BoolT>(
-              var_stable.value(),
-              [=] { return WordEqual(LoadMap(CAST(from)), from_map); },
-              [=] { return Int32FalseConstant(); });
-
-          Goto(&next_iteration);
-        }
-
-        BIND(&next_iteration);
-      });
+  ForEachEnumerableOwnProperty(context, from_map, CAST(from),
+                               [=](TNode<Name> key, TNode<Object> value) {
+                                 KeyedStoreGenericGenerator::SetProperty(
+                                     state(), context, to,
+                                     to_is_simple_receiver, key, value,
+                                     LanguageMode::kStrict);
+                               },
+                               slow);
 
   Goto(&done);
-
   BIND(&done);
 }
 
@@ -1008,13 +877,13 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
 
   BIND(&if_arguments);
   {
-    var_default.Bind(LoadRoot(Heap::karguments_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::karguments_to_string));
     Goto(&checkstringtag);
   }
 
   BIND(&if_array);
   {
-    var_default.Bind(LoadRoot(Heap::karray_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::karray_to_string));
     Goto(&checkstringtag);
   }
 
@@ -1027,26 +896,26 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
                        boolean_constructor, JSFunction::kPrototypeOrInitialMapOffset);
     Node* boolean_prototype =
         LoadObjectField(boolean_initial_map, Map::kPrototypeOffset);
-    var_default.Bind(LoadRoot(Heap::kboolean_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::kboolean_to_string));
     var_holder.Bind(boolean_prototype);
     Goto(&checkstringtag);
   }
 
   BIND(&if_date);
   {
-    var_default.Bind(LoadRoot(Heap::kdate_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::kdate_to_string));
     Goto(&checkstringtag);
   }
 
   BIND(&if_error);
   {
-    var_default.Bind(LoadRoot(Heap::kerror_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::kerror_to_string));
     Goto(&checkstringtag);
   }
 
   BIND(&if_function);
   {
-    var_default.Bind(LoadRoot(Heap::kfunction_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::kfunction_to_string));
     Goto(&checkstringtag);
   }
 
@@ -1059,7 +928,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
                        number_constructor, JSFunction::kPrototypeOrInitialMapOffset);
     Node* number_prototype =
         LoadObjectField(number_initial_map, Map::kPrototypeOffset);
-    var_default.Bind(LoadRoot(Heap::knumber_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::knumber_to_string));
     var_holder.Bind(number_prototype);
     Goto(&checkstringtag);
   }
 
@@ -1067,7 +936,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
   BIND(&if_object);
   {
     CSA_ASSERT(this, IsJSReceiver(receiver));
-    var_default.Bind(LoadRoot(Heap::kobject_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
     Goto(&checkstringtag);
   }
 
@@ -1082,10 +951,10 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
     GotoIf(IsSymbolMap(receiver_map), &if_symbol);
     GotoIf(IsUndefined(receiver), &return_undefined);
     CSA_ASSERT(this, IsNull(receiver));
-    Return(LoadRoot(Heap::knull_to_stringRootIndex));
+    Return(LoadRoot(RootIndex::knull_to_string));
 
     BIND(&return_undefined);
-    Return(LoadRoot(Heap::kundefined_to_stringRootIndex));
+    Return(LoadRoot(RootIndex::kundefined_to_string));
   }
 
   BIND(&if_proxy);
@@ -1099,12 +968,12 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
         CallRuntime(Runtime::kArrayIsArray, context, receiver);
     TNode<String> builtin_tag = Select<String>(
        IsTrue(receiver_is_array),
-        [=] { return CAST(LoadRoot(Heap::kArray_stringRootIndex)); },
+        [=] { return CAST(LoadRoot(RootIndex::kArray_string)); },
        [=] {
          return Select<String>(
              IsCallableMap(receiver_map),
-              [=] { return CAST(LoadRoot(Heap::kFunction_stringRootIndex)); },
-              [=] { return CAST(LoadRoot(Heap::kObject_stringRootIndex)); });
+              [=] { return CAST(LoadRoot(RootIndex::kFunction_string)); },
+              [=] { return CAST(LoadRoot(RootIndex::kObject_string)); });
        });
 
     // Lookup the @@toStringTag property on the {receiver}.
@@ -1125,7 +994,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
 
   BIND(&if_regexp);
   {
-    var_default.Bind(LoadRoot(Heap::kregexp_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::kregexp_to_string));
     Goto(&checkstringtag);
   }
 
@@ -1138,7 +1007,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
                        string_constructor, JSFunction::kPrototypeOrInitialMapOffset);
     Node* string_prototype =
         LoadObjectField(string_initial_map, Map::kPrototypeOffset);
-    var_default.Bind(LoadRoot(Heap::kstring_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::kstring_to_string));
     var_holder.Bind(string_prototype);
     Goto(&checkstringtag);
   }
 
@@ -1152,7 +1021,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
                        symbol_constructor, JSFunction::kPrototypeOrInitialMapOffset);
     Node* symbol_prototype =
         LoadObjectField(symbol_initial_map, Map::kPrototypeOffset);
-    var_default.Bind(LoadRoot(Heap::kobject_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
     var_holder.Bind(symbol_prototype);
     Goto(&checkstringtag);
   }
 
@@ -1166,7 +1035,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
                        bigint_constructor, JSFunction::kPrototypeOrInitialMapOffset);
     Node* bigint_prototype =
         LoadObjectField(bigint_initial_map, Map::kPrototypeOffset);
-    var_default.Bind(LoadRoot(Heap::kobject_to_stringRootIndex));
+    var_default.Bind(LoadRoot(RootIndex::kobject_to_string));
     var_holder.Bind(bigint_prototype);
     Goto(&checkstringtag);
   }
 
@@ -1209,7 +1078,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
 
   BIND(&return_generic);
   {
     Node* tag = GetProperty(context, ToObject(context, receiver),
-                            LoadRoot(Heap::kto_string_tag_symbolRootIndex));
+                            LoadRoot(RootIndex::kto_string_tag_symbol));
     GotoIf(TaggedIsSmi(tag), &return_default);
     GotoIfNot(IsString(tag), &return_default);
     ReturnToStringFormat(context, tag);
@@ -1323,7 +1192,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
     GotoIf(IsSpecialReceiverMap(properties_map), &call_runtime);
     // Stay on the fast path only if there are no elements.
     GotoIfNot(WordEqual(LoadElements(properties),
-                        LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
+                        LoadRoot(RootIndex::kEmptyFixedArray)),
              &call_runtime);
     // Handle dictionary objects or fast objects with properties in runtime.
     Node* bit_field3 = LoadMapBitField3(properties_map);
@@ -1476,8 +1345,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
                                  formal_parameter_count);
   Node* parameters_and_registers = AllocateFixedArray(HOLEY_ELEMENTS, size);
   FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
-                          IntPtrConstant(0), size,
-                          Heap::kUndefinedValueRootIndex);
+                          IntPtrConstant(0), size, RootIndex::kUndefinedValue);
   // TODO(cbruni): support start_offset to avoid double initialization.
   Node* result = AllocateJSObjectFromMap(maybe_map, nullptr, nullptr, kNone,
                                          kWithSlackTracking);
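
Two behaviors these object-gen hunks must preserve are worth pinning down; the TypeScript below is a sketch of the standard semantics (not patch code): the ObjectAssignFast rewrite above copies only own enumerable properties with getters invoked, and ObjectPrototypeToString lets @@toStringTag override the builtin tag.

    // ObjectAssignFast: only own enumerable properties, getters invoked.
    const from = { a: 1, get b() { return 2; } };
    Object.defineProperty(from, "c", { value: 3, enumerable: false });
    console.log(Object.assign({}, from));  // { a: 1, b: 2 } ("c" is skipped)
    console.log(Object.assign({}, ""));    // {} (empty-string source short-circuits)

    // ObjectPrototypeToString: @@toStringTag wins over the builtin tag.
    console.log(Object.prototype.toString.call({ [Symbol.toStringTag]: "Custom" }));
    // "[object Custom]"
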
@@ -1522,7 +1390,7 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
   object = ToObject_Inline(CAST(context), CAST(object));
 
   // 2. Let key be ? ToPropertyKey(P).
-  key = ToName(context, key);
+  key = CallBuiltin(Builtins::kToName, context, key);
 
   // 3. Let desc be ? obj.[[GetOwnProperty]](key).
   Label if_keyisindex(this), if_iskeyunique(this),
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 241a2041bdf245..1d43217999d4b3 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -28,9 +28,9 @@ Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
   Node* const promise = Allocate(JSPromise::kSizeWithEmbedderFields);
   StoreMapNoWriteBarrier(promise, promise_map);
   StoreObjectFieldRoot(promise, JSPromise::kPropertiesOrHashOffset,
-                       Heap::kEmptyFixedArrayRootIndex);
+                       RootIndex::kEmptyFixedArray);
   StoreObjectFieldRoot(promise, JSPromise::kElementsOffset,
-                       Heap::kEmptyFixedArrayRootIndex);
+                       RootIndex::kEmptyFixedArray);
   return promise;
 }
 
@@ -137,7 +137,7 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
         CreatePromiseResolvingFunctions(promise, debug_event, native_context);
 
     Node* capability = Allocate(PromiseCapability::kSize);
-    StoreMapNoWriteBarrier(capability, Heap::kPromiseCapabilityMapRootIndex);
+    StoreMapNoWriteBarrier(capability, RootIndex::kPromiseCapabilityMap);
     StoreObjectFieldNoWriteBarrier(capability, PromiseCapability::kPromiseOffset,
                                    promise);
     StoreObjectFieldNoWriteBarrier(capability,
@@ -150,13 +150,13 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
   BIND(&if_slow_promise_capability);
   {
     Node* capability = Allocate(PromiseCapability::kSize);
-    StoreMapNoWriteBarrier(capability, Heap::kPromiseCapabilityMapRootIndex);
+    StoreMapNoWriteBarrier(capability, RootIndex::kPromiseCapabilityMap);
     StoreObjectFieldRoot(capability, PromiseCapability::kPromiseOffset,
-                         Heap::kUndefinedValueRootIndex);
+                         RootIndex::kUndefinedValue);
     StoreObjectFieldRoot(capability, PromiseCapability::kResolveOffset,
-                         Heap::kUndefinedValueRootIndex);
+                         RootIndex::kUndefinedValue);
     StoreObjectFieldRoot(capability, PromiseCapability::kRejectOffset,
-                         Heap::kUndefinedValueRootIndex);
+                         RootIndex::kUndefinedValue);
 
     Node* executor_context =
         CreatePromiseGetCapabilitiesExecutorContext(capability, native_context);
@@ -352,7 +352,7 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
 
   BIND(&if_fulfilled);
   {
-    var_map.Bind(LoadRoot(Heap::kPromiseFulfillReactionJobTaskMapRootIndex));
+    var_map.Bind(LoadRoot(RootIndex::kPromiseFulfillReactionJobTaskMap));
     var_handler.Bind(on_fulfilled);
     Goto(&enqueue);
   }
 
@@ -360,7 +360,7 @@ void PromiseBuiltinsAssembler::PerformPromiseThen(
   BIND(&if_rejected);
   {
     CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected));
-    var_map.Bind(LoadRoot(Heap::kPromiseRejectReactionJobTaskMapRootIndex));
+    var_map.Bind(LoadRoot(RootIndex::kPromiseRejectReactionJobTaskMap));
     var_handler.Bind(on_rejected);
     GotoIf(PromiseHasHandler(promise), &enqueue);
     CallRuntime(Runtime::kPromiseRevokeReject, context, promise);
@@ -401,7 +401,7 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReaction(
     Node* next, Node* promise_or_capability, Node* fulfill_handler,
     Node* reject_handler) {
   Node* const reaction = Allocate(PromiseReaction::kSize);
-  StoreMapNoWriteBarrier(reaction, Heap::kPromiseReactionMapRootIndex);
+  StoreMapNoWriteBarrier(reaction, RootIndex::kPromiseReactionMap);
   StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kNextOffset, next);
   StoreObjectFieldNoWriteBarrier(reaction,
                                  PromiseReaction::kPromiseOrCapabilityOffset,
@@ -431,10 +431,10 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
 }
 
 Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
-    Heap::RootListIndex map_root_index, Node* context, Node* argument,
-    Node* handler, Node* promise_or_capability) {
-  DCHECK(map_root_index == Heap::kPromiseFulfillReactionJobTaskMapRootIndex ||
-         map_root_index == Heap::kPromiseRejectReactionJobTaskMapRootIndex);
+    RootIndex map_root_index, Node* context, Node* argument, Node* handler,
+    Node* promise_or_capability) {
+  DCHECK(map_root_index == RootIndex::kPromiseFulfillReactionJobTaskMap ||
+         map_root_index == RootIndex::kPromiseRejectReactionJobTaskMap);
   Node* const map = LoadRoot(map_root_index);
   return AllocatePromiseReactionJobTask(map, context, argument, handler,
                                         promise_or_capability);
@@ -444,7 +444,7 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask(
     Node* promise_to_resolve, Node* then, Node* thenable, Node* context) {
   Node* const microtask = Allocate(PromiseResolveThenableJobTask::kSize);
   StoreMapNoWriteBarrier(microtask,
-                         Heap::kPromiseResolveThenableJobTaskMapRootIndex);
+                         RootIndex::kPromiseResolveThenableJobTaskMap);
   StoreObjectFieldNoWriteBarrier(
       microtask, PromiseResolveThenableJobTask::kContextOffset, context);
   StoreObjectFieldNoWriteBarrier(
@@ -502,8 +502,8 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
     // of stores here to avoid screwing up the store buffer.
     STATIC_ASSERT(PromiseReaction::kSize == PromiseReactionJobTask::kSize);
     if (type == PromiseReaction::kFulfill) {
-      StoreMapNoWriteBarrier(
-          current, Heap::kPromiseFulfillReactionJobTaskMapRootIndex);
+      StoreMapNoWriteBarrier(current,
+                             RootIndex::kPromiseFulfillReactionJobTaskMap);
       StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
                        argument);
       StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
@@ -516,7 +516,7 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
       Node* handler =
          LoadObjectField(current, PromiseReaction::kRejectHandlerOffset);
       StoreMapNoWriteBarrier(current,
-                             Heap::kPromiseRejectReactionJobTaskMapRootIndex);
+                             RootIndex::kPromiseRejectReactionJobTaskMap);
       StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
                        argument);
       StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 4954b383fecf17..39b2a246834e50 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -89,9 +89,8 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
 
   Node* AllocatePromiseReaction(Node* next, Node* promise_or_capability,
                                 Node* fulfill_handler, Node* reject_handler);
 
-  Node* AllocatePromiseReactionJobTask(Heap::RootListIndex map_root_index,
-                                       Node* context, Node* argument,
-                                       Node* handler,
+  Node* AllocatePromiseReactionJobTask(RootIndex map_root_index, Node* context,
+                                       Node* argument, Node* handler,
                                        Node* promise_or_capability);
   Node* AllocatePromiseReactionJobTask(Node* map, Node* context, Node* argument,
                                        Node* handler,
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 34caf586885cc6..f0d891910affe9 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -58,7 +58,7 @@ Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler,
   Node* proxy = Allocate(JSProxy::kSize);
   StoreMapNoWriteBarrier(proxy, map.value());
   StoreObjectFieldRoot(proxy, JSProxy::kPropertiesOrHashOffset,
-                       Heap::kEmptyPropertyDictionaryRootIndex);
+                       RootIndex::kEmptyPropertyDictionary);
   StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kTargetOffset, target);
   StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHandlerOffset, handler);
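
AllocateProxy above stores exactly two slots, the target and the handler; every operation then consults the handler trap first. A TypeScript sketch of the observable behavior (standard API usage, not patch code):

    const target = { greet: "hi" };
    const proxy = new Proxy(target, {
      get: (t, key, receiver) =>
          key === "greet" ? "intercepted" : Reflect.get(t, key, receiver),
    });
    console.log(proxy.greet);  // "intercepted"

    // ProxyRevocable (next hunk) pairs the proxy with a revoke function:
    const { proxy: p2, revoke } = Proxy.revocable({}, {});
    revoke();  // any further operation on p2 throws a TypeError
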
StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHandlerOffset, handler); @@ -124,7 +124,7 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments( Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext( Node* proxy, Node* native_context) { Node* const context = Allocate(FixedArray::SizeFor(kProxyContextLength)); - StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex); + StoreMapNoWriteBarrier(context, RootIndex::kFunctionContextMap); InitializeFunctionContext(native_context, context, kProxyContextLength); StoreContextElementNoWriteBarrier(context, kProxySlot, proxy); return context; @@ -230,9 +230,9 @@ TF_BUILTIN(ProxyRevocable, ProxiesCodeStubAssembler) { native_context, Context::PROXY_REVOCABLE_RESULT_MAP_INDEX); StoreMapNoWriteBarrier(result, result_map); StoreObjectFieldRoot(result, JSProxyRevocableResult::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(result, JSProxyRevocableResult::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldNoWriteBarrier(result, JSProxyRevocableResult::kProxyOffset, proxy); StoreObjectFieldNoWriteBarrier(result, JSProxyRevocableResult::kRevokeOffset, diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc index 3dd07a796a18da..cd3f2b4bedbff0 100644 --- a/deps/v8/src/builtins/builtins-reflect.cc +++ b/deps/v8/src/builtins/builtins-reflect.cc @@ -218,7 +218,7 @@ BUILTIN(ReflectSet) { LookupIterator it = LookupIterator::PropertyOrElement( isolate, receiver, name, Handle::cast(target)); Maybe result = Object::SetSuperProperty( - &it, value, LanguageMode::kSloppy, Object::MAY_BE_STORE_FROM_KEYED); + &it, value, LanguageMode::kSloppy, StoreOrigin::kMaybeKeyed); MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception()); return *isolate->factory()->ToBoolean(result.FromJust()); } diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index 206602aaa7d32a..30717f41deebce 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -81,13 +81,13 @@ TNode RegExpBuiltinsAssembler::AllocateRegExpResult( // Initialize the elements. 
DCHECK(!IsDoubleElementsKind(elements_kind)); - const Heap::RootListIndex map_index = Heap::kFixedArrayMapRootIndex; + const RootIndex map_index = RootIndex::kFixedArrayMap; DCHECK(Heap::RootIsImmortalImmovable(map_index)); StoreMapNoWriteBarrier(elements, map_index); StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length); FillFixedArrayWithValue(elements_kind, elements, IntPtrZero(), length_intptr, - Heap::kUndefinedValueRootIndex); + RootIndex::kUndefinedValue); return CAST(result); } @@ -862,7 +862,7 @@ TNode RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec( TVARIABLE(BoolT, var_result); #ifdef V8_ENABLE_FORCE_SLOW_PATH - var_result = BoolConstant(0); + var_result = BoolConstant(false); GotoIfForceSlowPath(&out); #endif @@ -1103,7 +1103,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context, Isolate* isolate = this->isolate(); TNode const int_one = IntPtrConstant(1); - TVARIABLE(Smi, var_length, SmiZero()); + TVARIABLE(Uint32T, var_length, Uint32Constant(0)); TVARIABLE(IntPtrT, var_flags); // First, count the number of characters we will need and check which flags @@ -1115,13 +1115,13 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context, Node* const flags_smi = LoadObjectField(regexp, JSRegExp::kFlagsOffset); var_flags = SmiUntag(flags_smi); -#define CASE_FOR_FLAG(FLAG) \ - do { \ - Label next(this); \ - GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \ - var_length = SmiAdd(var_length.value(), SmiConstant(1)); \ - Goto(&next); \ - BIND(&next); \ +#define CASE_FOR_FLAG(FLAG) \ + do { \ + Label next(this); \ + GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \ + var_length = Uint32Add(var_length.value(), Uint32Constant(1)); \ + Goto(&next); \ + BIND(&next); \ } while (false) CASE_FOR_FLAG(JSRegExp::kGlobal); @@ -1145,7 +1145,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context, Label if_isflagset(this); \ BranchIfToBooleanIsTrue(flag, &if_isflagset, &next); \ BIND(&if_isflagset); \ - var_length = SmiAdd(var_length.value(), SmiConstant(1)); \ + var_length = Uint32Add(var_length.value(), Uint32Constant(1)); \ var_flags = Signed(WordOr(var_flags.value(), IntPtrConstant(FLAG))); \ Goto(&next); \ BIND(&next); \ @@ -2109,9 +2109,9 @@ TNode RegExpBuiltinsAssembler::MatchAllIterator( StoreMapNoWriteBarrier(iterator, map); StoreObjectFieldRoot(iterator, JSRegExpStringIterator::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(iterator, JSRegExpStringIterator::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); // 5. Set iterator.[[IteratingRegExp]] to R. 
StoreObjectFieldNoWriteBarrier( @@ -2903,14 +2903,13 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath( TNode first_part = CAST(CallBuiltin(Builtins::kSubString, context, string, var_last_match_end.value(), match_start)); - var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone_NotTenured, - context, var_result.value(), first_part)); + var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone, context, + var_result.value(), first_part)); GotoIf(SmiEqual(replace_length, SmiZero()), &loop_end); - var_result = - CAST(CallBuiltin(Builtins::kStringAdd_CheckNone_NotTenured, context, - var_result.value(), replace_string)); + var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone, context, + var_result.value(), replace_string)); Goto(&loop_end); BIND(&loop_end); @@ -2936,8 +2935,8 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath( TNode last_part = CAST(CallBuiltin(Builtins::kSubString, context, string, var_last_match_end.value(), string_length)); - var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone_NotTenured, - context, var_result.value(), last_part)); + var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone, context, + var_result.value(), last_part)); Goto(&out); } diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc index 52673bfd367ba7..76f32c9a2d824d 100644 --- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc +++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc @@ -50,10 +50,9 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray( &invalid); // Fail if the array's JSArrayBuffer is not shared. - Node* array_buffer = LoadObjectField(tagged, JSTypedArray::kBufferOffset); - Node* bitfield = LoadObjectField(array_buffer, JSArrayBuffer::kBitFieldOffset, - MachineType::Uint32()); - GotoIfNot(IsSetWord32(bitfield), &invalid); + TNode array_buffer = LoadJSArrayBufferViewBuffer(CAST(tagged)); + TNode bitfield = LoadJSArrayBufferBitField(array_buffer); + GotoIfNot(IsSetWord32(bitfield), &invalid); // Fail if the array's element type is float32, float64 or clamped. Node* elements_instance_type = LoadInstanceType(LoadElements(tagged)); @@ -76,12 +75,9 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray( BIND(¬_float_or_clamped); *out_instance_type = elements_instance_type; - Node* backing_store = - LoadObjectField(array_buffer, JSArrayBuffer::kBackingStoreOffset); - Node* byte_offset = ChangeUint32ToWord(TruncateTaggedToWord32( - context, LoadObjectField(tagged, JSArrayBufferView::kByteOffsetOffset))); - *out_backing_store = - IntPtrAdd(BitcastTaggedToWord(backing_store), byte_offset); + TNode backing_store = LoadJSArrayBufferBackingStore(array_buffer); + TNode byte_offset = LoadJSArrayBufferViewByteOffset(CAST(tagged)); + *out_backing_store = IntPtrAdd(backing_store, byte_offset); } // https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess @@ -112,7 +108,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array, // Check if the index is in bounds. If not, throw RangeError. 
Label check_passed(this); Node* array_length_word32 = - TruncateTaggedToWord32(context, LoadTypedArrayLength(CAST(array))); + TruncateTaggedToWord32(context, LoadJSTypedArrayLength(CAST(array))); GotoIf(Uint32LessThan(index_word, array_length_word32), &check_passed); ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex); @@ -130,7 +126,7 @@ void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex( CSA_ASSERT(this, Uint32LessThan(index_word, TruncateTaggedToWord32( - context, LoadTypedArrayLength(CAST(array))))); + context, LoadJSTypedArrayLength(CAST(array))))); } #endif diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc index 92c1c65d1f762b..c5239bc444f242 100644 --- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc +++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc @@ -82,28 +82,24 @@ V8_WARN_UNUSED_RESULT Maybe ValidateAtomicAccess( return Just(access_index); } -// ES #sec-atomics.wake -// Atomics.wake( typedArray, index, count ) -BUILTIN(AtomicsWake) { - HandleScope scope(isolate); - Handle array = args.atOrUndefined(isolate, 1); - Handle index = args.atOrUndefined(isolate, 2); - Handle count = args.atOrUndefined(isolate, 3); - +namespace { +MaybeHandle AtomicsWake(Isolate* isolate, Handle array, + Handle index, Handle count) { Handle sta; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true)); + ASSIGN_RETURN_ON_EXCEPTION( + isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true), + Object); Maybe maybe_index = ValidateAtomicAccess(isolate, sta, index); - if (maybe_index.IsNothing()) return ReadOnlyRoots(isolate).exception(); + MAYBE_RETURN_NULL(maybe_index); size_t i = maybe_index.FromJust(); uint32_t c; if (count->IsUndefined(isolate)) { c = kMaxUInt32; } else { - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, count, - Object::ToInteger(isolate, count)); + ASSIGN_RETURN_ON_EXCEPTION(isolate, count, + Object::ToInteger(isolate, count), Object); double count_double = count->Number(); if (count_double < 0) count_double = 0; @@ -113,9 +109,35 @@ BUILTIN(AtomicsWake) { } Handle array_buffer = sta->GetBuffer(); - size_t addr = (i << 2) + NumberToSize(sta->byte_offset()); + size_t addr = (i << 2) + sta->byte_offset(); + + return Handle(FutexEmulation::Wake(array_buffer, addr, c), isolate); +} + +} // namespace + +// ES #sec-atomics.wake +// Atomics.wake( typedArray, index, count ) +BUILTIN(AtomicsWake) { + HandleScope scope(isolate); + Handle array = args.atOrUndefined(isolate, 1); + Handle index = args.atOrUndefined(isolate, 2); + Handle count = args.atOrUndefined(isolate, 3); + + isolate->CountUsage(v8::Isolate::UseCounterFeature::kAtomicsWake); + RETURN_RESULT_OR_FAILURE(isolate, AtomicsWake(isolate, array, index, count)); +} + +// ES #sec-atomics.notify +// Atomics.notify( typedArray, index, count ) +BUILTIN(AtomicsNotify) { + HandleScope scope(isolate); + Handle array = args.atOrUndefined(isolate, 1); + Handle index = args.atOrUndefined(isolate, 2); + Handle count = args.atOrUndefined(isolate, 3); - return FutexEmulation::Wake(array_buffer, addr, c); + isolate->CountUsage(v8::Isolate::UseCounterFeature::kAtomicsNotify); + RETURN_RESULT_OR_FAILURE(isolate, AtomicsWake(isolate, array, index, count)); } // ES #sec-atomics.wait @@ -158,7 +180,7 @@ BUILTIN(AtomicsWait) { } Handle array_buffer = sta->GetBuffer(); - size_t addr = (i << 2) + NumberToSize(sta->byte_offset()); + size_t addr = (i << 2) + sta->byte_offset(); return 
FutexEmulation::Wait(isolate, array_buffer, addr, value_int32, timeout_number); diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc index c46a3fd35d761b..1ee7a4f760e7b0 100644 --- a/deps/v8/src/builtins/builtins-string-gen.cc +++ b/deps/v8/src/builtins/builtins-string-gen.cc @@ -39,11 +39,11 @@ Node* StringBuiltinsAssembler::DirectStringData(Node* string, BIND(&if_external); { // This is only valid for ExternalStrings where the resource data - // pointer is cached (i.e. no short external strings). - CSA_ASSERT( - this, Word32NotEqual(Word32And(string_instance_type, - Int32Constant(kShortExternalStringMask)), - Int32Constant(kShortExternalStringTag))); + // pointer is cached (i.e. no uncached external strings). + CSA_ASSERT(this, Word32NotEqual( + Word32And(string_instance_type, + Int32Constant(kUncachedExternalStringMask)), + Int32Constant(kUncachedExternalStringTag))); var_data.Bind(LoadObjectField(string, ExternalString::kResourceDataOffset, MachineType::Pointer())); Goto(&if_join); @@ -191,11 +191,11 @@ void StringBuiltinsAssembler::StringEqual_Core( // Check if both {lhs} and {rhs} are direct strings, and that in case of // ExternalStrings the data pointer is cached. - STATIC_ASSERT(kShortExternalStringTag != 0); + STATIC_ASSERT(kUncachedExternalStringTag != 0); STATIC_ASSERT(kIsIndirectStringTag != 0); int const kBothDirectStringMask = - kIsIndirectStringMask | kShortExternalStringMask | - ((kIsIndirectStringMask | kShortExternalStringMask) << 8); + kIsIndirectStringMask | kUncachedExternalStringMask | + ((kIsIndirectStringMask | kUncachedExternalStringMask) << 8); GotoIfNot(Word32Equal(Word32And(both_instance_types, Int32Constant(kBothDirectStringMask)), Int32Constant(0)), @@ -284,67 +284,31 @@ void StringBuiltinsAssembler::StringEqual_Loop( } } -void StringBuiltinsAssembler::Generate_StringAdd(StringAddFlags flags, - PretenureFlag pretenure_flag, - Node* context, Node* left, - Node* right) { - switch (flags) { - case STRING_ADD_CONVERT_LEFT: { - // TODO(danno): The ToString and JSReceiverToPrimitive below could be - // combined to avoid duplicate smi and instance type checks. - left = ToString(context, JSReceiverToPrimitive(context, left)); - Callable callable = CodeFactory::StringAdd( - isolate(), STRING_ADD_CHECK_NONE, pretenure_flag); - TailCallStub(callable, context, left, right); - break; - } - case STRING_ADD_CONVERT_RIGHT: { - // TODO(danno): The ToString and JSReceiverToPrimitive below could be - // combined to avoid duplicate smi and instance type checks. - right = ToString(context, JSReceiverToPrimitive(context, right)); - Callable callable = CodeFactory::StringAdd( - isolate(), STRING_ADD_CHECK_NONE, pretenure_flag); - TailCallStub(callable, context, left, right); - break; - } - case STRING_ADD_CHECK_NONE: { - CodeStubAssembler::AllocationFlag allocation_flags = - (pretenure_flag == TENURED) ? 
CodeStubAssembler::kPretenured - : CodeStubAssembler::kNone; - Return(StringAdd(context, CAST(left), CAST(right), allocation_flags)); - break; - } - } -} - -TF_BUILTIN(StringAdd_CheckNone_NotTenured, StringBuiltinsAssembler) { - Node* left = Parameter(Descriptor::kLeft); - Node* right = Parameter(Descriptor::kRight); +TF_BUILTIN(StringAdd_CheckNone, StringBuiltinsAssembler) { + TNode left = CAST(Parameter(Descriptor::kLeft)); + TNode right = CAST(Parameter(Descriptor::kRight)); Node* context = Parameter(Descriptor::kContext); - Generate_StringAdd(STRING_ADD_CHECK_NONE, NOT_TENURED, context, left, right); + Return(StringAdd(context, left, right)); } -TF_BUILTIN(StringAdd_CheckNone_Tenured, StringBuiltinsAssembler) { - Node* left = Parameter(Descriptor::kLeft); - Node* right = Parameter(Descriptor::kRight); +TF_BUILTIN(StringAdd_ConvertLeft, StringBuiltinsAssembler) { + TNode left = CAST(Parameter(Descriptor::kLeft)); + TNode right = CAST(Parameter(Descriptor::kRight)); Node* context = Parameter(Descriptor::kContext); - Generate_StringAdd(STRING_ADD_CHECK_NONE, TENURED, context, left, right); + // TODO(danno): The ToString and JSReceiverToPrimitive below could be + // combined to avoid duplicate smi and instance type checks. + left = ToString(context, JSReceiverToPrimitive(context, left)); + TailCallBuiltin(Builtins::kStringAdd_CheckNone, context, left, right); } -TF_BUILTIN(StringAdd_ConvertLeft_NotTenured, StringBuiltinsAssembler) { - Node* left = Parameter(Descriptor::kLeft); - Node* right = Parameter(Descriptor::kRight); +TF_BUILTIN(StringAdd_ConvertRight, StringBuiltinsAssembler) { + TNode left = CAST(Parameter(Descriptor::kLeft)); + TNode right = CAST(Parameter(Descriptor::kRight)); Node* context = Parameter(Descriptor::kContext); - Generate_StringAdd(STRING_ADD_CONVERT_LEFT, NOT_TENURED, context, left, - right); -} - -TF_BUILTIN(StringAdd_ConvertRight_NotTenured, StringBuiltinsAssembler) { - Node* left = Parameter(Descriptor::kLeft); - Node* right = Parameter(Descriptor::kRight); - Node* context = Parameter(Descriptor::kContext); - Generate_StringAdd(STRING_ADD_CONVERT_RIGHT, NOT_TENURED, context, left, - right); + // TODO(danno): The ToString and JSReceiverToPrimitive below could be + // combined to avoid duplicate smi and instance type checks. + right = ToString(context, JSReceiverToPrimitive(context, right)); + TailCallBuiltin(Builtins::kStringAdd_CheckNone, context, left, right); } TF_BUILTIN(SubString, StringBuiltinsAssembler) { @@ -354,12 +318,10 @@ TF_BUILTIN(SubString, StringBuiltinsAssembler) { Return(SubString(string, SmiUntag(from), SmiUntag(to))); } -void StringBuiltinsAssembler::GenerateStringAt(char const* method_name, - TNode context, - Node* receiver, - TNode maybe_position, - TNode default_return, - StringAtAccessor accessor) { +void StringBuiltinsAssembler::GenerateStringAt( + char const* method_name, TNode context, Node* receiver, + TNode maybe_position, TNode default_return, + const StringAtAccessor& accessor) { // Check that {receiver} is coercible to Object and convert it to a String. 
TNode string = ToThisString(context, receiver, method_name); @@ -587,8 +549,9 @@ TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) { } TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* position = Parameter(Descriptor::kPosition); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode position = + UncheckedCast(Parameter(Descriptor::kPosition)); // Load the character code at the {position} from the {receiver}. TNode code = StringCharCodeAt(receiver, position); @@ -639,7 +602,6 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { Node* context = Parameter(Descriptor::kContext); CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc)); - TNode smi_argc = SmiTag(arguments.GetLength(INTPTR_PARAMETERS)); // Check if we have exactly one argument (plus the implicit receiver), i.e. // if the parent frame is not an arguments adaptor frame. Label if_oneargument(this), if_notoneargument(this); @@ -664,7 +626,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { { Label two_byte(this); // Assume that the resulting string contains only one-byte characters. - Node* one_byte_result = AllocateSeqOneByteString(context, smi_argc); + Node* one_byte_result = AllocateSeqOneByteString(context, Unsigned(argc)); TVARIABLE(IntPtrT, var_max_index); var_max_index = IntPtrConstant(0); @@ -698,7 +660,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { // At least one of the characters in the string requires a 16-bit // representation. Allocate a SeqTwoByteString to hold the resulting // string. - Node* two_byte_result = AllocateSeqTwoByteString(context, smi_argc); + Node* two_byte_result = AllocateSeqTwoByteString(context, Unsigned(argc)); // Copy the characters that have already been put in the 8-bit string into // their corresponding positions in the new 16-bit string. 
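
The string builtin changes above remove the pretenuring axis from string concatenation: Generate_StringAdd, which was parameterized over StringAddFlags and PretenureFlag, is deleted, and the *_NotTenured/_Tenured builtin variants collapse into a single StringAdd_CheckNone, with StringAdd_ConvertLeft/ConvertRight normalizing one operand via ToString and then tail-calling the shared builtin. A hedged sketch of that refactoring shape in ordinary C++ (hypothetical ToString, not V8's CSA machinery):

    #include <iostream>
    #include <string>

    // Hypothetical stand-in for the ToString/JSReceiverToPrimitive step.
    std::string ToString(int v) { return std::to_string(v); }

    // The single shared core: both operands are already strings.
    std::string StringAdd_CheckNone(const std::string& left,
                                    const std::string& right) {
      return left + right;
    }

    // Conversion variants normalize one operand and delegate ("tail-call")
    // to the shared core instead of duplicating concatenation per variant.
    std::string StringAdd_ConvertLeft(int left, const std::string& right) {
      return StringAdd_CheckNone(ToString(left), right);
    }
    std::string StringAdd_ConvertRight(const std::string& left, int right) {
      return StringAdd_CheckNone(left, ToString(right));
    }

    int main() {
      std::cout << StringAdd_ConvertLeft(1, "ab") << "\n";  // prints "1ab"
    }

Keeping one core is what lets the patch drop the CodeFactory::StringAdd plumbing at every call site: StringRepeat, StringPrototypeReplace and the pad builtins all switch to plain CallBuiltin(Builtins::kStringAdd_CheckNone, ...).
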
@@ -817,7 +779,7 @@ TF_BUILTIN(StringPrototypeConcat, CodeStubAssembler) { void StringBuiltinsAssembler::StringIndexOf( Node* const subject_string, Node* const search_string, Node* const position, - std::function f_return) { + const std::function& f_return) { CSA_ASSERT(this, IsString(subject_string)); CSA_ASSERT(this, IsString(search_string)); CSA_ASSERT(this, TaggedIsSmi(position)); @@ -1229,8 +1191,6 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) { TNode count = CAST(Parameter(Descriptor::kCount)); Node* const string = ToThisString(context, receiver, "String.prototype.repeat"); - Node* const is_stringempty = - SmiEqual(LoadStringLengthAsSmi(string), SmiConstant(0)); VARIABLE( var_count, MachineRepresentation::kTagged, @@ -1248,7 +1208,8 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) { TNode smi_count = CAST(var_count.value()); GotoIf(SmiLessThan(smi_count, SmiConstant(0)), &invalid_count); GotoIf(SmiEqual(smi_count, SmiConstant(0)), &return_emptystring); - GotoIf(is_stringempty, &return_emptystring); + GotoIf(Word32Equal(LoadStringLengthAsWord32(string), Int32Constant(0)), + &return_emptystring); GotoIf(SmiGreaterThan(smi_count, SmiConstant(String::kMaxLength)), &invalid_string_length); Return(CallBuiltin(Builtins::kStringRepeat, context, string, smi_count)); @@ -1266,7 +1227,8 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) { &invalid_count); GotoIf(Float64LessThan(number_value, Float64Constant(0.0)), &invalid_count); - Branch(is_stringempty, &return_emptystring, &invalid_string_length); + Branch(Word32Equal(LoadStringLengthAsWord32(string), Int32Constant(0)), + &return_emptystring, &invalid_string_length); } } @@ -1311,9 +1273,6 @@ TF_BUILTIN(StringRepeat, StringBuiltinsAssembler) { VARIABLE(var_temp, MachineRepresentation::kTagged, string); TVARIABLE(Smi, var_count, count); - Callable stringadd_callable = - CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED); - Label loop(this, {&var_count, &var_result, &var_temp}), return_result(this); Goto(&loop); BIND(&loop); @@ -1321,16 +1280,16 @@ TF_BUILTIN(StringRepeat, StringBuiltinsAssembler) { { Label next(this); GotoIfNot(SmiToInt32(SmiAnd(var_count.value(), SmiConstant(1))), &next); - var_result.Bind(CallStub(stringadd_callable, context, var_result.value(), - var_temp.value())); + var_result.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context, + var_result.value(), var_temp.value())); Goto(&next); BIND(&next); } var_count = SmiShr(var_count.value(), 1); GotoIf(SmiEqual(var_count.value(), SmiConstant(0)), &return_result); - var_temp.Bind(CallStub(stringadd_callable, context, var_temp.value(), - var_temp.value())); + var_temp.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context, + var_temp.value(), var_temp.value())); Goto(&loop); } @@ -1369,16 +1328,16 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) { TNode const subject_string = ToString_Inline(context, receiver); TNode const search_string = ToString_Inline(context, search); - TNode const subject_length = LoadStringLengthAsSmi(subject_string); - TNode const search_length = LoadStringLengthAsSmi(search_string); + TNode const subject_length = LoadStringLengthAsWord(subject_string); + TNode const search_length = LoadStringLengthAsWord(search_string); // Fast-path single-char {search}, long cons {receiver}, and simple string // {replace}. 
{ Label next(this); - GotoIfNot(SmiEqual(search_length, SmiConstant(1)), &next); - GotoIfNot(SmiGreaterThan(subject_length, SmiConstant(0xFF)), &next); + GotoIfNot(WordEqual(search_length, IntPtrConstant(1)), &next); + GotoIfNot(IntPtrGreaterThan(subject_length, IntPtrConstant(0xFF)), &next); GotoIf(TaggedIsSmi(replace), &next); GotoIfNot(IsString(replace), &next); @@ -1430,10 +1389,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) { BIND(&next); } - TNode const match_end_index = SmiAdd(match_start_index, search_length); - - Callable stringadd_callable = - CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED); + TNode const match_end_index = + SmiAdd(match_start_index, SmiFromIntPtr(search_length)); VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant()); @@ -1465,8 +1422,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) { CallJS(call_callable, context, replace, UndefinedConstant(), search_string, match_start_index, subject_string); Node* const replacement_string = ToString_Inline(context, replacement); - var_result.Bind(CallStub(stringadd_callable, context, var_result.value(), - replacement_string)); + var_result.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context, + var_result.value(), replacement_string)); Goto(&out); } @@ -1476,8 +1433,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) { Node* const replacement = GetSubstitution(context, subject_string, match_start_index, match_end_index, replace_string); - var_result.Bind( - CallStub(stringadd_callable, context, var_result.value(), replacement)); + var_result.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context, + var_result.value(), replacement)); Goto(&out); } @@ -1485,9 +1442,9 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) { { Node* const suffix = CallBuiltin(Builtins::kStringSubstring, context, subject_string, - SmiUntag(match_end_index), SmiUntag(subject_length)); - Node* const result = - CallStub(stringadd_callable, context, var_result.value(), suffix); + SmiUntag(match_end_index), subject_length); + Node* const result = CallBuiltin(Builtins::kStringAdd_CheckNone, context, + var_result.value(), suffix); Return(result); } } @@ -1679,8 +1636,6 @@ class StringPadAssembler : public StringBuiltinsAssembler { SmiLessThanOrEqual(smi_max_length, SmiConstant(String::kMaxLength)), &invalid_string_length); - Callable stringadd_callable = - CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED); CSA_ASSERT(this, SmiGreaterThan(smi_max_length, string_length)); TNode const pad_length = SmiSub(smi_max_length, string_length); @@ -1717,19 +1672,20 @@ class StringPadAssembler : public StringBuiltinsAssembler { Node* const remainder_string = CallBuiltin( Builtins::kStringSubstring, context, var_fill_string.value(), IntPtrConstant(0), ChangeInt32ToIntPtr(remaining_word32)); - var_pad.Bind(CallStub(stringadd_callable, context, var_pad.value(), - remainder_string)); + var_pad.Bind(CallBuiltin(Builtins::kStringAdd_CheckNone, context, + var_pad.value(), remainder_string)); Goto(&return_result); } } BIND(&return_result); CSA_ASSERT(this, SmiEqual(pad_length, LoadStringLengthAsSmi(var_pad.value()))); - arguments.PopAndReturn(variant == kStart - ? CallStub(stringadd_callable, context, - var_pad.value(), receiver_string) - : CallStub(stringadd_callable, context, - receiver_string, var_pad.value())); + arguments.PopAndReturn( + variant == kStart + ? 
CallBuiltin(Builtins::kStringAdd_CheckNone, context, + var_pad.value(), receiver_string) + : CallBuiltin(Builtins::kStringAdd_CheckNone, context, + receiver_string, var_pad.value())); } BIND(&dont_pad); arguments.PopAndReturn(receiver_string); @@ -1844,7 +1800,7 @@ TNode StringBuiltinsAssembler::StringToArray( TNode string_data = UncheckedCast( to_direct.PointerToData(&fill_thehole_and_call_runtime)); TNode string_data_offset = to_direct.offset(); - TNode cache = LoadRoot(Heap::kSingleCharacterStringCacheRootIndex); + TNode cache = LoadRoot(RootIndex::kSingleCharacterStringCache); BuildFastLoop( IntPtrConstant(0), length, @@ -1876,7 +1832,7 @@ TNode StringBuiltinsAssembler::StringToArray( BIND(&fill_thehole_and_call_runtime); { FillFixedArrayWithValue(PACKED_ELEMENTS, elements, IntPtrConstant(0), - length, Heap::kTheHoleValueRootIndex); + length, RootIndex::kTheHoleValue); Goto(&call_runtime); } } @@ -2303,10 +2259,10 @@ void StringTrimAssembler::ScanForNonWhiteSpaceOrLineTerminator( BIND(&out); } -void StringTrimAssembler::BuildLoop(Variable* const var_index, Node* const end, - int increment, Label* const if_none_found, - Label* const out, - std::function get_character) { +void StringTrimAssembler::BuildLoop( + Variable* const var_index, Node* const end, int increment, + Label* const if_none_found, Label* const out, + const std::function& get_character) { Label loop(this, var_index); Goto(&loop); BIND(&loop); @@ -2408,9 +2364,9 @@ TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) { Node* iterator = Allocate(JSStringIterator::kSize); StoreMapNoWriteBarrier(iterator, map); StoreObjectFieldRoot(iterator, JSValue::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(iterator, JSObject::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kStringOffset, string); Node* index = SmiConstant(0); diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h index 06ac127f131328..d7af9ea15e5194 100644 --- a/deps/v8/src/builtins/builtins-string-gen.h +++ b/deps/v8/src/builtins/builtins-string-gen.h @@ -64,7 +64,7 @@ class StringBuiltinsAssembler : public CodeStubAssembler { void GenerateStringAt(const char* method_name, TNode context, Node* receiver, TNode maybe_position, TNode default_return, - StringAtAccessor accessor); + const StringAtAccessor& accessor); TNode LoadSurrogatePairAt(SloppyTNode string, SloppyTNode length, @@ -72,7 +72,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler { UnicodeEncoding encoding); void StringIndexOf(Node* const subject_string, Node* const search_string, - Node* const position, std::function f_return); + Node* const position, + const std::function& f_return); TNode IndexOfDollarChar(Node* const context, Node* const string); @@ -107,9 +108,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler { Handle symbol, const NodeFunction0& regexp_call, const NodeFunction1& generic_call); - - void Generate_StringAdd(StringAddFlags flags, PretenureFlag pretenure_flag, - Node* context, Node* left, Node* right); }; class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler { @@ -145,7 +143,7 @@ class StringTrimAssembler : public StringBuiltinsAssembler { void BuildLoop(Variable* const var_index, Node* const end, int increment, Label* const if_none_found, Label* const out, - std::function get_character); + const std::function& get_character); }; } // namespace 
internal diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc index 55c030748446e8..97e0def67cc0b6 100644 --- a/deps/v8/src/builtins/builtins-symbol.cc +++ b/deps/v8/src/builtins/builtins-symbol.cc @@ -39,7 +39,7 @@ BUILTIN(SymbolFor) { Handle key; ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key, Object::ToString(isolate, key_obj)); - return *isolate->SymbolFor(Heap::kPublicSymbolTableRootIndex, key, false); + return *isolate->SymbolFor(RootIndex::kPublicSymbolTable, key, false); } // ES6 section 19.4.2.5 Symbol.keyFor. diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc index c7c416d924e942..970136b11c0692 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.cc +++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc @@ -5,7 +5,6 @@ #include "src/builtins/builtins-typed-array-gen.h" #include "src/builtins/builtins-constructor-gen.h" -#include "src/builtins/builtins-iterator-gen.h" #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" #include "src/builtins/growable-fixed-array-gen.h" @@ -65,11 +64,15 @@ TNode TypedArrayBuiltinsAssembler::CalculateExternalPointer( // - Set EmbedderFields to 0. void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode holder, TNode length, - TNode byte_offset, - TNode byte_length) { + TNode byte_offset, + TNode byte_length) { StoreObjectField(holder, JSTypedArray::kLengthOffset, length); - StoreObjectField(holder, JSArrayBufferView::kByteOffsetOffset, byte_offset); - StoreObjectField(holder, JSArrayBufferView::kByteLengthOffset, byte_length); + StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteOffsetOffset, + byte_offset, + MachineType::PointerRepresentation()); + StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteLengthOffset, + byte_length, + MachineType::PointerRepresentation()); for (int offset = JSTypedArray::kSize; offset < JSTypedArray::kSizeWithEmbedderFields; offset += kPointerSize) { StoreObjectField(holder, offset, SmiConstant(0)); @@ -114,7 +117,8 @@ TF_BUILTIN(TypedArrayInitializeWithBuffer, TypedArrayBuiltinsAssembler) { // SmiMul returns a heap number in case of Smi overflow. TNode byte_length = SmiMul(length, element_size); - SetupTypedArray(holder, length, byte_offset, byte_length); + SetupTypedArray(holder, length, ChangeNonnegativeNumberToUintPtr(byte_offset), + ChangeNonnegativeNumberToUintPtr(byte_length)); AttachBuffer(holder, buffer, fixed_typed_map, length, byte_offset); Return(UndefinedConstant()); } @@ -146,8 +150,6 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) { // SmiMul returns a heap number in case of Smi overflow. TNode byte_length = SmiMul(length, element_size); - SetupTypedArray(holder, length, byte_offset, byte_length); - TNode fixed_typed_map = LoadMapForType(holder); // If target and new_target for the buffer differ, allocate off-heap. @@ -173,7 +175,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) { Node* native_context = LoadNativeContext(context); Node* map = LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX); - Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex); + Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray); Node* buffer = Allocate(JSArrayBuffer::kSizeWithEmbedderFields); StoreMapNoWriteBarrier(buffer, map); @@ -189,14 +191,15 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) { // - Set all embedder fields to Smi(0). 
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldSlot, SmiConstant(0)); - int32_t bitfield_value = (1 << JSArrayBuffer::IsExternal::kShift) | - (1 << JSArrayBuffer::IsNeuterable::kShift); + int32_t bitfield_value = (1 << JSArrayBuffer::IsExternalBit::kShift) | + (1 << JSArrayBuffer::IsNeuterableBit::kShift); StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldOffset, Int32Constant(bitfield_value), MachineRepresentation::kWord32); StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset, - byte_length); + SmiToIntPtr(CAST(byte_length)), + MachineType::PointerRepresentation()); StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset, SmiConstant(0)); for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) { @@ -305,6 +308,8 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) { } BIND(&done); + SetupTypedArray(holder, length, ChangeNonnegativeNumberToUintPtr(byte_offset), + ChangeNonnegativeNumberToUintPtr(byte_length)); Return(UndefinedConstant()); } @@ -399,8 +404,8 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer( BIND(&length_undefined); { ThrowIfArrayBufferIsDetached(context, buffer, "Construct"); - Node* buffer_byte_length = - LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset); + TNode buffer_byte_length = ChangeUintPtrToTagged( + LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset)); Node* remainder = CallBuiltin(Builtins::kModulus, context, buffer_byte_length, element_size); @@ -424,8 +429,8 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer( new_byte_length.Bind(SmiMul(new_length, element_size)); // Reading the byte length must come after the ToIndex operation, which // could cause the buffer to become detached. - Node* buffer_byte_length = - LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset); + TNode buffer_byte_length = ChangeUintPtrToTagged( + LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset)); Node* end = CallBuiltin(Builtins::kAdd, context, offset.value(), new_byte_length.value()); @@ -502,7 +507,7 @@ void TypedArrayBuiltinsAssembler::ConstructByTypedArray( Goto(&check_for_sab); BIND(&if_notdetached); - source_length = LoadTypedArrayLength(typed_array); + source_length = LoadJSTypedArrayLength(typed_array); Goto(&check_for_sab); // The spec requires that constructing a typed array using a SAB-backed typed @@ -511,7 +516,7 @@ void TypedArrayBuiltinsAssembler::ConstructByTypedArray( BIND(&check_for_sab); TNode bitfield = LoadObjectField(source_buffer, JSArrayBuffer::kBitFieldOffset); - Branch(IsSetWord32(bitfield), &construct, + Branch(IsSetWord32(bitfield), &construct, &if_buffernotshared); BIND(&if_buffernotshared); @@ -640,8 +645,9 @@ void TypedArrayBuiltinsAssembler::ConstructByIterable( Label fast_path(this), slow_path(this), done(this); CSA_ASSERT(this, IsCallable(iterator_fn)); - TNode array_like = CAST( - CallBuiltin(Builtins::kIterableToList, context, iterable, iterator_fn)); + TNode array_like = + CAST(CallBuiltin(Builtins::kIterableToListMayPreserveHoles, context, + iterable, iterator_fn)); TNode initial_length = LoadJSArrayLength(array_like); TNode default_constructor = CAST(LoadContextElement( @@ -674,6 +680,17 @@ TF_BUILTIN(CreateTypedArray, TypedArrayBuiltinsAssembler) { ConstructorBuiltinsAssembler constructor_assembler(this->state()); TNode result = CAST( constructor_assembler.EmitFastNewObject(context, target, new_target)); + // We need to set the byte_offset / byte_length to some sane values + // to keep the heap verifier happy. 
+ // TODO(bmeurer): Fix this initialization to not use EmitFastNewObject, + // which causes the problem, since it puts Undefined into all slots of + // the object even though that doesn't make any sense for these fields. + StoreObjectFieldNoWriteBarrier(result, JSTypedArray::kByteOffsetOffset, + UintPtrConstant(0), + MachineType::PointerRepresentation()); + StoreObjectFieldNoWriteBarrier(result, JSTypedArray::kByteLengthOffset, + UintPtrConstant(0), + MachineType::PointerRepresentation()); TNode element_size = SmiTag(GetTypedArrayElementSize(LoadElementsKind(result))); @@ -775,50 +792,58 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) { } } -void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter( - Node* context, Node* receiver, const char* method_name, int object_offset) { - // Check if the {receiver} is actually a JSTypedArray. - ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, method_name); - - // Check if the {receiver}'s JSArrayBuffer was neutered. - Node* receiver_buffer = - LoadObjectField(receiver, JSTypedArray::kBufferOffset); - Label if_receiverisneutered(this, Label::kDeferred); - GotoIf(IsDetachedBuffer(receiver_buffer), &if_receiverisneutered); - Return(LoadObjectField(receiver, object_offset)); - - BIND(&if_receiverisneutered); - { - // The {receiver}s buffer was neutered, default to zero. - Return(SmiConstant(0)); - } -} - // ES6 #sec-get-%typedarray%.prototype.bytelength TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) { + const char* const kMethodName = "get TypedArray.prototype.byteLength"; Node* context = Parameter(Descriptor::kContext); Node* receiver = Parameter(Descriptor::kReceiver); - GenerateTypedArrayPrototypeGetter(context, receiver, - "get TypedArray.prototype.byteLength", - JSTypedArray::kByteLengthOffset); + + // Check if the {receiver} is actually a JSTypedArray. + ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); + + // Default to zero if the {receiver}s buffer was neutered. + TNode receiver_buffer = + LoadJSArrayBufferViewBuffer(CAST(receiver)); + TNode byte_length = Select( + IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); }, + [=] { return LoadJSArrayBufferViewByteLength(CAST(receiver)); }); + Return(ChangeUintPtrToTagged(byte_length)); } // ES6 #sec-get-%typedarray%.prototype.byteoffset TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) { + const char* const kMethodName = "get TypedArray.prototype.byteOffset"; Node* context = Parameter(Descriptor::kContext); Node* receiver = Parameter(Descriptor::kReceiver); - GenerateTypedArrayPrototypeGetter(context, receiver, - "get TypedArray.prototype.byteOffset", - JSTypedArray::kByteOffsetOffset); + + // Check if the {receiver} is actually a JSTypedArray. + ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); + + // Default to zero if the {receiver}s buffer was neutered. 
+ TNode receiver_buffer = + LoadJSArrayBufferViewBuffer(CAST(receiver)); + TNode byte_offset = Select( + IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); }, + [=] { return LoadJSArrayBufferViewByteOffset(CAST(receiver)); }); + Return(ChangeUintPtrToTagged(byte_offset)); } // ES6 #sec-get-%typedarray%.prototype.length TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) { + const char* const kMethodName = "get TypedArray.prototype.length"; Node* context = Parameter(Descriptor::kContext); Node* receiver = Parameter(Descriptor::kReceiver); - GenerateTypedArrayPrototypeGetter(context, receiver, - "get TypedArray.prototype.length", - JSTypedArray::kLengthOffset); + + // Check if the {receiver} is actually a JSTypedArray. + ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); + + // Default to zero if the {receiver}s buffer was neutered. + TNode receiver_buffer = + LoadJSArrayBufferViewBuffer(CAST(receiver)); + TNode length = Select( + IsDetachedBuffer(receiver_buffer), [=] { return SmiConstant(0); }, + [=] { return LoadJSTypedArrayLength(CAST(receiver)); }); + Return(length); } TNode TypedArrayBuiltinsAssembler::IsUint8ElementsKind( @@ -923,7 +948,7 @@ TNode TypedArrayBuiltinsAssembler::CreateByLength( // If newTypedArray.[[ArrayLength]] < argumentList[0], throw a TypeError // exception. Label if_length_is_not_short(this); - TNode new_length = LoadTypedArrayLength(new_typed_array); + TNode new_length = LoadJSTypedArrayLength(new_typed_array); GotoIfNot(SmiLessThan(new_length, len), &if_length_is_not_short); ThrowTypeError(context, MessageTemplate::kTypedArrayTooShort); @@ -979,8 +1004,8 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource( // Check for possible range errors. - TNode source_length = SmiUntag(LoadTypedArrayLength(source)); - TNode target_length = SmiUntag(LoadTypedArrayLength(target)); + TNode source_length = SmiUntag(LoadJSTypedArrayLength(source)); + TNode target_length = SmiUntag(LoadJSTypedArrayLength(target)); TNode required_target_length = IntPtrAdd(source_length, offset); GotoIf(IntPtrGreaterThan(required_target_length, target_length), @@ -1030,7 +1055,7 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource( IsBigInt64ElementsKind(target_el_kind)), &exception); - TNode source_length = SmiUntag(LoadTypedArrayLength(source)); + TNode source_length = SmiUntag(LoadJSTypedArrayLength(source)); CallCCopyTypedArrayElementsToTypedArray(source, target, source_length, offset); Goto(&out); @@ -1051,7 +1076,7 @@ void TypedArrayBuiltinsAssembler::SetJSArraySource( IntPtrLessThanOrEqual(offset, IntPtrConstant(Smi::kMaxValue))); TNode source_length = SmiUntag(LoadFastJSArrayLength(source)); - TNode target_length = SmiUntag(LoadTypedArrayLength(target)); + TNode target_length = SmiUntag(LoadJSTypedArrayLength(target)); // Maybe out of bounds? GotoIf(IntPtrGreaterThan(IntPtrAdd(source_length, offset), target_length), @@ -1266,7 +1291,7 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) { TNode source = ValidateTypedArray(context, receiver, method_name); - TNode source_length = LoadTypedArrayLength(source); + TNode source_length = LoadJSTypedArrayLength(source); // Convert start offset argument to integer, and calculate relative offset. 
TNode start = args.GetOptionalArgumentValue(0, SmiConstant(0)); @@ -1299,7 +1324,7 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) { CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(LoadObjectField( result_array, JSTypedArray::kBufferOffset)))); TNode receiver_buffer = - LoadArrayBufferViewBuffer(CAST(receiver)); + LoadJSArrayBufferViewBuffer(CAST(receiver)); ThrowIfArrayBufferIsDetached(context, receiver_buffer, method_name); // result_array could be a different type from source or share the same @@ -1332,24 +1357,16 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) { TNode count_bytes = IntPtrMul(SmiToIntPtr(count), source_el_size); #ifdef DEBUG - Label done(this), to_intptr_failed(this, Label::kDeferred); - TNode target_byte_length = TryToIntptr( - LoadObjectField(result_array, JSTypedArray::kByteLengthOffset), - &to_intptr_failed); - CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, target_byte_length)); - - TNode source_byte_length = TryToIntptr( - LoadObjectField(source, JSTypedArray::kByteLengthOffset), - &to_intptr_failed); - TNode source_size_in_bytes = - IntPtrSub(source_byte_length, source_start_bytes); - CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, source_size_in_bytes)); - Goto(&done); - - BIND(&to_intptr_failed); - Unreachable(); - - BIND(&done); + TNode target_byte_length = + LoadJSArrayBufferViewByteLength(result_array); + CSA_ASSERT(this, UintPtrLessThanOrEqual(Unsigned(count_bytes), + target_byte_length)); + TNode source_byte_length = + LoadJSArrayBufferViewByteLength(source); + TNode source_size_in_bytes = + UintPtrSub(source_byte_length, Unsigned(source_start_bytes)); + CSA_ASSERT(this, UintPtrLessThanOrEqual(Unsigned(count_bytes), + source_size_in_bytes)); #endif // DEBUG CallCMemmove(target_data_ptr, source_start, count_bytes); @@ -1395,7 +1412,7 @@ TF_BUILTIN(TypedArrayPrototypeSubArray, TypedArrayBuiltinsAssembler) { // 5. Let buffer be O.[[ViewedArrayBuffer]]. TNode buffer = GetBuffer(context, source); // 6. Let srcLength be O.[[ArrayLength]]. - TNode source_length = LoadTypedArrayLength(source); + TNode source_length = LoadJSTypedArrayLength(source); // 7. Let relativeBegin be ? ToInteger(begin). // 8. If relativeBegin < 0, let beginIndex be max((srcLength + relativeBegin), @@ -1430,7 +1447,7 @@ TF_BUILTIN(TypedArrayPrototypeSubArray, TypedArrayBuiltinsAssembler) { // 14. Let srcByteOffset be O.[[ByteOffset]]. TNode source_byte_offset = - LoadObjectField(source, JSTypedArray::kByteOffsetOffset); + ChangeUintPtrToTagged(LoadJSArrayBufferViewByteOffset(source)); // 15. Let beginByteOffset be srcByteOffset + beginIndex × elementSize. TNode offset = SmiMul(var_begin.value(), SmiFromIntPtr(element_size)); @@ -1606,17 +1623,6 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) { "%TypedArray%.of"); } -// This builtin always returns a new JSArray and is thus safe to use even in the -// presence of code that may call back into user-JS. 
-TF_BUILTIN(IterableToList, TypedArrayBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode iterable = CAST(Parameter(Descriptor::kIterable)); - TNode iterator_fn = CAST(Parameter(Descriptor::kIteratorFn)); - - IteratorBuiltinsAssembler iterator_assembler(state()); - Return(iterator_assembler.IterableToList(context, iterable, iterator_fn)); -} - // ES6 #sec-%typedarray%.from TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { TNode context = CAST(Parameter(Descriptor::kContext)); @@ -1818,7 +1824,7 @@ TF_BUILTIN(TypedArrayPrototypeFilter, TypedArrayBuiltinsAssembler) { ValidateTypedArray(context, receiver, method_name); // 3. Let len be O.[[ArrayLength]]. - TNode length = LoadTypedArrayLength(source); + TNode length = LoadJSTypedArrayLength(source); // 4. If IsCallable(callbackfn) is false, throw a TypeError exception. TNode callbackfn = args.GetOptionalArgumentValue(0); diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h index 11768d660a0b49..1e35ae69a96bce 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.h +++ b/deps/v8/src/builtins/builtins-typed-array-gen.h @@ -21,9 +21,6 @@ class TypedArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler { const char* method_name); protected: - void GenerateTypedArrayPrototypeGetter(Node* context, Node* receiver, - const char* method_name, - int object_offset); void GenerateTypedArrayPrototypeIterationMethod(TNode context, TNode receiver, const char* method_name, @@ -50,7 +47,8 @@ class TypedArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler { TNode element_size); void SetupTypedArray(TNode holder, TNode length, - TNode byte_offset, TNode byte_length); + TNode byte_offset, + TNode byte_length); void AttachBuffer(TNode holder, TNode buffer, TNode map, TNode length, TNode byte_offset); diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc index facfaf93f814e1..594680f39dc491 100644 --- a/deps/v8/src/builtins/builtins-wasm-gen.cc +++ b/deps/v8/src/builtins/builtins-wasm-gen.cc @@ -38,16 +38,19 @@ class WasmBuiltinsAssembler : public CodeStubAssembler { LoadFromParentFrame(WasmCompiledFrameConstants::kWasmInstanceOffset)); } + TNode LoadContextFromInstance(TNode instance) { + return UncheckedCast( + Load(MachineType::AnyTagged(), instance, + IntPtrConstant(WasmInstanceObject::kNativeContextOffset - + kHeapObjectTag))); + } + TNode LoadCEntryFromInstance(TNode instance) { return UncheckedCast( Load(MachineType::AnyTagged(), instance, IntPtrConstant(WasmInstanceObject::kCEntryStubOffset - kHeapObjectTag))); } - - TNode LoadCEntryFromFrame() { - return LoadCEntryFromInstance(LoadInstanceFromFrame()); - } }; TF_BUILTIN(WasmAllocateHeapNumber, WasmBuiltinsAssembler) { @@ -55,18 +58,6 @@ TF_BUILTIN(WasmAllocateHeapNumber, WasmBuiltinsAssembler) { TailCallStub(AllocateHeapNumberDescriptor(), target, NoContextConstant()); } -TF_BUILTIN(WasmArgumentsAdaptor, WasmBuiltinsAssembler) { - TNode context = UncheckedParameter(Descriptor::kContext); - TNode function = UncheckedParameter(Descriptor::kTarget); - TNode new_target = UncheckedParameter(Descriptor::kNewTarget); - TNode argc1 = UncheckedParameter(Descriptor::kActualArgumentsCount); - TNode argc2 = UncheckedParameter(Descriptor::kExpectedArgumentsCount); - TNode target = - LoadBuiltinFromFrame(Builtins::kArgumentsAdaptorTrampoline); - TailCallStub(ArgumentAdaptorDescriptor{}, target, context, function, - new_target, argc1, argc2); -} - 
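
In the Wasm builtins above, the frame-only LoadCEntryFromFrame helper is removed: each builtin now loads the WasmInstanceObject from the frame once and reads both the CEntry stub and the native context out of it, so runtime calls such as Runtime::kWasmStackGuard receive a real context instead of NoContextConstant(). The raw loads follow V8's tagged-pointer convention, where heap pointers carry kHeapObjectTag in the low bit, so a field at byte offset k is read from p + k - kHeapObjectTag. A small sketch of that addressing scheme, with a made-up two-field layout standing in for WasmInstanceObject:

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;  // low bit set marks a heap pointer

    struct Instance {  // hypothetical layout, not V8's
      void* native_context;
      void* centry_stub;
    };
    constexpr int kNativeContextOffset = 0;
    constexpr int kCEntryStubOffset = sizeof(void*);

    // Read a field from a tagged pointer: untag by subtracting kHeapObjectTag,
    // then add the field's byte offset, as the IntPtrConstant(...) loads do.
    void* LoadField(intptr_t tagged, int offset) {
      return *reinterpret_cast<void**>(tagged + offset - kHeapObjectTag);
    }

    int main() {
      Instance inst{&inst, &inst.centry_stub};
      intptr_t tagged = reinterpret_cast<intptr_t>(&inst) + kHeapObjectTag;
      assert(LoadField(tagged, kNativeContextOffset) == inst.native_context);
      assert(LoadField(tagged, kCEntryStubOffset) == inst.centry_stub);
      return 0;
    }
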
TF_BUILTIN(WasmCallJavaScript, WasmBuiltinsAssembler) { TNode context = UncheckedParameter(Descriptor::kContext); TNode function = UncheckedParameter(Descriptor::kFunction); @@ -83,9 +74,10 @@ TF_BUILTIN(WasmToNumber, WasmBuiltinsAssembler) { } TF_BUILTIN(WasmStackGuard, WasmBuiltinsAssembler) { - TNode centry = LoadCEntryFromFrame(); - TailCallRuntimeWithCEntry(Runtime::kWasmStackGuard, centry, - NoContextConstant()); + TNode instance = LoadInstanceFromFrame(); + TNode centry = LoadCEntryFromInstance(instance); + TNode context = LoadContextFromInstance(instance); + TailCallRuntimeWithCEntry(Runtime::kWasmStackGuard, centry, context); } TF_BUILTIN(WasmGrowMemory, WasmBuiltinsAssembler) { @@ -100,9 +92,9 @@ TF_BUILTIN(WasmGrowMemory, WasmBuiltinsAssembler) { TNode num_pages_smi = SmiFromInt32(num_pages); TNode instance = LoadInstanceFromFrame(); TNode centry = LoadCEntryFromInstance(instance); - TNode ret_smi = UncheckedCast( - CallRuntimeWithCEntry(Runtime::kWasmGrowMemory, centry, - NoContextConstant(), instance, num_pages_smi)); + TNode context = LoadContextFromInstance(instance); + TNode ret_smi = UncheckedCast(CallRuntimeWithCEntry( + Runtime::kWasmGrowMemory, centry, context, instance, num_pages_smi)); TNode ret = SmiToInt32(ret_smi); ReturnRaw(ret); @@ -112,10 +104,12 @@ TF_BUILTIN(WasmGrowMemory, WasmBuiltinsAssembler) { #define DECLARE_ENUM(name) \ TF_BUILTIN(ThrowWasm##name, WasmBuiltinsAssembler) { \ - TNode centry = LoadCEntryFromFrame(); \ + TNode instance = LoadInstanceFromFrame(); \ + TNode centry = LoadCEntryFromInstance(instance); \ + TNode context = LoadContextFromInstance(instance); \ int message_id = wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \ - TailCallRuntimeWithCEntry(Runtime::kThrowWasmError, centry, \ - NoContextConstant(), SmiConstant(message_id)); \ + TailCallRuntimeWithCEntry(Runtime::kThrowWasmError, centry, context, \ + SmiConstant(message_id)); \ } FOREACH_WASM_TRAPREASON(DECLARE_ENUM) #undef DECLARE_ENUM diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc index b2c7433e8d6a10..6da0f1503710ea 100644 --- a/deps/v8/src/builtins/builtins.cc +++ b/deps/v8/src/builtins/builtins.cc @@ -51,13 +51,12 @@ struct BuiltinMetadata { #define DECL_TFC(Name, ...) { #Name, Builtins::TFC, {} }, #define DECL_TFS(Name, ...) { #Name, Builtins::TFS, {} }, #define DECL_TFH(Name, ...) { #Name, Builtins::TFH, {} }, -#define DECL_BCH(Name, ...) { #Name "Handler", Builtins::BCH, {} }, \ - { #Name "WideHandler", Builtins::BCH, {} }, \ - { #Name "ExtraWideHandler", Builtins::BCH, {} }, +#define DECL_BCH(Name, ...) { #Name, Builtins::BCH, {} }, +#define DECL_DLH(Name, ...) { #Name, Builtins::DLH, {} }, #define DECL_ASM(Name, ...) 
{ #Name, Builtins::ASM, {} }, const BuiltinMetadata builtin_metadata[] = { BUILTIN_LIST(DECL_CPP, DECL_API, DECL_TFJ, DECL_TFC, DECL_TFS, DECL_TFH, - DECL_BCH, DECL_ASM) + DECL_BCH, DECL_DLH, DECL_ASM) }; #undef DECL_CPP #undef DECL_API @@ -66,6 +65,7 @@ const BuiltinMetadata builtin_metadata[] = { #undef DECL_TFS #undef DECL_TFH #undef DECL_BCH +#undef DECL_DLH #undef DECL_ASM // clang-format on @@ -166,11 +166,12 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) { break; \ } BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER, - CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, IGNORE_BUILTIN) + CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, IGNORE_BUILTIN, + IGNORE_BUILTIN) #undef CASE_OTHER default: Builtins::Kind kind = Builtins::KindOf(name); - DCHECK_NE(kind, BCH); + DCHECK(kind != BCH && kind != DLH); if (kind == TFJ || kind == CPP) { return Callable(code, JSTrampolineDescriptor{}); } @@ -281,9 +282,13 @@ bool Builtins::IsLazy(int index) { case kCompileLazy: case kDebugBreakTrampoline: case kDeserializeLazy: + case kDeserializeLazyHandler: + case kDeserializeLazyWideHandler: + case kDeserializeLazyExtraWideHandler: case kFunctionPrototypeHasInstance: // https://crbug.com/v8/6786. case kHandleApiCall: case kIllegal: + case kIllegalHandler: case kInstantiateAsmJs: case kInterpreterEnterBytecodeAdvance: case kInterpreterEnterBytecodeDispatch: @@ -306,27 +311,21 @@ bool Builtins::IsLazy(int index) { return false; default: // TODO(6624): Extend to other kinds. - return KindOf(index) == TFJ; + return KindOf(index) == TFJ || KindOf(index) == BCH; } UNREACHABLE(); } +// static +bool Builtins::IsLazyDeserializer(Code* code) { + return IsLazyDeserializer(code->builtin_index()); +} + // static bool Builtins::IsIsolateIndependent(int index) { DCHECK(IsBuiltinId(index)); #ifndef V8_TARGET_ARCH_IA32 switch (index) { -// Bytecode handlers do not yet support being embedded. -#ifdef V8_EMBEDDED_BYTECODE_HANDLERS -#define BYTECODE_BUILTIN(Name, ...) \ - case k##Name##Handler: \ - case k##Name##WideHandler: \ - case k##Name##ExtraWideHandler: \ - return false; - BUILTIN_LIST_BYTECODE_HANDLERS(BYTECODE_BUILTIN) -#undef BYTECODE_BUILTIN -#endif // V8_EMBEDDED_BYTECODE_HANDLERS - // TODO(jgruber): There's currently two blockers for moving // InterpreterEntryTrampoline into the binary: // 1. InterpreterEnterBytecode calculates a pointer into the middle of @@ -423,6 +422,7 @@ const char* Builtins::KindNameOf(int index) { case TFS: return "TFS"; case TFH: return "TFH"; case BCH: return "BCH"; + case DLH: return "DLH"; case ASM: return "ASM"; } // clang-format on diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h index 0bd3c317bfe5fd..9f404a0ac00b5a 100644 --- a/deps/v8/src/builtins/builtins.h +++ b/deps/v8/src/builtins/builtins.h @@ -25,6 +25,11 @@ namespace compiler { class CodeAssemblerState; } +template +static constexpr T FirstFromVarArgs(T x, ...) noexcept { + return x; +} + // Convenience macro to avoid generating named accessors for all builtins. #define BUILTIN_CODE(isolate, name) \ (isolate)->builtins()->builtin_handle(Builtins::k##name) @@ -40,13 +45,16 @@ class Builtins { enum Name : int32_t { #define DEF_ENUM(Name, ...) k##Name, -#define DEF_ENUM_BYTECODE_HANDLER(Name, ...) 
\ - k##Name##Handler, k##Name##WideHandler, k##Name##ExtraWideHandler, BUILTIN_LIST(DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, - DEF_ENUM_BYTECODE_HANDLER, DEF_ENUM) + DEF_ENUM, DEF_ENUM, DEF_ENUM) #undef DEF_ENUM -#undef DEF_ENUM_BYTECODE_HANDLER - builtin_count + builtin_count, + +#define EXTRACT_NAME(Name, ...) k##Name, + // Define kFirstBytecodeHandler, + kFirstBytecodeHandler = + FirstFromVarArgs(BUILTIN_LIST_BYTECODE_HANDLERS(EXTRACT_NAME) 0) +#undef EXTRACT_NAME }; static const int32_t kNoBuiltinId = -1; @@ -56,7 +64,7 @@ class Builtins { } // The different builtin kinds are documented in builtins-definitions.h. - enum Kind { CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM }; + enum Kind { CPP, API, TFJ, TFC, TFS, TFH, BCH, DLH, ASM }; static BailoutId GetContinuationBailoutId(Name name); static Name GetBuiltinFromBailoutId(BailoutId); @@ -111,6 +119,35 @@ class Builtins { // special cases such as CompileLazy and DeserializeLazy. static bool IsLazy(int index); + static constexpr int kFirstWideBytecodeHandler = + kFirstBytecodeHandler + kNumberOfBytecodeHandlers; + static constexpr int kFirstExtraWideBytecodeHandler = + kFirstWideBytecodeHandler + kNumberOfWideBytecodeHandlers; + STATIC_ASSERT(kFirstExtraWideBytecodeHandler + + kNumberOfWideBytecodeHandlers == + builtin_count); + + // Returns the index of the appropriate lazy deserializer in the builtins + // table. + static constexpr int LazyDeserializerForBuiltin(const int index) { + return index < kFirstWideBytecodeHandler + ? (index < kFirstBytecodeHandler + ? Builtins::kDeserializeLazy + : Builtins::kDeserializeLazyHandler) + : (index < kFirstExtraWideBytecodeHandler + ? Builtins::kDeserializeLazyWideHandler + : Builtins::kDeserializeLazyExtraWideHandler); + } + + static constexpr bool IsLazyDeserializer(int builtin_index) { + return builtin_index == kDeserializeLazy || + builtin_index == kDeserializeLazyHandler || + builtin_index == kDeserializeLazyWideHandler || + builtin_index == kDeserializeLazyExtraWideHandler; + } + + static bool IsLazyDeserializer(Code* code); + // Helper methods used for testing isolate-independent builtins. // TODO(jgruber,v8:6666): Remove once all builtins have been migrated. static bool IsIsolateIndependent(int index); @@ -179,7 +216,8 @@ class Builtins { static void Generate_##Name(compiler::CodeAssemblerState* state); BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF, - DECLARE_TF, DECLARE_TF, IGNORE_BUILTIN, DECLARE_ASM) + DECLARE_TF, DECLARE_TF, IGNORE_BUILTIN, IGNORE_BUILTIN, + DECLARE_ASM) #undef DECLARE_ASM #undef DECLARE_TF diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc index 04c0655bf73d4b..26995453dd9e46 100644 --- a/deps/v8/src/builtins/constants-table-builder.cc +++ b/deps/v8/src/builtins/constants-table-builder.cc @@ -19,14 +19,14 @@ BuiltinsConstantsTableBuilder::BuiltinsConstantsTableBuilder(Isolate* isolate) // as a constant, which means that codegen will load it using the root // register. DCHECK(isolate_->heap()->RootCanBeTreatedAsConstant( - Heap::kEmptyFixedArrayRootIndex)); + RootIndex::kEmptyFixedArray)); } uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle object) { #ifdef DEBUG // Roots must not be inserted into the constants table as they are already // accessibly from the root list. - Heap::RootListIndex root_list_index; + RootIndex root_list_index; DCHECK(!isolate_->heap()->IsRootHandle(object, &root_list_index)); // Not yet finalized. 
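
The builtins.h changes above fold bytecode handlers into the normal builtin table: DECL_BCH now emits one entry per handler instead of a Handler/WideHandler/ExtraWideHandler triple, a DLH kind is added for the per-width DeserializeLazy*Handler builtins, and LazyDeserializerForBuiltin picks a deserializer purely from index ranges. kFirstBytecodeHandler is computed with a small constexpr trick: EXTRACT_NAME expands the handler list into a comma-separated argument list, and FirstFromVarArgs returns only its first argument (the trailing 0 keeps the call well-formed if the list were empty). A self-contained toy version of the trick, with a made-up handler list in place of BUILTIN_LIST_BYTECODE_HANDLERS:

    template <typename T>
    static constexpr T FirstFromVarArgs(T x, ...) noexcept {
      return x;  // only the first argument matters; the rest are never read
    }

    #define HANDLER_LIST(V) V(LdaZero) V(Star) V(Return)
    #define EXTRACT_NAME(Name) k##Name,

    enum BuiltinId : int { kCompileLazy, kLdaZero, kStar, kReturn, kBuiltinCount };

    // Expands to: FirstFromVarArgs(kLdaZero, kStar, kReturn, 0)
    constexpr int kFirstBytecodeHandler =
        FirstFromVarArgs(HANDLER_LIST(EXTRACT_NAME) 0);

    static_assert(kFirstBytecodeHandler == kLdaZero,
                  "first handler index is extracted from the list");

    int main() { return 0; }

Because the handlers occupy contiguous index ranges, IsLazy can now simply return true for KindOf(index) == BCH, and the DCHECK in CallableFor rules out both BCH and DLH kinds.
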
@@ -56,7 +56,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
#ifdef DEBUG
// Roots must not be inserted into the constants table as they are already
// accessible from the root list.
- Heap::RootListIndex root_list_index;
+ RootIndex root_list_index;
DCHECK(!isolate_->heap()->IsRootHandle(code_object, &root_list_index));
// Not yet finalized.
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index 874c122995991f..81a4d72ecb79d4 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -5,13 +5,13 @@ module data_view {
extern operator '.buffer'
- macro LoadArrayBufferViewBuffer(JSArrayBufferView): JSArrayBuffer;
+ macro LoadJSArrayBufferViewBuffer(JSArrayBufferView): JSArrayBuffer;
extern operator '.byte_length'
- macro LoadDataViewByteLength(JSDataView): Number;
+ macro LoadJSArrayBufferViewByteLength(JSArrayBufferView): uintptr;
extern operator '.byte_offset'
- macro LoadDataViewByteOffset(JSDataView): Number;
+ macro LoadJSArrayBufferViewByteOffset(JSArrayBufferView): uintptr;
extern operator '.backing_store'
- macro LoadArrayBufferBackingStore(JSArrayBuffer): RawPtr;
+ macro LoadJSArrayBufferBackingStore(JSArrayBuffer): RawPtr;
macro MakeDataViewGetterNameString(kind: constexpr ElementsKind): String {
if constexpr (kind == UINT8_ELEMENTS) {
@@ -72,7 +72,7 @@ module data_view {
macro ValidateDataView(context: Context, o: Object, method: String): JSDataView {
try {
- return cast<JSDataView>(o) otherwise CastError;
+ return Cast<JSDataView>(o) otherwise CastError;
}
label CastError {
ThrowTypeError(context, kIncompatibleMethodReceiver, method);
@@ -82,35 +82,35 @@ module data_view {
// ES6 section 24.2.4.1 get DataView.prototype.buffer
javascript builtin DataViewPrototypeGetBuffer(
context: Context, receiver: Object, ...arguments): JSArrayBuffer {
- let data_view: JSDataView = ValidateDataView(
+ let dataView: JSDataView = ValidateDataView(
context, receiver, 'get DataView.prototype.buffer');
- return data_view.buffer;
+ return dataView.buffer;
}
// ES6 section 24.2.4.2 get DataView.prototype.byteLength
javascript builtin DataViewPrototypeGetByteLength(
context: Context, receiver: Object, ...arguments): Number {
- let data_view: JSDataView = ValidateDataView(
+ let dataView: JSDataView = ValidateDataView(
context, receiver, 'get DataView.prototype.byte_length');
- if (WasNeutered(data_view)) {
+ if (WasNeutered(dataView)) {
// TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {data_view} was neutered.
+ // here if the JSArrayBuffer of the {dataView} was neutered.
return 0;
}
- return data_view.byte_length;
+ return Convert<Number>(dataView.byte_length);
}
// ES6 section 24.2.4.3 get DataView.prototype.byteOffset
javascript builtin DataViewPrototypeGetByteOffset(
context: Context, receiver: Object, ...arguments): Number {
- let data_view: JSDataView = ValidateDataView(
+ let dataView: JSDataView = ValidateDataView(
context, receiver, 'get DataView.prototype.byte_offset');
- if (WasNeutered(data_view)) {
+ if (WasNeutered(dataView)) {
// TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {data_view} was neutered.
+ // here if the JSArrayBuffer of the {dataView} was neutered.
return 0;
}
- return data_view.byte_offset;
+ return Convert<Number>(dataView.byte_offset);
}
extern macro BitcastInt32ToFloat32(uint32): float32;
@@ -120,102 +120,102 @@ module data_view {
extern macro Float64InsertLowWord32(float64, uint32): float64;
extern macro Float64InsertHighWord32(float64, uint32): float64;
- extern macro LoadUint8(RawPtr, intptr): uint32;
- extern macro LoadInt8(RawPtr, intptr): int32;
+ extern macro LoadUint8(RawPtr, uintptr): uint32;
+ extern macro LoadInt8(RawPtr, uintptr): int32;
- macro LoadDataView8(buffer: JSArrayBuffer, offset: intptr,
+ macro LoadDataView8(buffer: JSArrayBuffer, offset: uintptr,
signed: constexpr bool): Smi {
if constexpr (signed) {
- return convert<Smi>(LoadInt8(buffer.backing_store, offset));
+ return Convert<Smi>(LoadInt8(buffer.backing_store, offset));
} else {
- return convert<Smi>(LoadUint8(buffer.backing_store, offset));
+ return Convert<Smi>(LoadUint8(buffer.backing_store, offset));
}
}
- macro LoadDataView16(buffer: JSArrayBuffer, offset: intptr,
- requested_little_endian: bool,
+ macro LoadDataView16(buffer: JSArrayBuffer, offset: uintptr,
+ requestedLittleEndian: bool,
signed: constexpr bool): Number {
- let data_pointer: RawPtr = buffer.backing_store;
+ let dataPointer: RawPtr = buffer.backing_store;
let b0: int32;
let b1: int32;
let result: int32;
// Sign-extend the most significant byte by loading it as an Int8.
- if (requested_little_endian) {
- b0 = Signed(LoadUint8(data_pointer, offset));
- b1 = LoadInt8(data_pointer, offset + 1);
+ if (requestedLittleEndian) {
+ b0 = Signed(LoadUint8(dataPointer, offset));
+ b1 = LoadInt8(dataPointer, offset + 1);
result = (b1 << 8) + b0;
} else {
- b0 = LoadInt8(data_pointer, offset);
- b1 = Signed(LoadUint8(data_pointer, offset + 1));
+ b0 = LoadInt8(dataPointer, offset);
+ b1 = Signed(LoadUint8(dataPointer, offset + 1));
result = (b0 << 8) + b1;
}
if constexpr (signed) {
- return convert<Number>(result);
+ return Convert<Number>(result);
} else {
// Bit-mask the higher bits to prevent sign extension if we're unsigned.
- return convert<Number>(result & 0xFFFF);
+ return Convert<Number>(result & 0xFFFF);
}
}
- macro LoadDataView32(buffer: JSArrayBuffer, offset: intptr,
- requested_little_endian: bool,
+ macro LoadDataView32(buffer: JSArrayBuffer, offset: uintptr,
+ requestedLittleEndian: bool,
kind: constexpr ElementsKind): Number {
- let data_pointer: RawPtr = buffer.backing_store;
+ let dataPointer: RawPtr = buffer.backing_store;
- let b0: uint32 = LoadUint8(data_pointer, offset);
- let b1: uint32 = LoadUint8(data_pointer, offset + 1);
- let b2: uint32 = LoadUint8(data_pointer, offset + 2);
- let b3: uint32 = LoadUint8(data_pointer, offset + 3);
+ let b0: uint32 = LoadUint8(dataPointer, offset);
+ let b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ let b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ let b3: uint32 = LoadUint8(dataPointer, offset + 3);
let result: uint32;
- if (requested_little_endian) {
+ if (requestedLittleEndian) {
result = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
} else {
result = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
}
if constexpr (kind == INT32_ELEMENTS) {
- return convert<Number>(Signed(result));
+ return Convert<Number>(Signed(result));
} else if constexpr (kind == UINT32_ELEMENTS) {
- return convert<Number>(result);
+ return Convert<Number>(result);
} else if constexpr (kind == FLOAT32_ELEMENTS) {
- let float_res: float64 = convert<float64>(BitcastInt32ToFloat32(result));
- return convert<Number>(float_res);
+ let floatRes: float64 = Convert<float64>(BitcastInt32ToFloat32(result));
+ return Convert<Number>(floatRes);
} else {
unreachable;
}
}
- macro LoadDataViewFloat64(buffer: JSArrayBuffer, offset: intptr,
- requested_little_endian: bool): Number {
- let data_pointer: RawPtr = buffer.backing_store;
-
- let b0: uint32 = LoadUint8(data_pointer, offset);
- let b1: uint32 = LoadUint8(data_pointer, offset + 1);
- let b2: uint32 = LoadUint8(data_pointer, offset + 2);
- let b3: uint32 = LoadUint8(data_pointer, offset + 3);
- let b4: uint32 = LoadUint8(data_pointer, offset + 4);
- let b5: uint32 = LoadUint8(data_pointer, offset + 5);
- let b6: uint32 = LoadUint8(data_pointer, offset + 6);
- let b7: uint32 = LoadUint8(data_pointer, offset + 7);
- let low_word: uint32;
- let high_word: uint32;
-
- if (requested_little_endian) {
- low_word = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
- high_word = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
+ macro LoadDataViewFloat64(buffer: JSArrayBuffer, offset: uintptr,
+ requestedLittleEndian: bool): Number {
+ let dataPointer: RawPtr = buffer.backing_store;
+
+ let b0: uint32 = LoadUint8(dataPointer, offset);
+ let b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ let b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ let b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ let b4: uint32 = LoadUint8(dataPointer, offset + 4);
+ let b5: uint32 = LoadUint8(dataPointer, offset + 5);
+ let b6: uint32 = LoadUint8(dataPointer, offset + 6);
+ let b7: uint32 = LoadUint8(dataPointer, offset + 7);
+ let lowWord: uint32;
+ let highWord: uint32;
+
+ if (requestedLittleEndian) {
+ lowWord = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ highWord = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
} else {
- high_word = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
- low_word = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
+ highWord = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ lowWord = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
}
let result: float64 = 0;
- result = Float64InsertLowWord32(result, low_word);
- result = Float64InsertHighWord32(result, high_word);
+ result = Float64InsertLowWord32(result, lowWord);
+ result = Float64InsertHighWord32(result, highWord);
- return convert<Number>(result);
+ return Convert<Number>(result);
}
extern macro AllocateBigInt(intptr): BigInt;
@@ -230,12 +230,12 @@ module data_view {
const kOneDigitBigInt: constexpr int31 = 1;
const kTwoDigitBigInt: constexpr int31 = 2;
- macro CreateEmptyBigInt(is_positive: bool, length: constexpr int31): BigInt {
+ macro CreateEmptyBigInt(isPositive: bool, length: constexpr int31): BigInt {
// Allocate a BigInt with the desired length (number of digits).
let result: BigInt = AllocateBigInt(length);
// Write the desired sign and length to the BigInt bitfield.
- if (is_positive) {
+ if (isPositive) {
StoreBigIntBitfield(result,
DataViewEncodeBigIntBits(kPositiveBigInt, length));
} else {
@@ -247,143 +247,143 @@ module data_view {
}
// Create a BigInt on a 64-bit architecture from two 32-bit values.
- macro MakeBigIntOn64Bit(low_word: uint32, high_word: uint32,
+ macro MakeBigIntOn64Bit(lowWord: uint32, highWord: uint32,
signed: constexpr bool): BigInt {
// 0n is represented by a zero-length BigInt.
- if (low_word == 0 && high_word == 0) {
+ if (lowWord == 0 && highWord == 0) {
return AllocateBigInt(kZeroDigitBigInt);
}
- let is_positive: bool = true;
- let high_part: intptr = Signed(convert<uintptr>(high_word));
- let low_part: intptr = Signed(convert<uintptr>(low_word));
- let raw_value: intptr = (high_part << 32) + low_part;
+ let isPositive: bool = true;
+ let highPart: intptr = Signed(Convert<uintptr>(highWord));
+ let lowPart: intptr = Signed(Convert<uintptr>(lowWord));
+ let rawValue: intptr = (highPart << 32) + lowPart;
if constexpr (signed) {
- if (raw_value < 0) {
- is_positive = false;
- // We have to store the absolute value of raw_value in the digit.
- raw_value = 0 - raw_value;
+ if (rawValue < 0) {
+ isPositive = false;
+ // We have to store the absolute value of rawValue in the digit.
+ rawValue = 0 - rawValue;
}
}
// Allocate the BigInt and store the absolute value.
- let result: BigInt = CreateEmptyBigInt(is_positive, kOneDigitBigInt);
+ let result: BigInt = CreateEmptyBigInt(isPositive, kOneDigitBigInt);
- StoreBigIntDigit(result, 0, Unsigned(raw_value));
+ StoreBigIntDigit(result, 0, Unsigned(rawValue));
return result;
}
// Create a BigInt on a 32-bit architecture from two 32-bit values.
- macro MakeBigIntOn32Bit(low_word: uint32, high_word: uint32,
+ macro MakeBigIntOn32Bit(lowWord: uint32, highWord: uint32,
signed: constexpr bool): BigInt {
// 0n is represented by a zero-length BigInt.
- if (low_word == 0 && high_word == 0) {
+ if (lowWord == 0 && highWord == 0) {
return AllocateBigInt(kZeroDigitBigInt);
}
// On a 32-bit platform, we might need 1 or 2 digits to store the number.
- let need_two_digits: bool = false;
- let is_positive: bool = true;
+ let needTwoDigits: bool = false;
+ let isPositive: bool = true;
- // We need to do some math on low_word and high_word,
- // so convert them to int32.
- let low_part: int32 = Signed(low_word);
- let high_part: int32 = Signed(high_word);
+ // We need to do some math on lowWord and highWord,
+ // so convert them to int32.
+ let lowPart: int32 = Signed(lowWord);
+ let highPart: int32 = Signed(highWord);
- // If high_word == 0, the number is positive, and we only need 1 digit,
+ // If highWord == 0, the number is positive, and we only need 1 digit,
// so we don't have anything to do.
// Otherwise, all cases are possible.
- if (high_word != 0) {
+ if (highWord != 0) {
if constexpr (signed) {
- // If high_part < 0, the number is always negative.
- if (high_part < 0) { - is_positive = false; + // If highPart < 0, the number is always negative. + if (highPart < 0) { + isPositive = false; // We have to compute the absolute value by hand. // There will be a negative carry from the low word // to the high word iff low != 0. - high_part = 0 - high_part; - if (low_part != 0) { - high_part = high_part - 1; + highPart = 0 - highPart; + if (lowPart != 0) { + highPart = highPart - 1; } - low_part = 0 - low_part; + lowPart = 0 - lowPart; - // Here, high_part could be 0 again so we might have 1 or 2 digits. - if (high_part != 0) { - need_two_digits = true; + // Here, highPart could be 0 again so we might have 1 or 2 digits. + if (highPart != 0) { + needTwoDigits = true; } } else { // In this case, the number is positive, and we need 2 digits. - need_two_digits = true; + needTwoDigits = true; } } else { // In this case, the number is positive (unsigned), // and we need 2 digits. - need_two_digits = true; + needTwoDigits = true; } } // Allocate the BigInt with the right sign and length. let result: BigInt; - if (need_two_digits) { - result = CreateEmptyBigInt(is_positive, kTwoDigitBigInt); + if (needTwoDigits) { + result = CreateEmptyBigInt(isPositive, kTwoDigitBigInt); } else { - result = CreateEmptyBigInt(is_positive, kOneDigitBigInt); + result = CreateEmptyBigInt(isPositive, kOneDigitBigInt); } // Finally, write the digit(s) to the BigInt. - StoreBigIntDigit(result, 0, Unsigned(convert(low_part))); + StoreBigIntDigit(result, 0, Unsigned(Convert(lowPart))); - if (need_two_digits) { - StoreBigIntDigit(result, 1, Unsigned(convert(high_part))); + if (needTwoDigits) { + StoreBigIntDigit(result, 1, Unsigned(Convert(highPart))); } return result; } - macro MakeBigInt(low_word: uint32, high_word: uint32, + macro MakeBigInt(lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt { // A BigInt digit has the platform word size, so we only need one digit // on 64-bit platforms but may need two on 32-bit. 
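+ // For example: lowWord == 1, highWord == 0 encodes 1n; with signed values,
+ // lowWord == highWord == 0xFFFFFFFF encodes -1n, and both paths below then
+ // produce a single digit holding 1 (the latter with the sign bit set).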
if constexpr (Is64()) { - return MakeBigIntOn64Bit(low_word, high_word, signed); + return MakeBigIntOn64Bit(lowWord, highWord, signed); } else { - return MakeBigIntOn32Bit(low_word, high_word, signed); + return MakeBigIntOn32Bit(lowWord, highWord, signed); } } - macro LoadDataViewBigInt(buffer: JSArrayBuffer, offset: intptr, - requested_little_endian: bool, + macro LoadDataViewBigInt(buffer: JSArrayBuffer, offset: uintptr, + requestedLittleEndian: bool, signed: constexpr bool): BigInt { - let data_pointer: RawPtr = buffer.backing_store; - - let b0: uint32 = LoadUint8(data_pointer, offset); - let b1: uint32 = LoadUint8(data_pointer, offset + 1); - let b2: uint32 = LoadUint8(data_pointer, offset + 2); - let b3: uint32 = LoadUint8(data_pointer, offset + 3); - let b4: uint32 = LoadUint8(data_pointer, offset + 4); - let b5: uint32 = LoadUint8(data_pointer, offset + 5); - let b6: uint32 = LoadUint8(data_pointer, offset + 6); - let b7: uint32 = LoadUint8(data_pointer, offset + 7); - let low_word: uint32; - let high_word: uint32; - - if (requested_little_endian) { - low_word = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0; - high_word = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4; + let dataPointer: RawPtr = buffer.backing_store; + + let b0: uint32 = LoadUint8(dataPointer, offset); + let b1: uint32 = LoadUint8(dataPointer, offset + 1); + let b2: uint32 = LoadUint8(dataPointer, offset + 2); + let b3: uint32 = LoadUint8(dataPointer, offset + 3); + let b4: uint32 = LoadUint8(dataPointer, offset + 4); + let b5: uint32 = LoadUint8(dataPointer, offset + 5); + let b6: uint32 = LoadUint8(dataPointer, offset + 6); + let b7: uint32 = LoadUint8(dataPointer, offset + 7); + let lowWord: uint32; + let highWord: uint32; + + if (requestedLittleEndian) { + lowWord = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0; + highWord = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4; } else { - high_word = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3; - low_word = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7; + highWord = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3; + lowWord = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7; } - return MakeBigInt(low_word, high_word, signed); + return MakeBigInt(lowWord, highWord, signed); } extern macro ToSmiIndex(Object, Context): Smi labels RangeError; @@ -392,10 +392,10 @@ module data_view { macro DataViewGet(context: Context, receiver: Object, offset: Object, - requested_little_endian: Object, + requestedLittleEndian: Object, kind: constexpr ElementsKind): Numeric { - let data_view: JSDataView = ValidateDataView( + let dataView: JSDataView = ValidateDataView( context, receiver, MakeDataViewGetterNameString(kind)); let getIndex: Number; @@ -406,28 +406,26 @@ module data_view { ThrowRangeError(context, kInvalidDataViewAccessorOffset); } - let littleEndian: bool = ToBoolean(requested_little_endian); - let buffer: JSArrayBuffer = data_view.buffer; + let littleEndian: bool = ToBoolean(requestedLittleEndian); + let buffer: JSArrayBuffer = dataView.buffer; if (IsDetachedBuffer(buffer)) { ThrowTypeError(context, kDetachedOperation, MakeDataViewGetterNameString(kind)); } - let viewOffset: Number = data_view.byte_offset; - let viewSize: Number = data_view.byte_length; - let elementSize: Number = DataViewElementSize(kind); + let getIndexFloat: float64 = Convert(getIndex); + let getIndexWord: uintptr = Convert(getIndexFloat); - if (getIndex + elementSize > viewSize) { + let viewOffsetWord: uintptr = dataView.byte_offset; + let viewSizeFloat: float64 = Convert(dataView.byte_length); + let elementSizeFloat: float64 = 
Convert(DataViewElementSize(kind)); + + if (getIndexFloat + elementSizeFloat > viewSizeFloat) { ThrowRangeError(context, kInvalidDataViewAccessorOffset); } - let getIndexFloat: float64 = convert(getIndex); - let getIndexIntptr: intptr = Signed(convert(getIndexFloat)); - let viewOffsetFloat: float64 = convert(viewOffset); - let viewOffsetIntptr: intptr = Signed(convert(viewOffsetFloat)); - - let bufferIndex: intptr = getIndexIntptr + viewOffsetIntptr; + let bufferIndex: uintptr = getIndexWord + viewOffsetWord; if constexpr (kind == UINT8_ELEMENTS) { return LoadDataView8(buffer, bufferIndex, false); @@ -475,10 +473,10 @@ module data_view { let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let is_little_endian : Object = arguments.length > 1 ? + let isLittleEndian : Object = arguments.length > 1 ? arguments[1] : Undefined; - return DataViewGet(context, receiver, offset, is_little_endian, + return DataViewGet(context, receiver, offset, isLittleEndian, UINT16_ELEMENTS); } @@ -487,10 +485,10 @@ module data_view { let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let is_little_endian : Object = arguments.length > 1 ? + let isLittleEndian : Object = arguments.length > 1 ? arguments[1] : Undefined; - return DataViewGet(context, receiver, offset, is_little_endian, + return DataViewGet(context, receiver, offset, isLittleEndian, INT16_ELEMENTS); } @@ -499,10 +497,10 @@ module data_view { let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let is_little_endian : Object = arguments.length > 1 ? + let isLittleEndian : Object = arguments.length > 1 ? arguments[1] : Undefined; - return DataViewGet(context, receiver, offset, is_little_endian, + return DataViewGet(context, receiver, offset, isLittleEndian, UINT32_ELEMENTS); } @@ -511,10 +509,10 @@ module data_view { let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let is_little_endian : Object = arguments.length > 1 ? + let isLittleEndian : Object = arguments.length > 1 ? arguments[1] : Undefined; - return DataViewGet(context, receiver, offset, is_little_endian, + return DataViewGet(context, receiver, offset, isLittleEndian, INT32_ELEMENTS); } @@ -523,10 +521,10 @@ module data_view { let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let is_little_endian : Object = arguments.length > 1 ? + let isLittleEndian : Object = arguments.length > 1 ? arguments[1] : Undefined; - return DataViewGet(context, receiver, offset, is_little_endian, + return DataViewGet(context, receiver, offset, isLittleEndian, FLOAT32_ELEMENTS); } @@ -535,10 +533,10 @@ module data_view { let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let is_little_endian : Object = arguments.length > 1 ? + let isLittleEndian : Object = arguments.length > 1 ? arguments[1] : Undefined; - return DataViewGet(context, receiver, offset, is_little_endian, + return DataViewGet(context, receiver, offset, isLittleEndian, FLOAT64_ELEMENTS); } @@ -547,10 +545,10 @@ module data_view { let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let is_little_endian : Object = arguments.length > 1 ? + let isLittleEndian : Object = arguments.length > 1 ? arguments[1] : Undefined; - return DataViewGet(context, receiver, offset, is_little_endian, + return DataViewGet(context, receiver, offset, isLittleEndian, BIGUINT64_ELEMENTS); } @@ -559,10 +557,10 @@ module data_view { let offset: Object = arguments.length > 0 ? 
arguments[0] : Undefined; - let is_little_endian : Object = arguments.length > 1 ? + let isLittleEndian : Object = arguments.length > 1 ? arguments[1] : Undefined; - return DataViewGet(context, receiver, offset, is_little_endian, + return DataViewGet(context, receiver, offset, isLittleEndian, BIGINT64_ELEMENTS); } @@ -571,85 +569,85 @@ module data_view { extern macro TruncateFloat64ToFloat32(float64): float32; extern macro TruncateFloat64ToWord32(float64): uint32; - extern macro StoreWord8(RawPtr, intptr, uint32): void; + extern macro StoreWord8(RawPtr, uintptr, uint32): void; - macro StoreDataView8(buffer: JSArrayBuffer, offset: intptr, + macro StoreDataView8(buffer: JSArrayBuffer, offset: uintptr, value: uint32) { StoreWord8(buffer.backing_store, offset, value & 0xFF); } - macro StoreDataView16(buffer: JSArrayBuffer, offset: intptr, value: uint32, - requested_little_endian: bool) { - let data_pointer: RawPtr = buffer.backing_store; + macro StoreDataView16(buffer: JSArrayBuffer, offset: uintptr, value: uint32, + requestedLittleEndian: bool) { + let dataPointer: RawPtr = buffer.backing_store; let b0: uint32 = value & 0xFF; let b1: uint32 = (value >>> 8) & 0xFF; - if (requested_little_endian) { - StoreWord8(data_pointer, offset, b0); - StoreWord8(data_pointer, offset + 1, b1); + if (requestedLittleEndian) { + StoreWord8(dataPointer, offset, b0); + StoreWord8(dataPointer, offset + 1, b1); } else { - StoreWord8(data_pointer, offset, b1); - StoreWord8(data_pointer, offset + 1, b0); + StoreWord8(dataPointer, offset, b1); + StoreWord8(dataPointer, offset + 1, b0); } } - macro StoreDataView32(buffer: JSArrayBuffer, offset: intptr, value: uint32, - requested_little_endian: bool) { - let data_pointer: RawPtr = buffer.backing_store; + macro StoreDataView32(buffer: JSArrayBuffer, offset: uintptr, value: uint32, + requestedLittleEndian: bool) { + let dataPointer: RawPtr = buffer.backing_store; let b0: uint32 = value & 0xFF; let b1: uint32 = (value >>> 8) & 0xFF; let b2: uint32 = (value >>> 16) & 0xFF; let b3: uint32 = value >>> 24; // We don't need to mask here. 
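+ // For example, value == 0x12345678 yields b0 == 0x78, b1 == 0x56,
+ // b2 == 0x34 and b3 == 0x12; b3 needs no '& 0xFF' since the logical
+ // shift by 24 already leaves only one byte.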
- if (requested_little_endian) { - StoreWord8(data_pointer, offset, b0); - StoreWord8(data_pointer, offset + 1, b1); - StoreWord8(data_pointer, offset + 2, b2); - StoreWord8(data_pointer, offset + 3, b3); + if (requestedLittleEndian) { + StoreWord8(dataPointer, offset, b0); + StoreWord8(dataPointer, offset + 1, b1); + StoreWord8(dataPointer, offset + 2, b2); + StoreWord8(dataPointer, offset + 3, b3); } else { - StoreWord8(data_pointer, offset, b3); - StoreWord8(data_pointer, offset + 1, b2); - StoreWord8(data_pointer, offset + 2, b1); - StoreWord8(data_pointer, offset + 3, b0); + StoreWord8(dataPointer, offset, b3); + StoreWord8(dataPointer, offset + 1, b2); + StoreWord8(dataPointer, offset + 2, b1); + StoreWord8(dataPointer, offset + 3, b0); } } - macro StoreDataView64(buffer: JSArrayBuffer, offset: intptr, - low_word: uint32, high_word: uint32, - requested_little_endian: bool) { - let data_pointer: RawPtr = buffer.backing_store; - - let b0: uint32 = low_word & 0xFF; - let b1: uint32 = (low_word >>> 8) & 0xFF; - let b2: uint32 = (low_word >>> 16) & 0xFF; - let b3: uint32 = low_word >>> 24; - - let b4: uint32 = high_word & 0xFF; - let b5: uint32 = (high_word >>> 8) & 0xFF; - let b6: uint32 = (high_word >>> 16) & 0xFF; - let b7: uint32 = high_word >>> 24; - - - if (requested_little_endian) { - StoreWord8(data_pointer, offset, b0); - StoreWord8(data_pointer, offset + 1, b1); - StoreWord8(data_pointer, offset + 2, b2); - StoreWord8(data_pointer, offset + 3, b3); - StoreWord8(data_pointer, offset + 4, b4); - StoreWord8(data_pointer, offset + 5, b5); - StoreWord8(data_pointer, offset + 6, b6); - StoreWord8(data_pointer, offset + 7, b7); + macro StoreDataView64(buffer: JSArrayBuffer, offset: uintptr, + lowWord: uint32, highWord: uint32, + requestedLittleEndian: bool) { + let dataPointer: RawPtr = buffer.backing_store; + + let b0: uint32 = lowWord & 0xFF; + let b1: uint32 = (lowWord >>> 8) & 0xFF; + let b2: uint32 = (lowWord >>> 16) & 0xFF; + let b3: uint32 = lowWord >>> 24; + + let b4: uint32 = highWord & 0xFF; + let b5: uint32 = (highWord >>> 8) & 0xFF; + let b6: uint32 = (highWord >>> 16) & 0xFF; + let b7: uint32 = highWord >>> 24; + + + if (requestedLittleEndian) { + StoreWord8(dataPointer, offset, b0); + StoreWord8(dataPointer, offset + 1, b1); + StoreWord8(dataPointer, offset + 2, b2); + StoreWord8(dataPointer, offset + 3, b3); + StoreWord8(dataPointer, offset + 4, b4); + StoreWord8(dataPointer, offset + 5, b5); + StoreWord8(dataPointer, offset + 6, b6); + StoreWord8(dataPointer, offset + 7, b7); } else { - StoreWord8(data_pointer, offset, b7); - StoreWord8(data_pointer, offset + 1, b6); - StoreWord8(data_pointer, offset + 2, b5); - StoreWord8(data_pointer, offset + 3, b4); - StoreWord8(data_pointer, offset + 4, b3); - StoreWord8(data_pointer, offset + 5, b2); - StoreWord8(data_pointer, offset + 6, b1); - StoreWord8(data_pointer, offset + 7, b0); + StoreWord8(dataPointer, offset, b7); + StoreWord8(dataPointer, offset + 1, b6); + StoreWord8(dataPointer, offset + 2, b5); + StoreWord8(dataPointer, offset + 3, b4); + StoreWord8(dataPointer, offset + 4, b3); + StoreWord8(dataPointer, offset + 5, b2); + StoreWord8(dataPointer, offset + 6, b1); + StoreWord8(dataPointer, offset + 7, b0); } } @@ -660,54 +658,54 @@ module data_view { // We might get here a BigInt that is bigger than 64 bits, but we're only // interested in the 64 lowest ones. This means the lowest BigInt digit // on 64-bit platforms, and the 2 lowest BigInt digits on 32-bit ones. 
- macro StoreDataViewBigInt(buffer: JSArrayBuffer, offset: intptr,
- bigint_value: BigInt,
- requested_little_endian: bool) {
+ macro StoreDataViewBigInt(buffer: JSArrayBuffer, offset: uintptr,
+ bigIntValue: BigInt,
+ requestedLittleEndian: bool) {
- let length: uintptr = DataViewDecodeBigIntLength(bigint_value);
- let sign: uintptr = DataViewDecodeBigIntSign(bigint_value);
+ let length: uintptr = DataViewDecodeBigIntLength(bigIntValue);
+ let sign: uintptr = DataViewDecodeBigIntSign(bigIntValue);
// The 32-bit words that will hold the BigInt's value in
// two's complement representation.
- let low_word: uint32 = 0;
- let high_word: uint32 = 0;
+ let lowWord: uint32 = 0;
+ let highWord: uint32 = 0;
// The length is nonzero if and only if the BigInt's value is nonzero.
if (length != 0) {
if constexpr (Is64()) {
// There is always exactly 1 BigInt digit to load in this case.
- let value: uintptr = LoadBigIntDigit(bigint_value, 0);
- low_word = convert<uint32>(value); // Truncates value to 32 bits.
- high_word = convert<uint32>(value >>> 32);
+ let value: uintptr = LoadBigIntDigit(bigIntValue, 0);
+ lowWord = Convert<uint32>(value); // Truncates value to 32 bits.
+ highWord = Convert<uint32>(value >>> 32);
} else {
// There might be either 1 or 2 BigInt digits we need to load.
- low_word = convert<uint32>(LoadBigIntDigit(bigint_value, 0));
+ lowWord = Convert<uint32>(LoadBigIntDigit(bigIntValue, 0));
if (length >= 2) {
// Only load the second digit if there is one.
- high_word = convert<uint32>(LoadBigIntDigit(bigint_value, 1));
+ highWord = Convert<uint32>(LoadBigIntDigit(bigIntValue, 1));
}
}
}
- if (sign != 0) { // The number is negative, convert it.
- high_word = Unsigned(0 - Signed(high_word));
- if (low_word != 0) {
- high_word = Unsigned(Signed(high_word) - 1);
+ if (sign != 0) { // The number is negative, convert it.
+ highWord = Unsigned(0 - Signed(highWord));
+ if (lowWord != 0) {
+ highWord = Unsigned(Signed(highWord) - 1);
}
- low_word = Unsigned(0 - Signed(low_word));
+ lowWord = Unsigned(0 - Signed(lowWord));
}
- StoreDataView64(buffer, offset, low_word, high_word,
- requested_little_endian);
+ StoreDataView64(buffer, offset, lowWord, highWord,
+ requestedLittleEndian);
}
macro DataViewSet(context: Context,
receiver: Object, offset: Object,
value: Object,
- requested_little_endian: Object,
+ requestedLittleEndian: Object,
kind: constexpr ElementsKind): Object {
- let data_view: JSDataView = ValidateDataView(
+ let dataView: JSDataView = ValidateDataView(
context, receiver, MakeDataViewSetterNameString(kind));
let getIndex: Number;
@@ -718,17 +716,17 @@ module data_view {
ThrowRangeError(context, kInvalidDataViewAccessorOffset);
}
- let littleEndian: bool = ToBoolean(requested_little_endian);
- let buffer: JSArrayBuffer = data_view.buffer;
+ let littleEndian: bool = ToBoolean(requestedLittleEndian);
+ let buffer: JSArrayBuffer = dataView.buffer;
- let bigint_value: BigInt;
- let num_value: Number;
+ let bigIntValue: BigInt;
+ let numValue: Number;
// According to ES6 section 24.2.1.2 SetViewValue, we must perform
// the conversion before doing the bounds check.
if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) { - bigint_value = ToBigInt(context, value); + bigIntValue = ToBigInt(context, value); } else { - num_value = ToNumber(context, value); + numValue = ToNumber(context, value); } if (IsDetachedBuffer(buffer)) { @@ -736,49 +734,47 @@ module data_view { MakeDataViewSetterNameString(kind)); } - let viewOffset: Number = data_view.byte_offset; - let viewSize: Number = data_view.byte_length; - let elementSize: Number = DataViewElementSize(kind); + let getIndexFloat: float64 = Convert(getIndex); + let getIndexWord: uintptr = Convert(getIndexFloat); - if (getIndex + elementSize > viewSize) { + let viewOffsetWord: uintptr = dataView.byte_offset; + let viewSizeFloat: float64 = Convert(dataView.byte_length); + let elementSizeFloat: float64 = Convert(DataViewElementSize(kind)); + + if (getIndexFloat + elementSizeFloat > viewSizeFloat) { ThrowRangeError(context, kInvalidDataViewAccessorOffset); } - let getIndexFloat: float64 = convert(getIndex); - let getIndexIntptr: intptr = Signed(convert(getIndexFloat)); - let viewOffsetFloat: float64 = convert(viewOffset); - let viewOffsetIntptr: intptr = Signed(convert(viewOffsetFloat)); - - let bufferIndex: intptr = getIndexIntptr + viewOffsetIntptr; + let bufferIndex: uintptr = getIndexWord + viewOffsetWord; if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) { - StoreDataViewBigInt(buffer, bufferIndex, bigint_value, + StoreDataViewBigInt(buffer, bufferIndex, bigIntValue, littleEndian); } else { - let double_value: float64 = ChangeNumberToFloat64(num_value); + let doubleValue: float64 = ChangeNumberToFloat64(numValue); if constexpr (kind == UINT8_ELEMENTS || kind == INT8_ELEMENTS) { StoreDataView8(buffer, bufferIndex, - TruncateFloat64ToWord32(double_value)); + TruncateFloat64ToWord32(doubleValue)); } else if constexpr (kind == UINT16_ELEMENTS || kind == INT16_ELEMENTS) { StoreDataView16(buffer, bufferIndex, - TruncateFloat64ToWord32(double_value), littleEndian); + TruncateFloat64ToWord32(doubleValue), littleEndian); } else if constexpr (kind == UINT32_ELEMENTS || kind == INT32_ELEMENTS) { StoreDataView32(buffer, bufferIndex, - TruncateFloat64ToWord32(double_value), littleEndian); + TruncateFloat64ToWord32(doubleValue), littleEndian); } else if constexpr (kind == FLOAT32_ELEMENTS) { - let float_value: float32 = TruncateFloat64ToFloat32(double_value); + let floatValue: float32 = TruncateFloat64ToFloat32(doubleValue); StoreDataView32(buffer, bufferIndex, - BitcastFloat32ToInt32(float_value), littleEndian); + BitcastFloat32ToInt32(floatValue), littleEndian); } else if constexpr (kind == FLOAT64_ELEMENTS) { - let low_word: uint32 = Float64ExtractLowWord32(double_value); - let high_word: uint32 = Float64ExtractHighWord32(double_value); - StoreDataView64(buffer, bufferIndex, low_word, high_word, + let lowWord: uint32 = Float64ExtractLowWord32(doubleValue); + let highWord: uint32 = Float64ExtractHighWord32(doubleValue); + StoreDataView64(buffer, bufferIndex, lowWord, highWord, littleEndian); } } @@ -817,11 +813,11 @@ module data_view { let value : Object = arguments.length > 1 ? arguments[1] : Undefined; - let is_little_endian : Object = arguments.length > 2 ? + let isLittleEndian : Object = arguments.length > 2 ? 
arguments[2] : Undefined; return DataViewSet(context, receiver, offset, value, - is_little_endian, UINT16_ELEMENTS); + isLittleEndian, UINT16_ELEMENTS); } javascript builtin DataViewPrototypeSetInt16( @@ -832,11 +828,11 @@ module data_view { let value : Object = arguments.length > 1 ? arguments[1] : Undefined; - let is_little_endian : Object = arguments.length > 2 ? + let isLittleEndian : Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet(context, receiver, offset, value, - is_little_endian, INT16_ELEMENTS); + isLittleEndian, INT16_ELEMENTS); } javascript builtin DataViewPrototypeSetUint32( @@ -847,11 +843,11 @@ module data_view { let value : Object = arguments.length > 1 ? arguments[1] : Undefined; - let is_little_endian : Object = arguments.length > 2 ? + let isLittleEndian : Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet(context, receiver, offset, value, - is_little_endian, UINT32_ELEMENTS); + isLittleEndian, UINT32_ELEMENTS); } javascript builtin DataViewPrototypeSetInt32( @@ -862,11 +858,11 @@ module data_view { let value : Object = arguments.length > 1 ? arguments[1] : Undefined; - let is_little_endian : Object = arguments.length > 2 ? + let isLittleEndian : Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet(context, receiver, offset, value, - is_little_endian, INT32_ELEMENTS); + isLittleEndian, INT32_ELEMENTS); } javascript builtin DataViewPrototypeSetFloat32( @@ -877,11 +873,11 @@ module data_view { let value : Object = arguments.length > 1 ? arguments[1] : Undefined; - let is_little_endian : Object = arguments.length > 2 ? + let isLittleEndian : Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet(context, receiver, offset, value, - is_little_endian, FLOAT32_ELEMENTS); + isLittleEndian, FLOAT32_ELEMENTS); } javascript builtin DataViewPrototypeSetFloat64( @@ -892,11 +888,11 @@ module data_view { let value : Object = arguments.length > 1 ? arguments[1] : Undefined; - let is_little_endian : Object = arguments.length > 2 ? + let isLittleEndian : Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet(context, receiver, offset, value, - is_little_endian, FLOAT64_ELEMENTS); + isLittleEndian, FLOAT64_ELEMENTS); } javascript builtin DataViewPrototypeSetBigUint64( @@ -907,11 +903,11 @@ module data_view { let value : Object = arguments.length > 1 ? arguments[1] : Undefined; - let is_little_endian : Object = arguments.length > 2 ? + let isLittleEndian : Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet(context, receiver, offset, value, - is_little_endian, BIGUINT64_ELEMENTS); + isLittleEndian, BIGUINT64_ELEMENTS); } javascript builtin DataViewPrototypeSetBigInt64( @@ -922,11 +918,11 @@ module data_view { let value : Object = arguments.length > 1 ? arguments[1] : Undefined; - let is_little_endian : Object = arguments.length > 2 ? + let isLittleEndian : Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet(context, receiver, offset, value, - is_little_endian, BIGINT64_ELEMENTS); + isLittleEndian, BIGINT64_ELEMENTS); } } diff --git a/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc new file mode 100644 index 00000000000000..8266807b43481d --- /dev/null +++ b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc @@ -0,0 +1,97 @@ +// Copyright 2018 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fstream>
+#include <iostream>
+
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+void WriteBytecode(std::ofstream& out, Bytecode bytecode,
+ OperandScale operand_scale, int* count, int offset_table[],
+ int table_index) {
+ DCHECK_NOT_NULL(count);
+ if (Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
+ out << " \\\n V(" << Bytecodes::ToString(bytecode, operand_scale, "")
+ << "Handler, interpreter::OperandScale::k" << operand_scale
+ << ", interpreter::Bytecode::k" << Bytecodes::ToString(bytecode) << ")";
+ offset_table[table_index] = *count;
+ (*count)++;
+ } else {
+ offset_table[table_index] = -1;
+ }
+}
+
+void WriteHeader(const char* header_filename) {
+ std::ofstream out(header_filename);
+
+ out << "// Automatically generated from interpreter/bytecodes.h\n"
+ << "// The following list macro is used to populate the builtins list\n"
+ << "// with the bytecode handlers\n\n"
+ << "#ifndef V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n"
+ << "#define V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n\n"
+ << "namespace v8 {\n"
+ << "namespace internal {\n\n"
+ << "#define BUILTIN_LIST_BYTECODE_HANDLERS(V)";
+
+ constexpr int kTableSize =
+ BytecodeOperands::kOperandScaleCount * Bytecodes::kBytecodeCount;
+ int offset_table[kTableSize];
+ int count = 0;
+ int index = 0;
+
+#define ADD_BYTECODES(Name, ...) \
+ WriteBytecode(out, Bytecode::k##Name, operand_scale, &count, offset_table, \
+ index++);
+ OperandScale operand_scale = OperandScale::kSingle;
+ BYTECODE_LIST(ADD_BYTECODES)
+ int single_count = count;
+ operand_scale = OperandScale::kDouble;
+ BYTECODE_LIST(ADD_BYTECODES)
+ int wide_count = count - single_count;
+ operand_scale = OperandScale::kQuadruple;
+ BYTECODE_LIST(ADD_BYTECODES)
+#undef ADD_BYTECODES
+ int extra_wide_count = count - wide_count - single_count;
+ CHECK_GT(single_count, wide_count);
+ CHECK_EQ(single_count, Bytecodes::kBytecodeCount);
+ CHECK_EQ(wide_count, extra_wide_count);
+ out << "\n\nconst int kNumberOfBytecodeHandlers = " << single_count << ";\n"
+ << "const int kNumberOfWideBytecodeHandlers = " << wide_count << ";\n\n"
+ << "// Mapping from (Bytecode + OperandScaleAsIndex * |Bytecodes|) to\n"
+ << "// a dense form with all the illegal Bytecode/OperandScale\n"
+ << "// combinations removed. Used to index into the builtins table.\n"
+ << "constexpr int kBytecodeToBuiltinsMapping[" << kTableSize << "] = {\n"
+ << " ";
+
+ for (int i = 0; i < kTableSize; ++i) {
+ if (i == single_count || i == 2 * single_count) {
+ out << "\n ";
+ }
+ out << offset_table[i] << ", ";
+ }
+
+ out << "};\n\n"
+ << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "#endif // V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n";
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+int main(int argc, const char* argv[]) {
+ if (argc != 2) {
+ std::cerr << "Usage: " << argv[0] << " <output filename>\n";
+ std::exit(1);
+ }
+
+ v8::internal::interpreter::WriteHeader(argv[1]);
+
+ return 0;
+}
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 4707667bbfdac8..6c4b81008fe6e4 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -22,6 +22,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
__ mov(kJavaScriptCallExtraArg1Register,
Immediate(ExternalReference::Create(address)));
if (exit_frame_type == BUILTIN_EXIT) {
@@ -70,6 +71,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
@@ -88,10 +90,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiUntag(eax);
// The receiver for the builtin/api call.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+ // Set up pointer to last argument. We are using esi as a scratch register.
+ __ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
Label loop, entry;
@@ -100,7 +102,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- eax: number of arguments (untagged)
// -- edi: constructor function
// -- edx: new target
- // -- ebx: pointer to last argument
+ // -- esi: pointer to last argument
// -- ecx: counter
// -- sp[0*kPointerSize]: the hole (receiver)
// -- sp[1*kPointerSize]: number of arguments (tagged)
@@ -108,7 +110,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
__ jmp(&entry);
__ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
+ __ push(Operand(esi, ecx, times_4, 0));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
@@ -118,20 +120,22 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// edi: constructor function
// edx: new target
ParameterCount actual(eax);
+ // Reload context from the frame.
+ __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
__ InvokeFunction(edi, edx, actual, CALL_FUNCTION);
// Restore context from the frame.
__ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
- __ mov(ebx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
+ __ mov(edx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, edx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ PushReturnAddressFrom(ecx);
__ ret(0);
}
@@ -139,6 +143,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax: number of arguments (untagged)
// -- edi: constructor function
@@ -158,7 +163,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(esi);
__ Push(ecx);
__ Push(edi);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ PushRoot(RootIndex::kTheHoleValue);
__ Push(edx);
// ----------- S t a t e -------------
@@ -169,8 +174,8 @@
// -- sp[4*kPointerSize]: context
// -----------------------------------
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test(FieldOperand(ebx, SharedFunctionInfo::kFlagsOffset),
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test(FieldOperand(eax, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver);
@@ -182,7 +187,7 @@
// Else: use TheHoleValue as receiver for constructor call
__ bind(&not_create_implicit_receiver);
- __ LoadRoot(eax, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(eax, RootIndex::kTheHoleValue);
// ----------- S t a t e -------------
// -- eax: implicit receiver
@@ -216,13 +221,12 @@
// -- sp[5*kPointerSize]: context
// -----------------------------------
- // Restore constructor function and argument count.
- __ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
+ // Restore argument count.
__ mov(eax, Operand(ebp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(eax);
// Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+ __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
Label loop, entry;
@@ -230,23 +234,24 @@
// ----------- S t a t e -------------
// -- eax: number of arguments (untagged)
// -- edx: new target
- // -- ebx: pointer to last argument
+ // -- edi: pointer to last argument
// -- ecx: counter (tagged)
// -- sp[0*kPointerSize]: implicit receiver
// -- sp[1*kPointerSize]: implicit receiver
// -- sp[2*kPointerSize]: padding
- // -- edi and sp[3*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: constructor function
// -- sp[4*kPointerSize]: number of arguments (tagged)
// -- sp[5*kPointerSize]: context
// -----------------------------------
__ jmp(&entry, Label::kNear);
__ bind(&loop);
- __ Push(Operand(ebx, ecx, times_pointer_size, 0));
+ __ Push(Operand(edi, ecx, times_pointer_size, 0));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
- // Call the function.
+ // Restore and call the constructor function.
+ __ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset)); ParameterCount actual(eax); __ InvokeFunction(edi, edx, actual, CALL_FUNCTION); @@ -272,8 +277,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { Label use_receiver, do_throw, leave_frame; // If the result is undefined, we jump out to using the implicit receiver. - __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &use_receiver, - Label::kNear); + __ JumpIfRoot(eax, RootIndex::kUndefinedValue, &use_receiver, Label::kNear); // Otherwise we do a smi check and fall through to check if the return value // is a valid receiver. @@ -295,55 +299,55 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // on-stack receiver as the result. __ bind(&use_receiver); __ mov(eax, Operand(esp, 0 * kPointerSize)); - __ JumpIfRoot(eax, Heap::kTheHoleValueRootIndex, &do_throw); + __ JumpIfRoot(eax, RootIndex::kTheHoleValue, &do_throw); __ bind(&leave_frame); // Restore smi-tagged arguments count from the frame. - __ mov(ebx, Operand(ebp, ConstructFrameConstants::kLengthOffset)); + __ mov(edx, Operand(ebp, ConstructFrameConstants::kLengthOffset)); // Leave construct frame. } // Remove caller arguments from the stack and return. STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); __ pop(ecx); - __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver + __ lea(esp, Operand(esp, edx, times_2, 1 * kPointerSize)); // 1 ~ receiver __ push(ecx); __ ret(0); } void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); Generate_JSBuiltinsConstructStubHelper(masm); } void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); FrameScope scope(masm, StackFrame::INTERNAL); __ push(edi); __ CallRuntime(Runtime::kThrowConstructedNonConstructable); } static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, - Register scratch1, Register scratch2, - Label* stack_overflow, + Register scratch, Label* stack_overflow, bool include_receiver = false) { // Check the stack for overflow. We are not trying to catch // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. ExternalReference real_stack_limit = ExternalReference::address_of_real_stack_limit(masm->isolate()); - __ mov(scratch1, __ StaticVariable(real_stack_limit)); - // Make scratch2 the space we have left. The stack might already be overflowed - // here which will cause scratch2 to become negative. - __ mov(scratch2, esp); - __ sub(scratch2, scratch1); - // Make scratch1 the space we need for the array when it is unrolled onto the - // stack. - __ mov(scratch1, num_args); + // Compute the space that is left as a negative number in scratch. If + // we already overflowed, this will be a positive number. + __ mov(scratch, __ StaticVariable(real_stack_limit)); + __ sub(scratch, esp); + // Add the size of the arguments. + static_assert(kPointerSize == 4, + "The next instruction assumes kPointerSize == 4"); + __ lea(scratch, Operand(scratch, num_args, times_4, 0)); if (include_receiver) { - __ add(scratch1, Immediate(1)); + __ add(scratch, Immediate(kPointerSize)); } - __ shl(scratch1, kPointerSizeLog2); - // Check if the arguments will overflow the stack. - __ cmp(scratch2, scratch1); - __ j(less_equal, stack_overflow); // Signed comparison. + // See if we overflowed, i.e. scratch is positive. 
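+ // That is, scratch == real_stack_limit - esp + size-of-arguments, which is
+ // positive exactly when esp minus the argument size is below the limit,
+ // including the case where esp already started below the real stack limit.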
+ __ cmp(scratch, Immediate(0)); + __ j(greater, stack_overflow); // Signed comparison. } static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, @@ -353,26 +357,29 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, { FrameScope scope(masm, StackFrame::INTERNAL); + const Register scratch1 = edx; + const Register scratch2 = edi; + // Setup the context (we need to use the caller context from the isolate). ExternalReference context_address = ExternalReference::Create( IsolateAddressId::kContextAddress, masm->isolate()); __ mov(esi, __ StaticVariable(context_address)); - // Load the previous frame pointer (ebx) to access C arguments - __ mov(ebx, Operand(ebp, 0)); + // Load the previous frame pointer (edx) to access C arguments + __ mov(scratch1, Operand(ebp, 0)); // Push the function and the receiver onto the stack. - __ push(Operand(ebx, EntryFrameConstants::kFunctionArgOffset)); - __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset)); + __ push(Operand(scratch1, EntryFrameConstants::kFunctionArgOffset)); + __ push(Operand(scratch1, EntryFrameConstants::kReceiverArgOffset)); // Load the number of arguments and setup pointer to the arguments. - __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset)); - __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset)); + __ mov(eax, Operand(scratch1, EntryFrameConstants::kArgcOffset)); + __ mov(scratch1, Operand(scratch1, EntryFrameConstants::kArgvOffset)); // Check if we have enough stack space to push all arguments. - // Argument count in eax. Clobbers ecx and edx. + // Argument count in eax. Clobbers ecx. Label enough_stack_space, stack_overflow; - Generate_StackOverflowCheck(masm, eax, ecx, edx, &stack_overflow); + Generate_StackOverflowCheck(masm, eax, ecx, &stack_overflow); __ jmp(&enough_stack_space); __ bind(&stack_overflow); @@ -387,19 +394,20 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ Move(ecx, Immediate(0)); __ jmp(&entry, Label::kNear); __ bind(&loop); - __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv - __ push(Operand(edx, 0)); // dereference handle + // Push the parameter from argv. + __ mov(scratch2, Operand(scratch1, ecx, times_4, 0)); + __ push(Operand(scratch2, 0)); // dereference handle __ inc(ecx); __ bind(&entry); __ cmp(ecx, eax); __ j(not_equal, &loop); // Load the previous frame pointer (ebx) to access C arguments - __ mov(ebx, Operand(ebp, 0)); + __ mov(scratch2, Operand(ebp, 0)); // Get the new.target and function from the frame. - __ mov(edx, Operand(ebx, EntryFrameConstants::kNewTargetArgOffset)); - __ mov(edi, Operand(ebx, EntryFrameConstants::kFunctionArgOffset)); + __ mov(edx, Operand(scratch2, EntryFrameConstants::kNewTargetArgOffset)); + __ mov(edi, Operand(scratch2, EntryFrameConstants::kFunctionArgOffset)); // Invoke the code. 
Handle<Code> builtin = is_construct
@@ -415,10 +423,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
Generate_JSEntryTrampolineHelper(masm, false);
}
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
Generate_JSEntryTrampolineHelper(masm, true);
}
@@ -437,6 +447,8 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ Assembler::SupportsRootRegisterScope supports_root_register(masm);
+
// ----------- S t a t e -------------
// -- eax : the value to pass to the generator
// -- edx : the JSGeneratorObject to resume
@@ -471,7 +483,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(esp, ecx, RootIndex::kRealStackLimit);
__ j(below, &stack_overflow);
// Pop return address.
@@ -488,26 +500,34 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- esp[0] : generator receiver
// -----------------------------------
- // Copy the function arguments from the generator object's register file.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ movzx_w(
- ecx, FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(ebx,
- FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
{
- Label done_loop, loop;
- __ Set(edi, 0);
+ Assembler::AllowExplicitEbxAccessScope root_is_spilled(masm);
+ __ movd(xmm0, ebx);
- __ bind(&loop);
- __ cmp(edi, ecx);
- __ j(greater_equal, &done_loop);
- __ Push(
- FieldOperand(ebx, edi, times_pointer_size, FixedArray::kHeaderSize));
- __ add(edi, Immediate(1));
- __ jmp(&loop);
+ // Copy the function arguments from the generator object's register file.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ movzx_w(ecx, FieldOperand(
+ ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(ebx,
+ FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
+ {
+ Label done_loop, loop;
+ __ Set(edi, 0);
- __ bind(&done_loop);
+ __ bind(&loop);
+ __ cmp(edi, ecx);
+ __ j(greater_equal, &done_loop);
+ __ Push(
+ FieldOperand(ebx, edi, times_pointer_size, FixedArray::kHeaderSize));
+ __ add(edi, Immediate(1));
+ __ jmp(&loop);
+
+ __ bind(&done_loop);
+ }
+
+ // Restore registers.
__ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
+ __ movd(ebx, xmm0);
}
// Underlying function needs to have bytecode available.
@@ -542,7 +562,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(edx);
__ Push(edi);
// Push hole as receiver since we do not use it for stepping.
- __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(edx); __ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset)); @@ -567,10 +587,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { } } -static void ReplaceClosureCodeWithOptimizedCode( - MacroAssembler* masm, Register optimized_code, Register closure, - Register scratch1, Register scratch2, Register scratch3) { - +static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, + Register optimized_code, + Register closure, + Register scratch1, + Register scratch2) { // Store the optimized code in the closure. __ mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code); __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. @@ -611,21 +632,25 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, } static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register feedback_vector, Register scratch) { // ----------- S t a t e ------------- // -- eax : argument count (preserved for callee if needed, and caller) // -- edx : new target (preserved for callee if needed, and caller) // -- edi : target function (preserved for callee if needed, and caller) - // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK(!AreAliased(feedback_vector, eax, edx, edi, scratch)); + DCHECK(!AreAliased(eax, edx, edi, scratch)); Label optimized_code_slot_is_weak_ref, fallthrough; Register closure = edi; - Register optimized_code_entry = scratch; + // Load the feedback vector from the closure. + Register feedback_vector = scratch; + __ mov(feedback_vector, + FieldOperand(closure, JSFunction::kFeedbackCellOffset)); + __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset)); + // Load the optimized code from the feedback vector and re-use the register. + Register optimized_code_entry = scratch; __ mov(optimized_code_entry, FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset)); @@ -686,10 +711,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, // Optimized code is good, get it into the closure and link the closure into // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, - edx, eax, feedback_vector); + edx, eax); static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch"); __ Move(ecx, optimized_code_entry); __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag)); @@ -716,15 +739,19 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, Register bytecode_array, Register bytecode_offset, - Register bytecode, Register scratch1, + Register scratch1, Register scratch2, Label* if_return) { Register bytecode_size_table = scratch1; + Register bytecode = scratch2; DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table, bytecode)); - __ Move(bytecode_size_table, Immediate(ExternalReference::bytecode_size_table_address())); + // Load the current bytecode. + __ movzx_b(bytecode, Operand(kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister, times_1, 0)); + // Check if the bytecode is a Wide or ExtraWide prefix bytecode. 
Label process_bytecode, extra_wide; STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide)); @@ -732,7 +759,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide)); STATIC_ASSERT(3 == static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide)); - __ cmpb(bytecode, Immediate(0x3)); + __ cmp(bytecode, Immediate(0x3)); __ j(above, &process_bytecode, Label::kNear); __ test(bytecode, Immediate(0x1)); __ j(not_equal, &extra_wide, Label::kNear); @@ -754,9 +781,9 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, __ bind(&process_bytecode); // Bailout to the return label if this is a return bytecode. -#define JUMP_IF_EQUAL(NAME) \ - __ cmpb(bytecode, \ - Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \ +#define JUMP_IF_EQUAL(NAME) \ + __ cmp(bytecode, \ + Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \ __ j(equal, if_return); RETURN_BYTECODE_LIST(JUMP_IF_EQUAL) #undef JUMP_IF_EQUAL @@ -780,18 +807,23 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, // The function builds an interpreter frame. See InterpreterFrameConstants in // frames.h for its layout. void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); ProfileEntryHookStub::MaybeCallEntryHook(masm); + __ VerifyRootRegister(); + Register closure = edi; - Register feedback_vector = ebx; - // Load the feedback vector from the closure. + // Read off the optimized code slot in the closure's feedback vector, and if + // there is optimized code or an optimization marker, call that instead. + MaybeTailCallOptimizedCodeSlot(masm, ecx); + + // Load the feedback vector and increment the invocation count. + Register feedback_vector = ecx; __ mov(feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset)); - // Read off the optimized code slot in the feedback vector, and if there - // is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx); + __ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set @@ -811,8 +843,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, eax); __ Pop(eax); - __ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); - // Check function data field is actually a BytecodeArray object. if (FLAG_debug_code) { __ AssertNotSmi(kInterpreterBytecodeArrayRegister); @@ -836,16 +866,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Allocate the local and temporary register file on the stack. { // Load frame size from the BytecodeArray object. - __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kFrameSizeOffset)); + Register frame_size = ecx; + __ mov(frame_size, FieldOperand(kInterpreterBytecodeArrayRegister, + BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit.
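The prefix handling above is dense; the following sketch models it in plain C++, assuming a flat size table with one section per operand scale (V8's actual table layout and the return-bytecode bailout are omitted):

#include <cstdint>

// Bytecodes 0..3 are the Wide/ExtraWide (and DebugBreak*) prefixes; odd
// values select the extra-wide section of the size table, even values the
// wide section. Everything else uses the untouched-scale section.
int AdvanceOffset(const uint8_t* bytecodes, int offset,
                  const int* size_table, int bytecode_count) {
  int bytecode = bytecodes[offset];
  int section = 0;
  if (bytecode <= 3) {
    section = (bytecode & 1) ? 2 : 1;  // ExtraWide : Wide
    ++offset;                          // skip the prefix byte
    bytecode = bytecodes[offset];
  }
  return offset + size_table[section * bytecode_count + bytecode];
}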
Label ok; - __ mov(ecx, esp); - __ sub(ecx, ebx); + __ mov(eax, esp); + __ sub(eax, frame_size); ExternalReference stack_limit = ExternalReference::address_of_real_stack_limit(masm->isolate()); - __ cmp(ecx, __ StaticVariable(stack_limit)); + __ cmp(eax, __ StaticVariable(stack_limit)); __ j(above_equal, &ok); __ CallRuntime(Runtime::kThrowStackOverflow); __ bind(&ok); @@ -860,7 +891,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ push(eax); // Continue loop if not done. __ bind(&loop_check); - __ sub(ebx, Immediate(kPointerSize)); + __ sub(frame_size, Immediate(kPointerSize)); __ j(greater_equal, &loop_header); } @@ -876,7 +907,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ bind(&no_incoming_new_target_or_generator_register); // Load accumulator and bytecode offset into registers. - __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ mov(kInterpreterBytecodeOffsetRegister, Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag)); @@ -887,11 +918,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ mov(kInterpreterDispatchTableRegister, Immediate(ExternalReference::interpreter_dispatch_table_address( masm->isolate()))); - __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister, + __ movzx_b(ecx, Operand(kInterpreterBytecodeArrayRegister, kInterpreterBytecodeOffsetRegister, times_1, 0)); __ mov( kJavaScriptCallCodeStartRegister, - Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0)); + Operand(kInterpreterDispatchTableRegister, ecx, times_pointer_size, 0)); + __ VerifyRootRegister(); __ call(kJavaScriptCallCodeStartRegister); masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset()); @@ -907,16 +939,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Either return, or advance to the next bytecode and dispatch. Label do_return; - __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister, times_1, 0)); AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister, ebx, ecx, - &do_return); + kInterpreterBytecodeOffsetRegister, ecx, + kInterpreterDispatchTableRegister, &do_return); __ jmp(&do_dispatch); __ bind(&do_return); // The return value is in eax. - LeaveInterpreterFrame(masm, ebx, ecx); + LeaveInterpreterFrame(masm, edx, ecx); + __ VerifyRootRegister(); __ ret(0); } @@ -924,6 +955,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register array_limit, Register start_address) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); // ----------- S t a t e ------------- // -- start_address : Pointer to the last argument in the args array. // -- array_limit : Pointer to one before the first argument in the @@ -943,62 +975,62 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm, void Builtins::Generate_InterpreterPushArgsThenCallImpl( MacroAssembler* masm, ConvertReceiverMode receiver_mode, InterpreterPushArgsMode mode) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); DCHECK(mode != InterpreterPushArgsMode::kArrayFunction); // ----------- S t a t e ------------- // -- eax : the number of arguments (not including the receiver) - // -- ebx : the address of the first argument to be pushed. 
Subsequent + // -- ecx : the address of the first argument to be pushed. Subsequent // arguments should be consecutive above this, in the same order as // they are to be pushed onto the stack. // -- edi : the target to call (can be any Object). // ----------------------------------- + + const Register scratch = edx; + const Register argv = ecx; + Label stack_overflow; - // Compute the expected number of arguments. - __ mov(ecx, eax); - __ add(ecx, Immediate(1)); // Add one for receiver. + // Add a stack check before pushing the arguments. + Generate_StackOverflowCheck(masm, eax, scratch, &stack_overflow, true); - // Add a stack check before pushing the arguments. We need an extra register - // to perform a stack check. So push it onto the stack temporarily. This - // might cause stack overflow, but it will be detected by the check. - __ Push(edi); - Generate_StackOverflowCheck(masm, ecx, edx, edi, &stack_overflow); - __ Pop(edi); + __ movd(xmm0, eax); // Spill number of arguments. + + // Compute the expected number of arguments. + __ mov(scratch, eax); + __ add(scratch, Immediate(1)); // Add one for receiver. // Pop return address to allow tail-call after pushing arguments. - __ Pop(edx); + __ PopReturnAddressTo(eax); // Push "undefined" as the receiver arg if we need to. if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - __ PushRoot(Heap::kUndefinedValueRootIndex); - __ sub(ecx, Immediate(1)); // Subtract one for receiver. + __ PushRoot(RootIndex::kUndefinedValue); + __ sub(scratch, Immediate(1)); // Subtract one for receiver. } // Find the address of the last argument. - __ shl(ecx, kPointerSizeLog2); - __ neg(ecx); - __ add(ecx, ebx); - Generate_InterpreterPushArgs(masm, ecx, ebx); - - if (mode == InterpreterPushArgsMode::kWithFinalSpread) { - __ Pop(ebx); // Pass the spread in a register - __ sub(eax, Immediate(1)); // Subtract one for spread - } + __ shl(scratch, kPointerSizeLog2); + __ neg(scratch); + __ add(scratch, argv); + Generate_InterpreterPushArgs(masm, scratch, argv); // Call the target. - __ Push(edx); // Re-push return address. if (mode == InterpreterPushArgsMode::kWithFinalSpread) { + __ Pop(ecx); // Pass the spread in a register + __ PushReturnAddressFrom(eax); + __ movd(eax, xmm0); // Restore number of arguments. + __ sub(eax, Immediate(1)); // Subtract one for spread __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread), RelocInfo::CODE_TARGET); } else { + __ PushReturnAddressFrom(eax); + __ movd(eax, xmm0); // Restore number of arguments. __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny), RelocInfo::CODE_TARGET); } __ bind(&stack_overflow); { - // Pop the temporary registers, so that return address is on top of stack. - __ Pop(edi); - __ TailCallRuntime(Runtime::kThrowStackOverflow); // This should be unreachable. @@ -1008,44 +1040,40 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( namespace { -// This function modified start_addr, and only reads the contents of num_args -// register. scratch1 and scratch2 are used as temporary registers. Their -// original values are restored after the use. +// This function modifies start_addr, and only reads the contents of num_args +// register. scratch1 and scratch2 are used as temporary registers. 
void Generate_InterpreterPushZeroAndArgsAndReturnAddress( MacroAssembler* masm, Register num_args, Register start_addr, - Register scratch1, Register scratch2, int num_slots_above_ret_addr, + Register scratch1, Register scratch2, int num_slots_to_move, Label* stack_overflow) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); // We have to move return address and the temporary registers above it // before we can copy arguments onto the stack. To achieve this: // Step 1: Increment the stack pointer by num_args + 1 (for receiver). - // Step 2: Move the return address and values above it to the top of stack. + // Step 2: Move the return address and values around it to the top of stack. // Step 3: Copy the arguments into the correct locations. // current stack =====> required stack layout - // | | | scratch1 | (2) <-- esp(1) - // | | | .... | (2) - // | | | scratch-n | (2) - // | | | return addr | (2) + // | | | return addr | (2) <-- esp (1) + // | | | addtl. slot | // | | | arg N | (3) - // | scratch1 | <-- esp | .... | - // | .... | | arg 1 | - // | scratch-n | | arg 0 | - // | return addr | | receiver slot | + // | | | .... | + // | | | arg 1 | + // | return addr | <-- esp | arg 0 | + // | addtl. slot | | receiver slot | // Check for stack overflow before we increment the stack pointer. - Generate_StackOverflowCheck(masm, num_args, scratch1, scratch2, - stack_overflow, true); + Generate_StackOverflowCheck(masm, num_args, scratch1, stack_overflow, true); - // Step 1 - Update the stack pointer. scratch1 already contains the required - // increment to the stack. i.e. num_args + 1 stack slots. This is computed in - // Generate_StackOverflowCheck. + // Step 1 - Update the stack pointer. + __ lea(scratch1, Operand(num_args, times_4, kPointerSize)); __ AllocateStackFrame(scratch1); - // Step 2 move return_address and slots above it to the correct locations. + // Step 2 move return_address and slots around it to the correct locations. // Move from top to bottom, otherwise we may overwrite when num_args = 0 or 1, // basically when the source and destination overlap. We at least need one // extra slot for receiver, so no extra checks are required to avoid copy. - for (int i = 0; i < num_slots_above_ret_addr + 1; i++) { + for (int i = 0; i < num_slots_to_move + 1; i++) { __ mov(scratch1, Operand(esp, num_args, times_pointer_size, (i + 1) * kPointerSize)); __ mov(Operand(esp, i * kPointerSize), scratch1); @@ -1055,7 +1083,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress( // Slot meant for receiver contains return address. Reset it so that // we will not incorrectly interpret return address as an object. 
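As a worked model of the shuffle (a sketch with placeholder ints; 0 stands in for the cleared receiver slot):

#include <vector>

// Input: {return_address, spill_slots...}, plus args given bottom-up as
// arg0..argN. Output, top of stack first: {return_address, spill_slots...,
// argN..arg0, receiver_slot}. The receiver slot is zeroed so a stale
// return address is never misread as a tagged object.
std::vector<int> PushZeroAndArgs(const std::vector<int>& ret_and_slots,
                                 const std::vector<int>& args) {
  std::vector<int> stack(ret_and_slots);
  stack.insert(stack.end(), args.rbegin(), args.rend());
  stack.push_back(0);  // receiver slot
  return stack;
}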
__ mov(Operand(esp, num_args, times_pointer_size, - (num_slots_above_ret_addr + 1) * kPointerSize), + (num_slots_to_move + 1) * kPointerSize), Immediate(0)); __ mov(scratch1, num_args); @@ -1064,7 +1092,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress( __ bind(&loop_header); __ mov(scratch2, Operand(start_addr, 0)); __ mov(Operand(esp, scratch1, times_pointer_size, - num_slots_above_ret_addr * kPointerSize), + num_slots_to_move * kPointerSize), scratch2); __ sub(start_addr, Immediate(kPointerSize)); __ sub(scratch1, Immediate(1)); @@ -1078,69 +1106,74 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress( // static void Builtins::Generate_InterpreterPushArgsThenConstructImpl( MacroAssembler* masm, InterpreterPushArgsMode mode) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); // ----------- S t a t e ------------- - // -- eax : the number of arguments (not including the receiver) - // -- edx : the new target - // -- edi : the constructor - // -- ebx : allocation site feedback (if available or undefined) - // -- ecx : the address of the first argument to be pushed. Subsequent - // arguments should be consecutive above this, in the same order as - // they are to be pushed onto the stack. + // -- eax : the number of arguments (not including the receiver) + // -- ecx : the address of the first argument to be pushed. Subsequent + // arguments should be consecutive above this, in the same order + // as they are to be pushed onto the stack. + // -- esp[0] : return address + // -- esp[4] : allocation site feedback (if available or undefined) + // -- esp[8] : the new target + // -- esp[12] : the constructor // ----------------------------------- + Label stack_overflow; - // We need two scratch registers. Push edi and edx onto stack. - __ Push(edi); - __ Push(edx); - // Push arguments and move return address to the top of stack. - // The eax register is readonly. The ecx register will be modified. The edx - // and edi registers will be modified but restored to their original values. - Generate_InterpreterPushZeroAndArgsAndReturnAddress(masm, eax, ecx, edx, edi, - 2, &stack_overflow); + // Push arguments and move return address and stack spill slots to the top of + // stack. The eax register is readonly. The ecx register will be modified. edx + // and edi are used as scratch registers. + Generate_InterpreterPushZeroAndArgsAndReturnAddress( + masm, eax, ecx, edx, edi, + InterpreterPushArgsThenConstructDescriptor::kStackArgumentsCount, + &stack_overflow); - // Restore edi and edx - __ Pop(edx); - __ Pop(edi); - - if (mode == InterpreterPushArgsMode::kWithFinalSpread) { - __ PopReturnAddressTo(ecx); - __ Pop(ebx); // Pass the spread in a register - __ PushReturnAddressFrom(ecx); - __ sub(eax, Immediate(1)); // Subtract one for spread - } else { - __ AssertUndefinedOrAllocationSite(ebx); - } + // Call the appropriate constructor. eax and ecx already contain intended + // values, remaining registers still need to be initialized from the stack. if (mode == InterpreterPushArgsMode::kArrayFunction) { - // Tail call to the array construct stub (still in the caller - // context at this point). - __ AssertFunction(edi); - // TODO(v8:6666): When rewriting ia32 ASM builtins to not clobber the - // kRootRegister ebx, this useless move can be removed. 
- __ Move(kJavaScriptCallExtraArg1Register, ebx); - Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl); - __ Jump(code, RelocInfo::CODE_TARGET); + // Tail call to the array construct stub (still in the caller context at + // this point). + + __ movd(xmm0, eax); // Spill number of arguments. + __ PopReturnAddressTo(eax); + __ Pop(kJavaScriptCallExtraArg1Register); + __ Pop(kJavaScriptCallNewTargetRegister); + __ Pop(kJavaScriptCallTargetRegister); + __ PushReturnAddressFrom(eax); + __ movd(eax, xmm0); // Reload number of arguments. + + __ AssertFunction(kJavaScriptCallTargetRegister); + __ AssertUndefinedOrAllocationSite(kJavaScriptCallExtraArg1Register); + __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl), + RelocInfo::CODE_TARGET); } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) { - // Call the constructor with unmodified eax, edi, edx values. + __ movd(xmm0, eax); // Spill number of arguments. + __ PopReturnAddressTo(eax); + __ Drop(1); // The allocation site is unused. + __ Pop(kJavaScriptCallNewTargetRegister); + __ Pop(kJavaScriptCallTargetRegister); + __ Pop(ecx); // Pop the spread (i.e. the first argument), overwriting ecx. + __ PushReturnAddressFrom(eax); + __ movd(eax, xmm0); // Reload number of arguments. + __ sub(eax, Immediate(1)); // The actual argc thus decrements by one. + __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread), RelocInfo::CODE_TARGET); } else { DCHECK_EQ(InterpreterPushArgsMode::kOther, mode); - // Call the constructor with unmodified eax, edi, edx values. + __ PopReturnAddressTo(ecx); + __ Drop(1); // The allocation site is unused. + __ Pop(kJavaScriptCallNewTargetRegister); + __ Pop(kJavaScriptCallTargetRegister); + __ PushReturnAddressFrom(ecx); + __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); } __ bind(&stack_overflow); - { - // Pop the temporary registers, so that return address is on top of stack. - __ Pop(edx); - __ Pop(edi); - - __ TailCallRuntime(Runtime::kThrowStackOverflow); - - // This should be unreachable. - __ int3(); - } + __ TailCallRuntime(Runtime::kThrowStackOverflow); + __ int3(); } static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { @@ -1151,26 +1184,30 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { masm->isolate()->heap()->interpreter_entry_return_pc_offset()); DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero); + static constexpr Register scratch = ecx; + // If the SFI function_data is an InterpreterData, get the trampoline stored // in it, otherwise get the trampoline from the builtins list.
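Both trampoline paths below end by pushing a return address computed from the trampoline's Code object, and the arithmetic is worth spelling out. A sketch: kHeapObjectTag is 1 on ia32, but the header size constant here is purely illustrative, not V8's actual value.

#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;    // ia32 heap pointers are tagged
constexpr uintptr_t kCodeHeaderSize = 64;  // illustrative stand-in

// The stored return PC points into the trampoline's instruction stream:
// past the object tag, past the Code header, plus the saved pc offset.
uintptr_t InterpreterReturnAddress(uintptr_t tagged_code, int pc_offset) {
  return tagged_code + pc_offset + kCodeHeaderSize - kHeapObjectTag;
}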
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kFunctionOffset)); - __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset)); - __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset)); + __ mov(scratch, Operand(ebp, StandardFrameConstants::kFunctionOffset)); + __ mov(scratch, FieldOperand(scratch, JSFunction::kSharedFunctionInfoOffset)); + __ mov(scratch, + FieldOperand(scratch, SharedFunctionInfo::kFunctionDataOffset)); __ Push(eax); - __ CmpObjectType(ebx, INTERPRETER_DATA_TYPE, eax); + __ CmpObjectType(scratch, INTERPRETER_DATA_TYPE, eax); __ j(not_equal, &builtin_trampoline, Label::kNear); - __ mov(ebx, FieldOperand(ebx, InterpreterData::kInterpreterTrampolineOffset)); + __ mov(scratch, + FieldOperand(scratch, InterpreterData::kInterpreterTrampolineOffset)); __ jmp(&trampoline_loaded, Label::kNear); __ bind(&builtin_trampoline); - __ Move(ebx, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline)); + __ Move(scratch, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline)); __ bind(&trampoline_loaded); __ Pop(eax); - __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() + - Code::kHeaderSize - kHeapObjectTag)); - __ push(ebx); + __ add(scratch, Immediate(interpreter_entry_return_pc_offset->value() + + Code::kHeaderSize - kHeapObjectTag)); + __ push(scratch); // Initialize the dispatch table register. __ mov(kInterpreterDispatchTableRegister, @@ -1185,7 +1222,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { // Check function data field is actually a BytecodeArray object. __ AssertNotSmi(kInterpreterBytecodeArrayRegister); __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE, - ebx); + scratch); __ Assert( equal, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); @@ -1197,15 +1234,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ SmiUntag(kInterpreterBytecodeOffsetRegister); // Dispatch to the target bytecode. - __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister, times_1, 0)); - __ mov( - kJavaScriptCallCodeStartRegister, - Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0)); + __ movzx_b(scratch, Operand(kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister, times_1, 0)); + __ mov(kJavaScriptCallCodeStartRegister, + Operand(kInterpreterDispatchTableRegister, scratch, times_pointer_size, + 0)); __ jmp(kJavaScriptCallCodeStartRegister); } void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + // Get bytecode array and bytecode offset from the stack frame. __ mov(kInterpreterBytecodeArrayRegister, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp)); @@ -1213,20 +1252,16 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister); - // Load the current bytecode - __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister, times_1, 0)); - // Advance to the next bytecode. Label if_return; AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister, ebx, ecx, + kInterpreterBytecodeOffsetRegister, ecx, esi, &if_return); // Convert new bytecode offset to a Smi and save in the stackframe. 
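The new offset is stored back as a Smi, as sketched next: on 32-bit targets a Smi is the integer shifted left one bit with a zero tag bit, the kSmiTagSize == 1, kSmiTag == 0 layout asserted elsewhere in this file.

#include <cstdint>

// Sketch of 32-bit Smi tagging; offsets here are non-negative.
int32_t SmiTag(int32_t value) { return value << 1; }  // tag bit is 0
int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // arithmetic shift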
- __ mov(ebx, kInterpreterBytecodeOffsetRegister); - __ SmiTag(ebx); - __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ebx); + __ mov(ecx, kInterpreterBytecodeOffsetRegister); + __ SmiTag(ecx); + __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ecx); Generate_InterpreterEnterBytecode(masm); @@ -1236,10 +1271,13 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { } void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); Generate_InterpreterEnterBytecode(masm); } void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + // ----------- S t a t e ------------- // -- eax : argument count (preserved for callee) // -- edx : new target (preserved for callee) @@ -1272,7 +1310,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { ebp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize)); } for (int i = 0; i < 3 - j; ++i) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); } if (j < 3) { __ jmp(&args_done, Label::kNear); @@ -1291,10 +1329,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { __ SmiUntag(ecx); scope.GenerateLeaveFrame(); - __ PopReturnAddressTo(ebx); + __ PopReturnAddressTo(edx); __ inc(ecx); __ lea(esp, Operand(esp, ecx, times_pointer_size, 0)); - __ PushReturnAddressFrom(ebx); + __ PushReturnAddressFrom(edx); __ ret(0); __ bind(&failed); @@ -1316,7 +1354,13 @@ namespace { void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, bool java_script_builtin, bool with_result) { +#ifdef V8_EMBEDDED_BUILTINS + // TODO(v8:6666): Fold into Default config once root is fully supported. + const RegisterConfiguration* config( + RegisterConfiguration::PreserveRootIA32()); +#else const RegisterConfiguration* config(RegisterConfiguration::Default()); +#endif int allocatable_register_count = config->num_allocatable_general_registers(); if (with_result) { // Overwrite the hole inserted by the deoptimizer with the return value from @@ -1346,24 +1390,42 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, } // namespace void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) { +#ifdef V8_EMBEDDED_BUILTINS + // TODO(v8:6666): Remove the ifdef once root is preserved by default. + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_ContinueToBuiltinHelper(masm, false, false); } void Builtins::Generate_ContinueToCodeStubBuiltinWithResult( MacroAssembler* masm) { +#ifdef V8_EMBEDDED_BUILTINS + // TODO(v8:6666): Remove the ifdef once root is preserved by default. + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_ContinueToBuiltinHelper(masm, false, true); } void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) { +#ifdef V8_EMBEDDED_BUILTINS + // TODO(v8:6666): Remove the ifdef once root is preserved by default. + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_ContinueToBuiltinHelper(masm, true, false); } void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult( MacroAssembler* masm) { +#ifdef V8_EMBEDDED_BUILTINS + // TODO(v8:6666): Remove the ifdef once root is preserved by default. 
+ Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif Generate_ContinueToBuiltinHelper(masm, true, true); } void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + { FrameScope scope(masm, StackFrame::INTERNAL); __ CallRuntime(Runtime::kNotifyDeoptimized); @@ -1377,6 +1439,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { // static void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + // ----------- S t a t e ------------- // -- eax : argc // -- esp[0] : return address @@ -1385,32 +1449,37 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // -- esp[12] : receiver // ----------------------------------- - // 1. Load receiver into edi, argArray into ebx (if present), remove all + // 1. Load receiver into xmm0, argArray into edx (if present), remove all // arguments from the stack (including the receiver), and push thisArg (if // present) instead. { Label no_arg_array, no_this_arg; - __ LoadRoot(edx, Heap::kUndefinedValueRootIndex); - __ mov(ebx, edx); - __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize)); + // Spill receiver to allow the usage of edi as a scratch register. + __ movd(xmm0, Operand(esp, eax, times_pointer_size, kPointerSize)); + + __ LoadRoot(edx, RootIndex::kUndefinedValue); + __ mov(edi, edx); __ test(eax, eax); __ j(zero, &no_this_arg, Label::kNear); { - __ mov(edx, Operand(esp, eax, times_pointer_size, 0)); + __ mov(edi, Operand(esp, eax, times_pointer_size, 0)); __ cmp(eax, Immediate(1)); __ j(equal, &no_arg_array, Label::kNear); - __ mov(ebx, Operand(esp, eax, times_pointer_size, -kPointerSize)); + __ mov(edx, Operand(esp, eax, times_pointer_size, -kPointerSize)); __ bind(&no_arg_array); } __ bind(&no_this_arg); __ PopReturnAddressTo(ecx); __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize)); - __ Push(edx); + __ Push(edi); __ PushReturnAddressFrom(ecx); + + // Restore receiver to edi. + __ movd(edi, xmm0); } // ----------- S t a t e ------------- - // -- ebx : argArray + // -- edx : argArray // -- edi : receiver // -- esp[0] : return address // -- esp[4] : thisArg @@ -1422,9 +1491,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // 3. Tail call with no arguments if argArray is null or undefined. Label no_arguments; - __ JumpIfRoot(ebx, Heap::kNullValueRootIndex, &no_arguments, Label::kNear); - __ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex, &no_arguments, - Label::kNear); + __ JumpIfRoot(edx, RootIndex::kNullValue, &no_arguments, Label::kNear); + __ JumpIfRoot(edx, RootIndex::kUndefinedValue, &no_arguments, Label::kNear); // 4a. Apply the receiver to the given argArray. 
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), @@ -1441,6 +1509,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // static void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + // Stack Layout: // esp[0] : Return address // esp[8] : Argument n @@ -1456,9 +1526,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { Label done; __ test(eax, eax); __ j(not_zero, &done, Label::kNear); - __ PopReturnAddressTo(ebx); - __ PushRoot(Heap::kUndefinedValueRootIndex); - __ PushReturnAddressFrom(ebx); + __ PopReturnAddressTo(edx); + __ PushRoot(RootIndex::kUndefinedValue); + __ PushReturnAddressFrom(edx); __ inc(eax); __ bind(&done); } @@ -1473,11 +1543,11 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { Label loop; __ mov(ecx, eax); __ bind(&loop); - __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0)); - __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), ebx); + __ mov(edx, Operand(esp, ecx, times_pointer_size, 0)); + __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), edx); __ dec(ecx); __ j(not_sign, &loop); // While non-negative (to copy return address). - __ pop(ebx); // Discard copy of return address. + __ pop(edx); // Discard copy of return address. __ dec(eax); // One fewer argument (first argument is new receiver). } @@ -1486,6 +1556,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { } void Builtins::Generate_ReflectApply(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + // ----------- S t a t e ------------- // -- eax : argc // -- esp[0] : return address @@ -1495,31 +1567,38 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { // -- esp[16] : receiver // ----------------------------------- - // 1. Load target into edi (if present), argumentsList into ebx (if present), + // 1. Load target into edi (if present), argumentsList into edx (if present), // remove all arguments from the stack (including the receiver), and push // thisArgument (if present) instead. { Label done; - __ LoadRoot(edi, Heap::kUndefinedValueRootIndex); + __ LoadRoot(edi, RootIndex::kUndefinedValue); __ mov(edx, edi); - __ mov(ebx, edi); + __ mov(ecx, edi); __ cmp(eax, Immediate(1)); __ j(below, &done, Label::kNear); __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize)); __ j(equal, &done, Label::kNear); - __ mov(edx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize)); + __ mov(ecx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize)); __ cmp(eax, Immediate(3)); __ j(below, &done, Label::kNear); - __ mov(ebx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize)); + __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize)); __ bind(&done); - __ PopReturnAddressTo(ecx); + + // Spill argumentsList to use edx as a scratch register. + __ movd(xmm0, edx); + + __ PopReturnAddressTo(edx); __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize)); - __ Push(edx); - __ PushReturnAddressFrom(ecx); + __ Push(ecx); + __ PushReturnAddressFrom(edx); + + // Restore argumentsList. 
+ __ movd(edx, xmm0); } // ----------- S t a t e ------------- - // -- ebx : argumentsList + // -- edx : argumentsList // -- edi : target // -- esp[0] : return address // -- esp[4] : thisArgument @@ -1535,6 +1614,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { } void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + // ----------- S t a t e ------------- // -- eax : argc // -- esp[0] : return address @@ -1544,33 +1625,40 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { // -- esp[16] : receiver // ----------------------------------- - // 1. Load target into edi (if present), argumentsList into ebx (if present), + // 1. Load target into edi (if present), argumentsList into ecx (if present), // new.target into edx (if present, otherwise use target), remove all // arguments from the stack (including the receiver), and push thisArgument // (if present) instead. { Label done; - __ LoadRoot(edi, Heap::kUndefinedValueRootIndex); + __ LoadRoot(edi, RootIndex::kUndefinedValue); __ mov(edx, edi); - __ mov(ebx, edi); + __ mov(ecx, edi); __ cmp(eax, Immediate(1)); __ j(below, &done, Label::kNear); __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize)); __ mov(edx, edi); __ j(equal, &done, Label::kNear); - __ mov(ebx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize)); + __ mov(ecx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize)); __ cmp(eax, Immediate(3)); __ j(below, &done, Label::kNear); __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize)); __ bind(&done); + + // Spill argumentsList to use ecx as a scratch register. + __ movd(xmm0, ecx); + __ PopReturnAddressTo(ecx); __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize)); - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ PushReturnAddressFrom(ecx); + + // Restore argumentsList. + __ movd(ecx, xmm0); } // ----------- S t a t e ------------- - // -- ebx : argumentsList + // -- ecx : argumentsList // -- edx : new.target // -- edi : target // -- esp[0] : return address @@ -1591,6 +1679,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { } void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + // ----------- S t a t e ------------- // -- eax : argc // -- esp[0] : return address @@ -1600,19 +1690,18 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { if (FLAG_debug_code) { // Initial map for the builtin InternalArray function should be a map. - __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); + __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); // Will both indicate a nullptr and a Smi. - __ test(ebx, Immediate(kSmiTagMask)); + __ test(ecx, Immediate(kSmiTagMask)); __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForInternalArrayFunction); - __ CmpObjectType(ebx, MAP_TYPE, ecx); + __ CmpObjectType(ecx, MAP_TYPE, ecx); __ Assert(equal, AbortReason::kUnexpectedInitialMapForInternalArrayFunction); } // Run the native code for the InternalArray function called as a normal // function. 
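The operand selection in Generate_ReflectConstruct above compresses several defaults into flag-laden assembly; the same logic reads more plainly in C++ (a sketch with ints standing in for tagged values and -1 for the undefined root):

#include <vector>

struct ReflectConstructOperands { int target, new_target, args_list; };

// target = arg0 (new.target defaults to it), args_list = arg1, and
// new.target = arg2 when present; anything missing stays undefined.
ReflectConstructOperands SelectOperands(const std::vector<int>& args) {
  constexpr int kUndefined = -1;
  ReflectConstructOperands op{kUndefined, kUndefined, kUndefined};
  if (!args.empty()) op.target = op.new_target = args[0];
  if (args.size() >= 2) op.args_list = args[1];
  if (args.size() >= 3) op.new_target = args[2];
  return op;
}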
- __ mov(ebx, masm->isolate()->factory()->undefined_value()); __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl), RelocInfo::CODE_TARGET); } @@ -1639,45 +1728,57 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { // Retrieve the number of arguments from the stack. - __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset)); // Leave the frame. __ leave(); // Remove caller arguments from the stack. STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - __ pop(ecx); - __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver - __ push(ecx); + __ PopReturnAddressTo(ecx); + __ lea(esp, Operand(esp, edi, times_2, 1 * kPointerSize)); // 1 ~ receiver + __ PushReturnAddressFrom(ecx); } // static void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle<Code> code) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); // ----------- S t a t e ------------- // -- edi : target + // -- esi : context for the Call / Construct builtin // -- eax : number of parameters on the stack (not including the receiver) - // -- ebx : arguments list (a FixedArray) // -- ecx : len (number of elements to push from args) // -- edx : new.target (checked to be constructor or undefined) + // -- esp[4] : arguments list (a FixedArray) // -- esp[0] : return address. // ----------------------------------- - // We need to preserve eax, edi and ebx. + // We need to preserve eax, edi, esi and ebx. __ movd(xmm0, edx); __ movd(xmm1, edi); __ movd(xmm2, eax); + __ movd(xmm3, esi); // Spill the context. + + // TODO(v8:6666): Remove this usage of ebx to enable kRootRegister support. + const Register kArgumentsList = esi; + const Register kArgumentsLength = ecx; + + __ PopReturnAddressTo(edx); + __ pop(kArgumentsList); + __ PushReturnAddressFrom(edx); if (masm->emit_debug_code()) { - // Allow ebx to be a FixedArray, or a FixedDoubleArray if ecx == 0. + // Allow kArgumentsList to be a FixedArray, or a FixedDoubleArray if + // kArgumentsLength == 0. Label ok, fail; - __ AssertNotSmi(ebx); - __ mov(edx, FieldOperand(ebx, HeapObject::kMapOffset)); + __ AssertNotSmi(kArgumentsList); + __ mov(edx, FieldOperand(kArgumentsList, HeapObject::kMapOffset)); __ CmpInstanceType(edx, FIXED_ARRAY_TYPE); __ j(equal, &ok); __ CmpInstanceType(edx, FIXED_DOUBLE_ARRAY_TYPE); __ j(not_equal, &fail); - __ cmp(ecx, 0); + __ cmp(kArgumentsLength, 0); __ j(equal, &ok); // Fall through. __ bind(&fail); @@ -1686,25 +1787,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ bind(&ok); } - // Check for stack overflow. - { - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack limit". - Label done; - ExternalReference real_stack_limit = - ExternalReference::address_of_real_stack_limit(masm->isolate()); - __ mov(edx, __ StaticVariable(real_stack_limit)); - // Make edx the space we have left. The stack might already be overflowed - // here which will cause edx to become negative.
- __ TailCallRuntime(Runtime::kThrowStackOverflow); - __ bind(&done); - } + // Check the stack for overflow. We are not trying to catch interruptions + // (i.e. debug break and preemption) here, so check the "real stack limit". + Label stack_overflow; + Generate_StackOverflowCheck(masm, kArgumentsLength, edx, &stack_overflow); // Push additional arguments onto the stack. { @@ -1712,14 +1798,14 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ Move(eax, Immediate(0)); Label done, push, loop; __ bind(&loop); - __ cmp(eax, ecx); + __ cmp(eax, kArgumentsLength); __ j(equal, &done, Label::kNear); // Turn the hole into undefined as we go. - __ mov(edi, - FieldOperand(ebx, eax, times_pointer_size, FixedArray::kHeaderSize)); - __ CompareRoot(edi, Heap::kTheHoleValueRootIndex); + __ mov(edi, FieldOperand(kArgumentsList, eax, times_pointer_size, + FixedArray::kHeaderSize)); + __ CompareRoot(edi, RootIndex::kTheHoleValue); __ j(not_equal, &push, Label::kNear); - __ LoadRoot(edi, Heap::kUndefinedValueRootIndex); + __ LoadRoot(edi, RootIndex::kUndefinedValue); __ bind(&push); __ Push(edi); __ inc(eax); @@ -1729,34 +1815,45 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, } // Restore eax, edi and edx. + __ movd(esi, xmm3); // Restore the context. __ movd(eax, xmm2); __ movd(edi, xmm1); __ movd(edx, xmm0); // Compute the actual parameter count. - __ add(eax, ecx); + __ add(eax, kArgumentsLength); // Tail-call to the actual Call or Construct builtin. __ Jump(code, RelocInfo::CODE_TARGET); + + __ bind(&stack_overflow); + __ movd(esi, xmm3); // Restore the context. + __ TailCallRuntime(Runtime::kThrowStackOverflow); } // static void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, CallOrConstructMode mode, Handle<Code> code) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); // ----------- S t a t e ------------- // -- eax : the number of arguments (not including the receiver) // -- edi : the target to call (can be any Object) + // -- esi : context for the Call / Construct builtin // -- edx : the new target (for [[Construct]] calls) // -- ecx : start index (to support rest parameters) // ----------------------------------- + __ movd(xmm0, esi); // Spill the context. + + Register scratch = esi; + // Check if new.target has a [[Construct]] internal method. if (mode == CallOrConstructMode::kConstruct) { Label new_target_constructor, new_target_not_constructor; __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear); - __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); - __ test_b(FieldOperand(ebx, Map::kBitFieldOffset), + __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); + __ test_b(FieldOperand(scratch, Map::kBitFieldOffset), Immediate(Map::IsConstructorBit::kMask)); __ j(not_zero, &new_target_constructor, Label::kNear); __ bind(&new_target_not_constructor); @@ -1764,18 +1861,18 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, FrameScope scope(masm, StackFrame::MANUAL); __ EnterFrame(StackFrame::INTERNAL); __ Push(edx); + __ movd(esi, xmm0); // Restore the context. __ CallRuntime(Runtime::kThrowNotConstructor); } __ bind(&new_target_constructor); } - // Preserve new.target (in case of [[Construct]]). - __ movd(xmm0, edx); + __ movd(xmm1, edx); // Preserve new.target (in case of [[Construct]]). // Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done; - __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset), + __ mov(scratch, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ cmp(Operand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset), Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); __ j(equal, &arguments_adaptor, Label::kNear); { @@ -1783,39 +1880,23 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ mov(edx, FieldOperand(edx, JSFunction::kSharedFunctionInfoOffset)); __ movzx_w(edx, FieldOperand( edx, SharedFunctionInfo::kFormalParameterCountOffset)); - __ mov(ebx, ebp); + __ mov(scratch, ebp); } __ jmp(&arguments_done, Label::kNear); __ bind(&arguments_adaptor); { // Just load the length from the ArgumentsAdaptorFrame. - __ mov(edx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ mov(edx, + Operand(scratch, ArgumentsAdaptorFrameConstants::kLengthOffset)); __ SmiUntag(edx); } __ bind(&arguments_done); - Label stack_done; + Label stack_done, stack_overflow; __ sub(edx, ecx); __ j(less_equal, &stack_done); { - // Check for stack overflow. - { - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack - // limit". - Label done; - __ LoadRoot(ecx, Heap::kRealStackLimitRootIndex); - // Make ecx the space we have left. The stack might already be - // overflowed here which will cause ecx to become negative. - __ neg(ecx); - __ add(ecx, esp); - __ sar(ecx, kPointerSizeLog2); - // Check if the arguments will overflow the stack. - __ cmp(ecx, edx); - __ j(greater, &done, Label::kNear); // Signed comparison. - __ TailCallRuntime(Runtime::kThrowStackOverflow); - __ bind(&done); - } + Generate_StackOverflowCheck(masm, edx, ecx, &stack_overflow); // Forward the arguments from the caller frame. { @@ -1824,7 +1905,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ PopReturnAddressTo(ecx); __ bind(&loop); { - __ Push(Operand(ebx, edx, times_pointer_size, 1 * kPointerSize)); + __ Push(Operand(scratch, edx, times_pointer_size, 1 * kPointerSize)); __ dec(edx); __ j(not_zero, &loop); } @@ -1833,16 +1914,22 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, } __ bind(&stack_done); - // Restore new.target (in case of [[Construct]]). - __ movd(edx, xmm0); + __ movd(edx, xmm1); // Restore new.target (in case of [[Construct]]). + __ movd(esi, xmm0); // Restore the context. // Tail-call to the {code} handler. __ Jump(code, RelocInfo::CODE_TARGET); + + __ bind(&stack_overflow); + __ movd(esi, xmm0); // Restore the context. + __ TailCallRuntime(Runtime::kThrowStackOverflow); } // static void Builtins::Generate_CallFunction(MacroAssembler* masm, ConvertReceiverMode mode) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + // ----------- S t a t e ------------- // -- eax : the number of arguments (not including the receiver) // -- edi : the function to call (checked to be a JSFunction) @@ -1883,13 +1970,15 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize)); __ JumpIfSmi(ecx, &convert_to_object, Label::kNear); STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx); + __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx); // Clobbers ecx. 
__ j(above_equal, &done_convert); + // Reload the receiver (it was clobbered by CmpObjectType). + __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize)); if (mode != ConvertReceiverMode::kNotNullOrUndefined) { Label convert_global_proxy; - __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex, - &convert_global_proxy, Label::kNear); - __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object, + __ JumpIfRoot(ecx, RootIndex::kUndefinedValue, &convert_global_proxy, + Label::kNear); + __ JumpIfNotRoot(ecx, RootIndex::kNullValue, &convert_to_object, Label::kNear); __ bind(&convert_global_proxy); { @@ -1932,9 +2021,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // ----------------------------------- __ movzx_w( - ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); + ecx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); ParameterCount actual(eax); - ParameterCount expected(ebx); + ParameterCount expected(ecx); __ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION); // The function is a "classConstructor", need to raise an exception. __ bind(&class_constructor); @@ -1948,40 +2037,43 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, namespace { void Generate_PushBoundArguments(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); // ----------- S t a t e ------------- // -- eax : the number of arguments (not including the receiver) // -- edx : new.target (only in case of [[Construct]]) // -- edi : target (checked to be a JSBoundFunction) // ----------------------------------- - // Load [[BoundArguments]] into ecx and length of that into ebx. + __ movd(xmm0, edx); // Spill edx. + + // Load [[BoundArguments]] into ecx and length of that into edx. Label no_bound_arguments; __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset)); - __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset)); - __ SmiUntag(ebx); - __ test(ebx, ebx); + __ mov(edx, FieldOperand(ecx, FixedArray::kLengthOffset)); + __ SmiUntag(edx); + __ test(edx, edx); __ j(zero, &no_bound_arguments); { // ----------- S t a t e ------------- - // -- eax : the number of arguments (not including the receiver) - // -- edx : new.target (only in case of [[Construct]]) - // -- edi : target (checked to be a JSBoundFunction) - // -- ecx : the [[BoundArguments]] (implemented as FixedArray) - // -- ebx : the number of [[BoundArguments]] + // -- eax : the number of arguments (not including the receiver) + // -- xmm0 : new.target (only in case of [[Construct]]) + // -- edi : target (checked to be a JSBoundFunction) + // -- ecx : the [[BoundArguments]] (implemented as FixedArray) + // -- edx : the number of [[BoundArguments]] // ----------------------------------- // Reserve stack space for the [[BoundArguments]]. { Label done; - __ lea(ecx, Operand(ebx, times_pointer_size, 0)); + __ lea(ecx, Operand(edx, times_pointer_size, 0)); __ sub(esp, ecx); // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack // limit". - __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex); - __ j(greater, &done, Label::kNear); // Signed comparison. + __ CompareRoot(esp, ecx, RootIndex::kRealStackLimit); + __ j(above_equal, &done, Label::kNear); // Restore the stack pointer. 
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 0)); + __ lea(esp, Operand(esp, edx, times_pointer_size, 0)); { FrameScope scope(masm, StackFrame::MANUAL); __ EnterFrame(StackFrame::INTERNAL); @@ -1997,10 +2089,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { { Label loop; __ Set(ecx, 0); - __ lea(ebx, Operand(esp, ebx, times_pointer_size, 0)); + __ lea(edx, Operand(esp, edx, times_pointer_size, 0)); __ bind(&loop); - __ movd(xmm0, Operand(ebx, ecx, times_pointer_size, 0)); - __ movd(Operand(esp, ecx, times_pointer_size, 0), xmm0); + __ movd(xmm1, Operand(edx, ecx, times_pointer_size, 0)); + __ movd(Operand(esp, ecx, times_pointer_size, 0), xmm1); __ inc(ecx); __ cmp(ecx, eax); __ j(less, &loop); @@ -2010,13 +2102,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { { Label loop; __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset)); - __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset)); - __ SmiUntag(ebx); + __ mov(edx, FieldOperand(ecx, FixedArray::kLengthOffset)); + __ SmiUntag(edx); __ bind(&loop); - __ dec(ebx); - __ movd(xmm0, FieldOperand(ecx, ebx, times_pointer_size, + __ dec(edx); + __ movd(xmm1, FieldOperand(ecx, edx, times_pointer_size, FixedArray::kHeaderSize)); - __ movd(Operand(esp, eax, times_pointer_size, 0), xmm0); + __ movd(Operand(esp, eax, times_pointer_size, 0), xmm1); __ lea(eax, Operand(eax, 1)); __ j(greater, &loop); } @@ -2026,13 +2118,16 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // [[BoundArguments]]), so we need to subtract one for the return address. __ dec(eax); } + __ bind(&no_bound_arguments); + __ movd(edx, xmm0); // Reload edx. } } // namespace // static void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); // ----------- S t a t e ------------- // -- eax : the number of arguments (not including the receiver) // -- edi : the function to call (checked to be a JSBoundFunction) @@ -2040,8 +2135,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ AssertBoundFunction(edi); // Patch the receiver to [[BoundThis]]. - __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset)); - __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx); + __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset)); + __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx); // Push the [[BoundArguments]] onto the stack. Generate_PushBoundArguments(masm); @@ -2054,6 +2149,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // static void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); // ----------- S t a t e ------------- // -- eax : the number of arguments (not including the receiver) // -- edi : the target to call (can be any Object). 
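Generate_PushBoundArguments above splices the [[BoundArguments]] in between the patched receiver and the call-site arguments. In effect (a sketch, with ints standing in for tagged values and the frame listed bottom-up):

#include <vector>

std::vector<int> ComposeBoundCall(int bound_this,
                                  const std::vector<int>& bound_args,
                                  const std::vector<int>& call_args) {
  std::vector<int> frame;
  frame.push_back(bound_this);  // receiver patched to [[BoundThis]]
  frame.insert(frame.end(), bound_args.begin(), bound_args.end());
  frame.insert(frame.end(), call_args.begin(), call_args.end());
  return frame;  // receiver, bound args, then call-site args
}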
@@ -2101,6 +2197,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // static void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); // ----------- S t a t e ------------- // -- eax : the number of arguments (not including the receiver) // -- edx : the new target (checked to be a constructor) @@ -2109,10 +2206,6 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { __ AssertConstructor(edi); __ AssertFunction(edi); - // Calling convention for function specific ConstructStubs require - // ebx to contain either an AllocationSite or undefined. - __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex); - Label call_generic_stub; // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. @@ -2121,16 +2214,23 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask)); __ j(zero, &call_generic_stub, Label::kNear); + // The calling convention for function-specific ConstructStubs requires + // ecx to contain either an AllocationSite or undefined. + __ LoadRoot(ecx, RootIndex::kUndefinedValue); __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub), RelocInfo::CODE_TARGET); __ bind(&call_generic_stub); + // The calling convention for function-specific ConstructStubs requires + // ecx to contain either an AllocationSite or undefined. + __ LoadRoot(ecx, RootIndex::kUndefinedValue); __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric), RelocInfo::CODE_TARGET); } // static void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); // ----------- S t a t e ------------- // -- eax : the number of arguments (not including the receiver) // -- edx : the new target (checked to be a constructor) @@ -2213,18 +2313,23 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- eax : actual number of arguments - // -- ebx : expected number of arguments + // -- ecx : expected number of arguments // -- edx : new target (passed through to callee) // -- edi : function (passed through to callee) // ----------------------------------- + Assembler::SupportsRootRegisterScope supports_root_register(masm); + + const Register kExpectedNumberOfArgumentsRegister = ecx; + Label invoke, dont_adapt_arguments, stack_overflow; __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1); Label enough, too_few; - __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel); + __ cmp(kExpectedNumberOfArgumentsRegister, + SharedFunctionInfo::kDontAdaptArgumentsSentinel); __ j(equal, &dont_adapt_arguments); - __ cmp(eax, ebx); + __ cmp(eax, kExpectedNumberOfArgumentsRegister); __ j(less, &too_few); { // Enough parameters: Actual >= expected. @@ -2232,7 +2337,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { EnterArgumentsAdaptorFrame(masm); // edi is used as a scratch register. It should be restored from the frame // when needed. - Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow); + Generate_StackOverflowCheck(masm, kExpectedNumberOfArgumentsRegister, edi, + &stack_overflow); // Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset; @@ -2244,7 +2350,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ inc(eax); __ push(Operand(edi, 0)); __ sub(edi, Immediate(kPointerSize)); - __ cmp(eax, ebx); + __ cmp(eax, kExpectedNumberOfArgumentsRegister); __ j(less, &copy); // eax now contains the expected number of arguments. __ jmp(&invoke); @@ -2255,16 +2361,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { EnterArgumentsAdaptorFrame(masm); // edi is used as a scratch register. It should be restored from the frame // when needed. - Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow); + Generate_StackOverflowCheck(masm, kExpectedNumberOfArgumentsRegister, edi, + &stack_overflow); - // Remember expected arguments in ecx. - __ mov(ecx, ebx); + // Remember expected arguments in xmm0. + __ movd(xmm0, kExpectedNumberOfArgumentsRegister); // Copy receiver and all actual arguments. const int offset = StandardFrameConstants::kCallerSPOffset; __ lea(edi, Operand(ebp, eax, times_4, offset)); - // ebx = expected - actual. - __ sub(ebx, eax); + // ecx = expected - actual. - __ sub(kExpectedNumberOfArgumentsRegister, eax); + __ sub(kExpectedNumberOfArgumentsRegister, eax); // eax = -actual - 1 __ neg(eax); __ sub(eax, Immediate(1)); @@ -2282,11 +2389,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ bind(&fill); __ inc(eax); __ push(Immediate(masm->isolate()->factory()->undefined_value())); - __ cmp(eax, ebx); + __ cmp(eax, kExpectedNumberOfArgumentsRegister); __ j(less, &fill); // Restore expected arguments. - __ mov(eax, ecx); + __ movd(eax, xmm0); } // Call the entry point. @@ -2325,15 +2432,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { } } -static void Generate_OnStackReplacementHelper(MacroAssembler* masm, - bool has_handler_frame) { +void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + // Lookup the function in the JavaScript frame. - if (has_handler_frame) { - __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset)); - } else { - __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); - } + __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset)); { FrameScope scope(masm, StackFrame::INTERNAL); @@ -2350,23 +2454,21 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ bind(&skip); - // Drop any potential handler frame that is be sitting on top of the actual + // Drop the handler frame that is sitting on top of the actual // JavaScript frame. This is the case when OSR is triggered from bytecode. - if (has_handler_frame) { - __ leave(); - } + __ leave(); // Load deoptimization data from the code object. - __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag)); + __ mov(ecx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag)); // Load the OSR entrypoint offset from the deoptimization data.
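Stepping back to the adaptor above: both of its paths hand the callee exactly the expected number of named arguments, padding with undefined when the call site supplied too few. A sketch, with -1 standing in for undefined and the receiver not counted:

#include <vector>

std::vector<int> AdaptArguments(std::vector<int> actual, size_t expected) {
  constexpr int kUndefined = -1;
  actual.resize(expected, kUndefined);  // pad with undefined, or expose only
                                        // the first `expected` named slots
  return actual;
}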
- __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt( + __ mov(ecx, Operand(ecx, FixedArray::OffsetOfElementAt( DeoptimizationData::kOsrPcOffsetIndex) - kHeapObjectTag)); - __ SmiUntag(ebx); + __ SmiUntag(ecx); // Compute the target address = code_obj + header_size + osr_offset - __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag)); + __ lea(eax, Operand(eax, ecx, times_1, Code::kHeaderSize - kHeapObjectTag)); // Overwrite the return address on the stack. __ mov(Operand(esp, 0), eax); @@ -2375,15 +2477,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ ret(0); } -void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, false); -} - -void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, true); -} - void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + // The function index was put in edi by the jump table trampoline. // Convert to Smi for the runtime call. __ SmiTag(edi); @@ -2394,6 +2490,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Save all parameter registers (see wasm-linkage.cc). They might be // overwritten in the runtime call below. We don't have any callee-saved // registers in wasm, so no need to store anything else. + Assembler::AllowExplicitEbxAccessScope root_is_spilled(masm); static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs == arraysize(wasm::kGpParamRegisters), "frame size mismatch"); @@ -2420,7 +2517,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. __ Move(kContextRegister, Smi::kZero); - __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, ecx); + { + // At this point, ebx has been spilled to the stack but is not yet + // overwritten with another value. We can still use it as kRootRegister. + Assembler::SupportsRootRegisterScope root_is_unclobbered(masm); + __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, ecx); + } // The entrypoint address is the return value. __ mov(edi, kReturnRegister0); @@ -2452,6 +2554,11 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // If argv_mode == kArgvInRegister: // ecx: pointer to the first argument +#ifdef V8_EMBEDDED_BUILTINS + // TODO(v8:6666): Remove the ifdef once branch load poisoning is removed. + Assembler::SupportsRootRegisterScope supports_root_register(masm); +#endif + STATIC_ASSERT(eax == kRuntimeCallArgCountRegister); STATIC_ASSERT(ecx == kRuntimeCallArgvRegister); STATIC_ASSERT(edx == kRuntimeCallFunctionRegister); @@ -2571,11 +2678,17 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi); __ bind(&skip); +#ifdef V8_EMBEDDED_BUILTINS + STATIC_ASSERT(kRootRegister == kSpeculationPoisonRegister); + CHECK(!FLAG_untrusted_code_mitigations); + CHECK(!FLAG_branch_load_poisoning); +#else // Reset the masking register. This is done independent of the underlying // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with // both configurations. It is safe to always do this, because the underlying // register is caller-saved and can be arbitrarily clobbered. __ ResetSpeculationPoisonRegister(); +#endif // Compute the handler entry address and jump to it. 
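The Assembler::SupportsRootRegisterScope and Assembler::AllowExplicitEbxAccessScope locals threaded through these ia32 builtins mark regions where ebx may (or may not) be used as the root register. As a minimal sketch of the idiom, assuming a hypothetical Assembler with a single mode flag rather than V8's actual implementation, such scopes are RAII guards that flip the flag for their lifetime and restore it on exit:

class Assembler {
 public:
  bool root_register_supported() const { return root_register_supported_; }

  // RAII guard: enables the mode on entry, restores the previous value on
  // scope exit, so nested scopes compose correctly.
  class SupportsRootRegisterScope {
   public:
    explicit SupportsRootRegisterScope(Assembler* assembler)
        : assembler_(assembler),
          saved_(assembler->root_register_supported_) {
      assembler_->root_register_supported_ = true;
    }
    ~SupportsRootRegisterScope() {
      assembler_->root_register_supported_ = saved_;
    }

   private:
    Assembler* assembler_;
    bool saved_;
  };

 private:
  bool root_register_supported_ = false;
};

int main() {
  Assembler masm;
  Assembler::SupportsRootRegisterScope supports_root_register(&masm);
  // Emission helpers could now assert masm.root_register_supported()
  // before emitting an access through the root register.
  return masm.root_register_supported() ? 0 : 1;
}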
__ mov(edi, __ StaticVariable(pending_handler_entrypoint_address)); @@ -2583,6 +2696,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, } void Builtins::Generate_DoubleToI(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + Label check_negative, process_64_bits, done; // Account for return address and saved regs. @@ -2596,6 +2711,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { MemOperand return_operand = mantissa_operand; Register scratch1 = ebx; + Assembler::AllowExplicitEbxAccessScope root_is_spilled(masm); // Since we must use ecx for shifts below, use some other register (eax) // to calculate the result. @@ -2676,6 +2792,8 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { } void Builtins::Generate_MathPowInternal(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + const Register exponent = eax; const Register scratch = ecx; const XMMRegister double_result = xmm3; @@ -2843,9 +2961,10 @@ void GenerateInternalArrayConstructorCase(MacroAssembler* masm, RelocInfo::CODE_TARGET); __ bind(&not_one_case); - // TODO(v8:6666): When rewriting ia32 ASM builtins to not clobber the - // kRootRegister ebx, this useless move can be removed. - __ Move(kJavaScriptCallExtraArg1Register, ebx); + // Load undefined into the allocation site parameter as required by + // ArrayNArgumentsConstructor. + __ mov(kJavaScriptCallExtraArg1Register, + masm->isolate()->factory()->undefined_value()); Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor); __ Jump(code, RelocInfo::CODE_TARGET); } @@ -2853,6 +2972,8 @@ void GenerateInternalArrayConstructorCase(MacroAssembler* masm, } // namespace void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) { + Assembler::SupportsRootRegisterScope supports_root_register(masm); + // ----------- S t a t e ------------- // -- eax : argc // -- edi : constructor diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc index 0c892c960f0fd3..3e8d7e9b807a91 100644 --- a/deps/v8/src/builtins/mips/builtins-mips.cc +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -56,7 +56,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { // Run the native code for the InternalArray function called as a normal // function. - __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl), RelocInfo::CODE_TARGET); } @@ -109,7 +108,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ SmiUntag(a0); // The receiver for the builtin/api call. - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); // Set up pointer to last argument. __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); @@ -176,7 +175,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Preserve the incoming parameters on the stack.
__ SmiTag(a0); __ Push(cp, a0, a1); - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); __ Push(a3); // ----------- S t a t e ------------- @@ -201,7 +200,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Else: use TheHoleValue as receiver for constructor call __ bind(&not_create_implicit_receiver); - __ LoadRoot(v0, Heap::kTheHoleValueRootIndex); + __ LoadRoot(v0, RootIndex::kTheHoleValue); // ----------- S t a t e ------------- // -- v0: receiver @@ -291,7 +290,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { Label use_receiver, do_throw, leave_frame; // If the result is undefined, we jump out to using the implicit receiver. - __ JumpIfRoot(v0, Heap::kUndefinedValueRootIndex, &use_receiver); + __ JumpIfRoot(v0, RootIndex::kUndefinedValue, &use_receiver); // Otherwise we do a smi check and fall through to check if the return value // is a valid receiver. @@ -313,7 +312,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // on-stack receiver as the result. __ bind(&use_receiver); __ lw(v0, MemOperand(sp, 0 * kPointerSize)); - __ JumpIfRoot(v0, Heap::kTheHoleValueRootIndex, &do_throw); + __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw); __ bind(&leave_frame); // Restore smi-tagged arguments count from the frame. @@ -342,7 +341,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) { // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. Label okay; - __ LoadRoot(a2, Heap::kRealStackLimitRootIndex); + __ LoadRoot(a2, RootIndex::kRealStackLimit); // Make a2 the space we have left. The stack might already be overflowed // here which will cause a2 to become negative. __ Subu(a2, sp, a2); @@ -410,7 +409,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Initialize all JavaScript callee-saved registers, since they will be seen // by the garbage collector as part of handlers. - __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); + __ LoadRoot(t0, RootIndex::kUndefinedValue); __ mov(s1, t0); __ mov(s2, t0); __ mov(s3, t0); @@ -491,7 +490,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". Label stack_overflow; - __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex); + __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit); __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg)); // Push receiver. @@ -558,7 +557,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { FrameScope scope(masm, StackFrame::INTERNAL); __ Push(a1, t0); // Push hole as receiver since we do not use it for stepping. - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(a1); } @@ -854,7 +853,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Do a stack check to ensure we don't go over the limit. Label ok; __ Subu(t1, sp, Operand(t0)); - __ LoadRoot(a2, Heap::kRealStackLimitRootIndex); + __ LoadRoot(a2, RootIndex::kRealStackLimit); __ Branch(&ok, hs, t1, Operand(a2)); __ CallRuntime(Runtime::kThrowStackOverflow); __ bind(&ok); @@ -862,7 +861,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // If ok, push undefined as the initial value for all register file entries.
Label loop_header; Label loop_check; - __ LoadRoot(t1, Heap::kUndefinedValueRootIndex); + __ LoadRoot(t1, RootIndex::kUndefinedValue); __ Branch(&loop_check); __ bind(&loop_header); // TODO(rmcilroy): Consider doing more than one push per loop iteration. @@ -886,7 +885,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ bind(&no_incoming_new_target_or_generator_register); // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. @@ -934,7 +933,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, // Check the stack for overflow. We are not trying to catch // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. - __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex); + __ LoadRoot(scratch1, RootIndex::kRealStackLimit); // Make scratch1 the space we have left. The stack might already be overflowed // here which will cause scratch1 to become negative. __ subu(scratch1, sp, scratch1); @@ -983,7 +982,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( // Push "undefined" as the receiver arg if we need to. if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ mov(t0, a0); // No receiver. } @@ -1191,7 +1190,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { __ push(t4); } for (int i = 0; i < 3 - j; ++i) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); } if (j < 3) { __ jmp(&args_done); @@ -1290,15 +1289,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { __ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove accumulator. } -static void Generate_OnStackReplacementHelper(MacroAssembler* masm, - bool has_handler_frame) { +void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame. - if (has_handler_frame) { - __ lw(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ lw(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset)); - } else { - __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - } + __ lw(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ lw(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset)); { FrameScope scope(masm, StackFrame::INTERNAL); @@ -1310,11 +1304,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, // If the code object is null, just return to the caller. __ Ret(eq, v0, Operand(Smi::kZero)); - // Drop any potential handler frame that is be sitting on top of the actual + // Drop the handler frame that is sitting on top of the actual // JavaScript frame. This is the case when OSR is triggered from bytecode. - if (has_handler_frame) { - __ LeaveFrame(StackFrame::STUB); - } + __ LeaveFrame(StackFrame::STUB); // Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset] @@ -1336,14 +1328,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ Ret(); } -void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, false); -} - -void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, true); -} - // static void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // ----------- S t a t e ------------- @@ -1359,7 +1343,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { { Label no_arg; Register scratch = t0; - __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ LoadRoot(a2, RootIndex::kUndefinedValue); __ mov(a3, a2); // Lsa() cannot be used here as scratch value used later. __ sll(scratch, a0, kPointerSizeLog2); @@ -1389,8 +1373,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // 3. Tail call with no arguments if argArray is null or undefined. Label no_arguments; - __ JumpIfRoot(a2, Heap::kNullValueRootIndex, &no_arguments); - __ JumpIfRoot(a2, Heap::kUndefinedValueRootIndex, &no_arguments); + __ JumpIfRoot(a2, RootIndex::kNullValue, &no_arguments); + __ JumpIfRoot(a2, RootIndex::kUndefinedValue, &no_arguments); // 4a. Apply the receiver to the given argArray. __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), @@ -1412,7 +1396,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { { Label done; __ Branch(&done, ne, a0, Operand(zero_reg)); - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ Addu(a0, a0, Operand(1)); __ bind(&done); } @@ -1462,7 +1446,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { { Label no_arg; Register scratch = t0; - __ LoadRoot(a1, Heap::kUndefinedValueRootIndex); + __ LoadRoot(a1, RootIndex::kUndefinedValue); __ mov(a2, a1); __ mov(a3, a1); __ sll(scratch, a0, kPointerSizeLog2); @@ -1514,7 +1498,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { { Label no_arg; Register scratch = t0; - __ LoadRoot(a1, Heap::kUndefinedValueRootIndex); + __ LoadRoot(a1, RootIndex::kUndefinedValue); __ mov(a2, a1); // Lsa() cannot be used here as scratch value used later. __ sll(scratch, a0, kPointerSizeLog2); @@ -1603,32 +1587,20 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, } // Check for stack overflow. - { - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack limit". - Label done; - __ LoadRoot(t1, Heap::kRealStackLimitRootIndex); - // Make ip the space we have left. The stack might already be overflowed - // here which will cause ip to become negative. - __ Subu(t1, sp, t1); - // Check if the arguments will overflow the stack. - __ sll(kScratchReg, t0, kPointerSizeLog2); - __ Branch(&done, gt, t1, Operand(kScratchReg)); // Signed comparison. - __ TailCallRuntime(Runtime::kThrowStackOverflow); - __ bind(&done); - } + Label stack_overflow; + Generate_StackOverflowCheck(masm, t0, kScratchReg, t1, &stack_overflow); // Push arguments onto the stack (thisArgument is already on the stack).
{ __ mov(t2, zero_reg); Label done, push, loop; - __ LoadRoot(t1, Heap::kTheHoleValueRootIndex); + __ LoadRoot(t1, RootIndex::kTheHoleValue); __ bind(&loop); __ Branch(&done, eq, t2, Operand(t0)); __ Lsa(kScratchReg, a2, t2, kPointerSizeLog2); __ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize)); __ Branch(&push, ne, t1, Operand(kScratchReg)); - __ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex); + __ LoadRoot(kScratchReg, RootIndex::kUndefinedValue); __ bind(&push); __ Push(kScratchReg); __ Addu(t2, t2, Operand(1)); @@ -1639,6 +1611,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Tail-call to the actual Call or Construct builtin. __ Jump(code, RelocInfo::CODE_TARGET); + + __ bind(&stack_overflow); + __ TailCallRuntime(Runtime::kThrowStackOverflow); } // static @@ -1772,9 +1747,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE)); if (mode != ConvertReceiverMode::kNotNullOrUndefined) { Label convert_global_proxy; - __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex, - &convert_global_proxy); - __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object); + __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy); + __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object); __ bind(&convert_global_proxy); { // Patch receiver to global proxy. @@ -1863,8 +1837,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ Subu(sp, sp, Operand(t1)); // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". - __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex); - __ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison. + __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit); + __ Branch(&done, hs, sp, Operand(kScratchReg)); // Restore the stack pointer. __ Addu(sp, sp, Operand(t1)); { @@ -1973,7 +1947,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // Calling convention for function specific ConstructStubs require // a2 to contain either an AllocationSite or undefined. - __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ LoadRoot(a2, RootIndex::kUndefinedValue); Label call_generic_stub; @@ -2021,8 +1995,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { __ Subu(sp, sp, Operand(t1)); // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". - __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex); - __ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison. + __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit); + __ Branch(&done, hs, sp, Operand(kScratchReg)); // Restore the stack pointer. __ Addu(sp, sp, Operand(t1)); { @@ -2218,7 +2192,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // a1: function // a2: expected number of arguments // a3: new target (passed through to callee) - __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); + __ LoadRoot(t0, RootIndex::kUndefinedValue); __ sll(t2, a2, kPointerSizeLog2); __ Subu(t1, fp, Operand(t2)); // Adjust for frame. @@ -2391,7 +2365,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Check result for exception sentinel. 
Label exception_returned; - __ LoadRoot(t0, Heap::kExceptionRootIndex); + __ LoadRoot(t0, RootIndex::kException); __ Branch(&exception_returned, eq, t0, Operand(v0)); // Check that there is no pending exception, otherwise we @@ -2402,7 +2376,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, IsolateAddressId::kPendingExceptionAddress, masm->isolate()); __ li(a2, pending_exception_address); __ lw(a2, MemOperand(a2)); - __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); + __ LoadRoot(t0, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. __ Branch(&okay, eq, t0, Operand(a2)); __ stop("Unexpected pending exception"); @@ -2705,6 +2679,10 @@ namespace { void GenerateInternalArrayConstructorCase(MacroAssembler* masm, ElementsKind kind) { + // Load undefined into the allocation site parameter as required by + // ArrayNArgumentsConstructor. + __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue); + __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind) .code(), RelocInfo::CODE_TARGET, lo, a0, Operand(1)); diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index d59f7c0ce5c37d..e7b9132238cb06 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -56,7 +56,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { // Run the native code for the InternalArray function called as a normal // function. - __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl), RelocInfo::CODE_TARGET); } @@ -108,7 +107,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ SmiUntag(a0); // The receiver for the builtin/api call. - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); // Set up pointer to last argument. __ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); @@ -176,7 +175,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Preserve the incoming parameters on the stack. __ SmiTag(a0); __ Push(cp, a0, a1); - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); __ Push(a3); // ----------- S t a t e ------------- @@ -201,7 +200,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Else: use TheHoleValue as receiver for constructor call __ bind(&not_create_implicit_receiver); - __ LoadRoot(v0, Heap::kTheHoleValueRootIndex); + __ LoadRoot(v0, RootIndex::kTheHoleValue); // ----------- S t a t e ------------- // -- v0: receiver @@ -291,7 +290,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { Label use_receiver, do_throw, leave_frame; // If the result is undefined, we jump out to using the implicit receiver. - __ JumpIfRoot(v0, Heap::kUndefinedValueRootIndex, &use_receiver); + __ JumpIfRoot(v0, RootIndex::kUndefinedValue, &use_receiver); // Otherwise we do a smi check and fall through to check if the return value // is a valid receiver. @@ -313,7 +312,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // on-stack receiver as the result. __ bind(&use_receiver); __ Ld(v0, MemOperand(sp, 0 * kPointerSize)); - __ JumpIfRoot(v0, Heap::kTheHoleValueRootIndex, &do_throw); + __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw); __ bind(&leave_frame); // Restore smi-tagged arguments count from the frame.
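Most of the churn across these ports is the mechanical rename from Heap::k...RootIndex to RootIndex::k..., moving the root-list constants out of an unscoped enum on Heap into a standalone scoped enum. A sketch of the direction of the change (the enumerator lists here are abbreviated assumptions, not V8's full root list):

#include <cstdint>

// Old shape (assumed): an unscoped enum nested in Heap, so callers write
// Heap::kUndefinedValueRootIndex and the enumerators pollute Heap's scope.
class Heap {
 public:
  enum RootListIndex : std::uint16_t {
    kUndefinedValueRootIndex,
    kTheHoleValueRootIndex,
    kRealStackLimitRootIndex,
  };
};

// New shape (assumed): a standalone scoped enum, so callers write
// RootIndex::kUndefinedValue and conversions to integers must be explicit.
enum class RootIndex : std::uint16_t {
  kUndefinedValue,
  kTheHoleValue,
  kRealStackLimit,
};

int main() {
  RootIndex index = RootIndex::kUndefinedValue;
  return static_cast<int>(index);  // explicit conversion required
}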
@@ -382,7 +381,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". Label stack_overflow; - __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex); + __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit); __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg)); // Push receiver. @@ -451,7 +450,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { FrameScope scope(masm, StackFrame::INTERNAL); __ Push(a1, a4); // Push hole as receiver since we do not use it for stepping. - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(a1); } @@ -488,7 +487,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) { // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. Label okay; - __ LoadRoot(a2, Heap::kRealStackLimitRootIndex); + __ LoadRoot(a2, RootIndex::kRealStackLimit); // Make a2 the space we have left. The stack might already be overflowed // here which will cause r2 to become negative. __ dsubu(a2, sp, a2); @@ -555,7 +554,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Initialize all JavaScript callee-saved registers, since they will be seen // by the garbage collector as part of handlers. - __ LoadRoot(a4, Heap::kUndefinedValueRootIndex); + __ LoadRoot(a4, RootIndex::kUndefinedValue); __ mov(s1, a4); __ mov(s2, a4); __ mov(s3, a4); @@ -853,7 +852,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Do a stack check to ensure we don't go over the limit. Label ok; __ Dsubu(a5, sp, Operand(a4)); - __ LoadRoot(a2, Heap::kRealStackLimitRootIndex); + __ LoadRoot(a2, RootIndex::kRealStackLimit); __ Branch(&ok, hs, a5, Operand(a2)); __ CallRuntime(Runtime::kThrowStackOverflow); __ bind(&ok); @@ -861,7 +860,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // If ok, push undefined as the initial value for all register file entries. Label loop_header; Label loop_check; - __ LoadRoot(a5, Heap::kUndefinedValueRootIndex); + __ LoadRoot(a5, RootIndex::kUndefinedValue); __ Branch(&loop_check); __ bind(&loop_header); // TODO(rmcilroy): Consider doing more than one push per loop iteration. @@ -885,7 +884,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ bind(&no_incoming_new_target_or_generator_register); // Load accumulator as undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. @@ -933,7 +932,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, // Check the stack for overflow. We are not trying to catch // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. - __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex); + __ LoadRoot(scratch1, RootIndex::kRealStackLimit); // Make scratch1 the space we have left. The stack might already be overflowed // here which will cause scratch1 to become negative. 
__ dsubu(scratch1, sp, scratch1); @@ -980,7 +979,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( // Push "undefined" as the receiver arg if we need to. if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ Dsubu(a3, a3, Operand(1)); // Subtract one for receiver. } @@ -1188,7 +1187,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { __ push(t2); } for (int i = 0; i < 3 - j; ++i) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); } if (j < 3) { __ jmp(&args_done); @@ -1287,15 +1286,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { __ Daddu(sp, sp, Operand(1 * kPointerSize)); // Remove state. } -static void Generate_OnStackReplacementHelper(MacroAssembler* masm, - bool has_handler_frame) { +void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame. - if (has_handler_frame) { - __ Ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset)); - } else { - __ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - } + __ Ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ Ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset)); { FrameScope scope(masm, StackFrame::INTERNAL); @@ -1307,11 +1301,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, // If the code object is null, just return to the caller. __ Ret(eq, v0, Operand(Smi::kZero)); - // Drop any potential handler frame that is be sitting on top of the actual + // Drop the handler frame that is sitting on top of the actual // JavaScript frame. This is the case when OSR is triggered from bytecode. - if (has_handler_frame) { - __ LeaveFrame(StackFrame::STUB); - } + __ LeaveFrame(StackFrame::STUB); // Load deoptimization data from the code object. // <deopt_data> = <code>[#deoptimization_data_offset] @@ -1332,14 +1324,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ Ret(); } -void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, false); -} - -void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, true); -} - // static void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // ----------- S t a t e ------------- @@ -1356,7 +1340,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { Register undefined_value = a3; Register scratch = a4; - __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex); + __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); // 1. Load receiver into a1, argArray into a2 (if present), remove all // arguments from the stack (including the receiver), and push thisArg (if @@ -1390,7 +1374,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // 3. Tail call with no arguments if argArray is null or undefined. Label no_arguments; - __ JumpIfRoot(arg_array, Heap::kNullValueRootIndex, &no_arguments); + __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments); __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value)); // 4a. Apply the receiver to the given argArray.
@@ -1414,7 +1398,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { { Label done; __ Branch(&done, ne, a0, Operand(zero_reg)); - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ Daddu(a0, a0, Operand(1)); __ bind(&done); } @@ -1465,7 +1449,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { Register undefined_value = a3; Register scratch = a4; - __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex); + __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); // 1. Load target into a1 (if present), argumentsList into a2 (if present), // remove all arguments from the stack (including the receiver), and push @@ -1521,7 +1505,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { Register undefined_value = a4; Register scratch = a5; - __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex); + __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); // 1. Load target into a1 (if present), argumentsList into a2 (if present), // new.target into a3 (if present, otherwise use target), remove all @@ -1620,20 +1604,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Register len = a4; // Check for stack overflow. - { - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack limit". - Label done; - __ LoadRoot(a5, Heap::kRealStackLimitRootIndex); - // Make ip the space we have left. The stack might already be overflowed - // here which will cause ip to become negative. - __ Dsubu(a5, sp, a5); - // Check if the arguments will overflow the stack. - __ dsll(kScratchReg, len, kPointerSizeLog2); - __ Branch(&done, gt, a5, Operand(kScratchReg)); // Signed comparison. - __ TailCallRuntime(Runtime::kThrowStackOverflow); - __ bind(&done); - } + Label stack_overflow; + Generate_StackOverflowCheck(masm, len, kScratchReg, a5, &stack_overflow); // Push arguments onto the stack (thisArgument is already on the stack). { @@ -1646,11 +1618,11 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ Daddu(a0, a0, len); // The 'len' argument for Call() or Construct(). __ dsll(scratch, len, kPointerSizeLog2); __ Dsubu(scratch, sp, Operand(scratch)); - __ LoadRoot(t1, Heap::kTheHoleValueRootIndex); + __ LoadRoot(t1, RootIndex::kTheHoleValue); __ bind(&loop); __ Ld(a5, MemOperand(src)); __ Branch(&push, ne, a5, Operand(t1)); - __ LoadRoot(a5, Heap::kUndefinedValueRootIndex); + __ LoadRoot(a5, RootIndex::kUndefinedValue); __ bind(&push); __ daddiu(src, src, kPointerSize); __ Push(a5); @@ -1660,6 +1632,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Tail-call to the actual Call or Construct builtin. __ Jump(code, RelocInfo::CODE_TARGET); + + __ bind(&stack_overflow); + __ TailCallRuntime(Runtime::kThrowStackOverflow); } // static @@ -1793,9 +1768,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE)); if (mode != ConvertReceiverMode::kNotNullOrUndefined) { Label convert_global_proxy; - __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex, - &convert_global_proxy); - __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object); + __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy); + __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object); __ bind(&convert_global_proxy); { // Patch receiver to global proxy. 
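Both MIPS ports above also drop their open-coded real-stack-limit check in Generate_CallOrConstructVarargs in favor of the shared Generate_StackOverflowCheck helper, with the Runtime::kThrowStackOverflow tail call moved behind a stack_overflow label. The arithmetic the helper emits can be sketched in plain C++ (sp and real_stack_limit are hypothetical stand-ins for the live stack pointer and the RealStackLimit root):

#include <cstddef>
#include <cstdint>

// Sketch of the emitted check: compute the headroom between the stack
// pointer and the "real" limit (the one that ignores pending interrupts)
// and take the overflow path if the arguments would not fit.
bool ArgumentsOverflowStack(std::uintptr_t sp, std::uintptr_t real_stack_limit,
                            std::size_t num_args) {
  std::uintptr_t space_left = sp - real_stack_limit;
  std::uintptr_t bytes_needed = num_args * sizeof(void*);  // len << log2(ptr)
  return space_left <= bytes_needed;  // true => branch to &stack_overflow
}

int main() {
  // Toy usage: with 64 bytes of headroom, 4 pointer-sized arguments fit.
  return ArgumentsOverflowStack(0x1040, 0x1000, 4) ? 1 : 0;
}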
@@ -1883,8 +1857,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ Dsubu(sp, sp, Operand(a5)); // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". - __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex); - __ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison. + __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit); + __ Branch(&done, hs, sp, Operand(kScratchReg)); // Restore the stack pointer. __ Daddu(sp, sp, Operand(a5)); { @@ -1990,7 +1964,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // Calling convention for function specific ConstructStubs require // a2 to contain either an AllocationSite or undefined. - __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ LoadRoot(a2, RootIndex::kUndefinedValue); Label call_generic_stub; @@ -2037,8 +2011,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { __ Dsubu(sp, sp, Operand(a5)); // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". - __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex); - __ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison. + __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit); + __ Branch(&done, hs, sp, Operand(kScratchReg)); // Restore the stack pointer. __ Daddu(sp, sp, Operand(a5)); { @@ -2235,7 +2209,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // a1: function // a2: expected number of arguments // a3: new target (passed through to callee) - __ LoadRoot(a5, Heap::kUndefinedValueRootIndex); + __ LoadRoot(a5, RootIndex::kUndefinedValue); __ dsll(a6, a2, kPointerSizeLog2); __ Dsubu(a4, fp, Operand(a6)); // Adjust for frame. @@ -2409,7 +2383,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Check result for exception sentinel. Label exception_returned; - __ LoadRoot(a4, Heap::kExceptionRootIndex); + __ LoadRoot(a4, RootIndex::kException); __ Branch(&exception_returned, eq, a4, Operand(v0)); // Check that there is no pending exception, otherwise we @@ -2420,7 +2394,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, IsolateAddressId::kPendingExceptionAddress, masm->isolate()); __ li(a2, pending_exception_address); __ Ld(a2, MemOperand(a2)); - __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); + __ LoadRoot(a4, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. __ Branch(&okay, eq, a4, Operand(a2)); __ stop("Unexpected pending exception"); @@ -2724,6 +2698,10 @@ namespace { void GenerateInternalArrayConstructorCase(MacroAssembler* masm, ElementsKind kind) { + // Load undefined into the allocation site parameter as required by + // ArrayNArgumentsConstructor. + __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue); + __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind) .code(), RelocInfo::CODE_TARGET, lo, a0, Operand(1)); diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index 01a0e4e371f37d..4bf5f20004c078 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -53,8 +53,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { // Run the native code for the InternalArray function called as a normal // function. 
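One behavioral fix hides among the renames above: the bound-function stack checks switch their limit branch from signed gt to unsigned hs. Addresses are unsigned, and under a signed comparison a stack pointer with the top bit set compares as negative and can take the wrong path. A toy illustration with hypothetical 64-bit addresses:

#include <cstdint>

int main() {
  // sp sits above the limit, but its top bit is set, as can happen in some
  // address-space layouts.
  std::uint64_t sp = 0x8000000000001000ull;
  std::uint64_t limit = 0x7FFFFFFFFFFFF000ull;

  bool unsigned_ok = sp >= limit;  // 'hs' branch: true, correct
  bool signed_ok = static_cast<std::int64_t>(sp) >
                   static_cast<std::int64_t>(limit);  // 'gt' branch: false!

  return (unsigned_ok && !signed_ok) ? 0 : 1;  // exits 0: signed gets it wrong
}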
- // tail call a stub - __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl), RelocInfo::CODE_TARGET); } @@ -109,7 +107,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ Push(cp, r3); __ SmiUntag(r3, SetRC); // The receiver for the builtin/api call. - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); // Set up pointer to last argument. __ addi(r7, fp, Operand(StandardFrameConstants::kCallerSPOffset)); @@ -184,7 +182,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Preserve the incoming parameters on the stack. __ SmiTag(r3); __ Push(cp, r3, r4); - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ Push(r6); // ----------- S t a t e ------------- @@ -209,7 +207,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Else: use TheHoleValue as receiver for constructor call __ bind(&not_create_implicit_receiver); - __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); + __ LoadRoot(r3, RootIndex::kTheHoleValue); // ----------- S t a t e ------------- // -- r3: receiver @@ -303,7 +301,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { Label use_receiver, do_throw, leave_frame; // If the result is undefined, we jump out to using the implicit receiver. - __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &use_receiver); + __ JumpIfRoot(r3, RootIndex::kUndefinedValue, &use_receiver); // Otherwise we do a smi check and fall through to check if the return value // is a valid receiver. @@ -325,7 +323,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // on-stack receiver as the result. __ bind(&use_receiver); __ LoadP(r3, MemOperand(sp)); - __ JumpIfRoot(r3, Heap::kTheHoleValueRootIndex, &do_throw); + __ JumpIfRoot(r3, RootIndex::kTheHoleValue, &do_throw); __ bind(&leave_frame); // Restore smi-tagged arguments count from the frame. @@ -402,7 +400,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". Label stack_overflow; - __ CompareRoot(sp, Heap::kRealStackLimitRootIndex); + __ CompareRoot(sp, RootIndex::kRealStackLimit); __ blt(&stack_overflow); // Push receiver. @@ -468,7 +466,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); __ Push(r4, r7); // Push hole as receiver since we do not use it for stepping. - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(r4); __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset)); @@ -505,7 +503,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) { // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. Label okay; - __ LoadRoot(r5, Heap::kRealStackLimitRootIndex); + __ LoadRoot(r5, RootIndex::kRealStackLimit); // Make r5 the space we have left. The stack might already be overflowed // here which will cause r5 to become negative. __ sub(r5, sp, r5); @@ -573,7 +571,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Initialize all JavaScript callee-saved registers, since they will be seen // by the garbage collector as part of handlers.
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r7, RootIndex::kUndefinedValue); __ mr(r14, r7); __ mr(r15, r7); __ mr(r16, r7); @@ -887,7 +885,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Do a stack check to ensure we don't go over the limit. Label ok; __ sub(r8, sp, r5); - __ LoadRoot(r0, Heap::kRealStackLimitRootIndex); + __ LoadRoot(r0, RootIndex::kRealStackLimit); __ cmpl(r8, r0); __ bge(&ok); __ CallRuntime(Runtime::kThrowStackOverflow); @@ -896,7 +894,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // If ok, push undefined as the initial value for all register file entries. // TODO(rmcilroy): Consider doing more than one push per loop iteration. Label loop, no_args; - __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r8, RootIndex::kUndefinedValue); __ ShiftRightImm(r5, r5, Operand(kPointerSizeLog2), SetRC); __ beq(&no_args, cr0); __ mtctr(r5); @@ -920,7 +918,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ bind(&no_incoming_new_target_or_generator_register); // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. Label do_dispatch; @@ -968,7 +966,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, // Check the stack for overflow. We are not trying to catch // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. - __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex); + __ LoadRoot(scratch, RootIndex::kRealStackLimit); // Make scratch the space we have left. The stack might already be overflowed // here which will cause scratch to become negative. __ sub(scratch, sp, scratch); @@ -1014,7 +1012,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( // Push "undefined" as the receiver arg if we need to. if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ mr(r6, r3); // Argument count is correct. } @@ -1227,7 +1225,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { __ push(r7); } for (int i = 0; i < 3 - j; ++i) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); } if (j < 3) { __ jmp(&args_done); @@ -1327,15 +1325,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { __ Ret(); } -static void Generate_OnStackReplacementHelper(MacroAssembler* masm, - bool has_handler_frame) { +void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame. 
- if (has_handler_frame) { - __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(r3, MemOperand(r3, JavaScriptFrameConstants::kFunctionOffset)); - } else { - __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - } + __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ LoadP(r3, MemOperand(r3, JavaScriptFrameConstants::kFunctionOffset)); { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); @@ -1352,11 +1345,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ bind(&skip); - // Drop any potential handler frame that is be sitting on top of the actual + // Drop the handler frame that is sitting on top of the actual // JavaScript frame. This is the case when OSR is triggered from bytecode. - if (has_handler_frame) { - __ LeaveFrame(StackFrame::STUB); - } + __ LeaveFrame(StackFrame::STUB); // Load deoptimization data from the code object. // <deopt_data> = <code>[#deoptimization_data_offset] @@ -1386,14 +1377,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, } } -void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, false); -} - -void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, true); -} - // static void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // ----------- S t a t e ------------- @@ -1413,7 +1396,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { Register scratch = r7; __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2)); __ add(new_sp, sp, arg_size); - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ mr(r5, scratch); __ LoadP(r4, MemOperand(new_sp, 0)); // receiver __ cmpi(arg_size, Operand(kPointerSize)); @@ -1438,8 +1421,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // 3. Tail call with no arguments if argArray is null or undefined. Label no_arguments; - __ JumpIfRoot(r5, Heap::kNullValueRootIndex, &no_arguments); - __ JumpIfRoot(r5, Heap::kUndefinedValueRootIndex, &no_arguments); + __ JumpIfRoot(r5, RootIndex::kNullValue, &no_arguments); + __ JumpIfRoot(r5, RootIndex::kUndefinedValue, &no_arguments); // 4a. Apply the receiver to the given argArray.
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), @@ -1462,7 +1445,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { Label done; __ cmpi(r3, Operand::Zero()); __ bne(&done); - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ addi(r3, r3, Operand(1)); __ bind(&done); } @@ -1517,7 +1500,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { Register scratch = r7; __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2)); __ add(new_sp, sp, arg_size); - __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r4, RootIndex::kUndefinedValue); __ mr(scratch, r4); __ mr(r5, r4); __ cmpi(arg_size, Operand(kPointerSize)); @@ -1567,7 +1550,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { Register new_sp = r7; __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2)); __ add(new_sp, sp, arg_size); - __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r4, RootIndex::kUndefinedValue); __ mr(r5, r4); __ mr(r6, r4); __ StoreP(r4, MemOperand(new_sp, 0)); // receiver (undefined) @@ -1666,21 +1649,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, } // Check for stack overflow. - { - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack limit". - Label done; - __ LoadRoot(ip, Heap::kRealStackLimitRootIndex); - // Make ip the space we have left. The stack might already be overflowed - // here which will cause ip to become negative. - __ sub(ip, sp, ip); - // Check if the arguments will overflow the stack. - __ ShiftLeftImm(r0, r7, Operand(kPointerSizeLog2)); - __ cmp(ip, r0); // Signed comparison. - __ bgt(&done); - __ TailCallRuntime(Runtime::kThrowStackOverflow); - __ bind(&done); - } + Label stack_overflow; + Generate_StackOverflowCheck(masm, r7, ip, &stack_overflow); // Push arguments onto the stack (thisArgument is already on the stack). { @@ -1692,9 +1662,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ mtctr(r7); __ bind(&loop); __ LoadPU(ip, MemOperand(r5, kPointerSize)); - __ CompareRoot(ip, Heap::kTheHoleValueRootIndex); + __ CompareRoot(ip, RootIndex::kTheHoleValue); __ bne(&skip); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ LoadRoot(ip, RootIndex::kUndefinedValue); __ bind(&skip); __ push(ip); __ bdnz(&loop); @@ -1704,6 +1674,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Tail-call to the actual Call or Construct builtin. __ Jump(code, RelocInfo::CODE_TARGET); + + __ bind(&stack_overflow); + __ TailCallRuntime(Runtime::kThrowStackOverflow); } // static @@ -1840,9 +1813,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ bge(&done_convert); if (mode != ConvertReceiverMode::kNotNullOrUndefined) { Label convert_global_proxy; - __ JumpIfRoot(r6, Heap::kUndefinedValueRootIndex, - &convert_global_proxy); - __ JumpIfNotRoot(r6, Heap::kNullValueRootIndex, &convert_to_object); + __ JumpIfRoot(r6, RootIndex::kUndefinedValue, &convert_global_proxy); + __ JumpIfNotRoot(r6, RootIndex::kNullValue, &convert_to_object); __ bind(&convert_global_proxy); { // Patch receiver to global proxy. @@ -1930,7 +1902,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack // limit". 
- __ CompareRoot(sp, Heap::kRealStackLimitRootIndex); + __ CompareRoot(sp, RootIndex::kRealStackLimit); __ bgt(&done); // Signed comparison. // Restore the stack pointer. __ mr(sp, r9); @@ -2062,7 +2034,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // Calling convention for function specific ConstructStubs require // r5 to contain either an AllocationSite or undefined. - __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r5, RootIndex::kUndefinedValue); Label call_generic_stub; @@ -2246,7 +2218,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // r4: function // r5: expected number of arguments // r6: new target (passed through to callee) - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r0, RootIndex::kUndefinedValue); __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2)); __ sub(r7, fp, r7); // Adjust for frame. @@ -2435,7 +2407,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Check result for exception sentinel. Label exception_returned; - __ CompareRoot(r3, Heap::kExceptionRootIndex); + __ CompareRoot(r3, RootIndex::kException); __ beq(&exception_returned); // Check that there is no pending exception, otherwise we @@ -2447,7 +2419,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Move(r6, pending_exception_address); __ LoadP(r6, MemOperand(r6)); - __ CompareRoot(r6, Heap::kTheHoleValueRootIndex); + __ CompareRoot(r6, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. __ beq(&okay); __ stop("Unexpected pending exception"); @@ -2743,6 +2715,10 @@ namespace { void GenerateInternalArrayConstructorCase(MacroAssembler* masm, ElementsKind kind) { + // Load undefined into the allocation site parameter as required by + // ArrayNArgumentsConstructor. + __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue); + __ cmpli(r3, Operand(1)); __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind) diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index b92011c38ba6a7..d935e5e562b809 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -53,8 +53,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { // Run the native code for the InternalArray function called as a normal // function. - // tail call a stub - __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl), RelocInfo::CODE_TARGET); } @@ -108,7 +106,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ Push(cp, r2); __ SmiUntag(r2); // The receiver for the builtin/api call. - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); // Set up pointer to last argument. __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset)); @@ -178,7 +176,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Preserve the incoming parameters on the stack. 
__ SmiTag(r2); __ Push(cp, r2, r3); - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ Push(r5); // ----------- S t a t e ------------- @@ -203,7 +201,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Else: use TheHoleValue as receiver for constructor call __ bind(&not_create_implicit_receiver); - __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); + __ LoadRoot(r2, RootIndex::kTheHoleValue); // ----------- S t a t e ------------- // -- r2: receiver @@ -295,7 +293,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { Label use_receiver, do_throw, leave_frame; // If the result is undefined, we jump out to using the implicit receiver. - __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &use_receiver); + __ JumpIfRoot(r2, RootIndex::kUndefinedValue, &use_receiver); // Otherwise we do a smi check and fall through to check if the return value // is a valid receiver. @@ -317,7 +315,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // on-stack receiver as the result. __ bind(&use_receiver); __ LoadP(r2, MemOperand(sp)); - __ JumpIfRoot(r2, Heap::kTheHoleValueRootIndex, &do_throw); + __ JumpIfRoot(r2, RootIndex::kTheHoleValue, &do_throw); __ bind(&leave_frame); // Restore smi-tagged arguments count from the frame. @@ -393,7 +391,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". Label stack_overflow; - __ CompareRoot(sp, Heap::kRealStackLimitRootIndex); + __ CompareRoot(sp, RootIndex::kRealStackLimit); __ blt(&stack_overflow); // Push receiver. @@ -468,7 +466,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); __ Push(r3, r6); // Push hole as receiver since we do not use it for stepping. - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(r3); __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset)); @@ -505,7 +503,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) { // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. Label okay; - __ LoadRoot(r4, Heap::kRealStackLimitRootIndex); + __ LoadRoot(r4, RootIndex::kRealStackLimit); // Make r4 the space we have left. The stack might already be overflowed // here which will cause r4 to become negative. __ SubP(r4, sp, r4); @@ -581,7 +579,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Initialize all JavaScript callee-saved registers, since they will be seen // by the garbage collector as part of handlers. - __ LoadRoot(r6, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r6, RootIndex::kUndefinedValue); __ LoadRR(r7, r6); __ LoadRR(r8, r6); __ LoadRR(r9, r6); @@ -890,7 +888,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Do a stack check to ensure we don't go over the limit. Label ok; __ SubP(r8, sp, r4); - __ LoadRoot(r0, Heap::kRealStackLimitRootIndex); + __ LoadRoot(r0, RootIndex::kRealStackLimit); __ CmpLogicalP(r8, r0); __ bge(&ok); __ CallRuntime(Runtime::kThrowStackOverflow); @@ -899,7 +897,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // If ok, push undefined as the initial value for all register file entries.
// TODO(rmcilroy): Consider doing more than one push per loop iteration. Label loop, no_args; - __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r8, RootIndex::kUndefinedValue); __ ShiftRightP(r4, r4, Operand(kPointerSizeLog2)); __ LoadAndTestP(r4, r4); __ beq(&no_args); @@ -924,7 +922,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ bind(&no_incoming_new_target_or_generator_register); // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. Label do_dispatch; @@ -973,7 +971,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, // Check the stack for overflow. We are not trying to catch // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. - __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex); + __ LoadRoot(scratch, RootIndex::kRealStackLimit); // Make scratch the space we have left. The stack might already be overflowed // here which will cause scratch to become negative. __ SubP(scratch, sp, scratch); @@ -1020,7 +1018,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( // Push "undefined" as the receiver arg if we need to. if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ LoadRR(r5, r2); // Argument count is correct. } @@ -1230,7 +1228,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { __ push(r6); } for (int i = 0; i < 3 - j; ++i) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); } if (j < 3) { __ jmp(&args_done); @@ -1329,15 +1327,10 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { __ Ret(); } -static void Generate_OnStackReplacementHelper(MacroAssembler* masm, - bool has_handler_frame) { +void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame. - if (has_handler_frame) { - __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(r2, MemOperand(r2, JavaScriptFrameConstants::kFunctionOffset)); - } else { - __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - } + __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ LoadP(r2, MemOperand(r2, JavaScriptFrameConstants::kFunctionOffset)); { FrameScope scope(masm, StackFrame::INTERNAL); @@ -1354,11 +1347,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ bind(&skip); - // Drop any potential handler frame that is be sitting on top of the actual + // Drop the handler frame that is sitting on top of the actual // JavaScript frame. This is the case when OSR is triggered from bytecode. - if (has_handler_frame) { - __ LeaveFrame(StackFrame::STUB); - } + __ LeaveFrame(StackFrame::STUB); // Load deoptimization data from the code object.
// = [#deoptimization_data_offset] @@ -1380,14 +1371,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ Ret(); } -void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, false); -} - -void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, true); -} - // static void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // ----------- S t a t e ------------- @@ -1407,7 +1390,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { Register scratch = r6; __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2)); __ AddP(new_sp, sp, arg_size); - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ LoadRR(r4, scratch); __ LoadP(r3, MemOperand(new_sp, 0)); // receiver __ CmpP(arg_size, Operand(kPointerSize)); @@ -1432,8 +1415,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // 3. Tail call with no arguments if argArray is null or undefined. Label no_arguments; - __ JumpIfRoot(r4, Heap::kNullValueRootIndex, &no_arguments); - __ JumpIfRoot(r4, Heap::kUndefinedValueRootIndex, &no_arguments); + __ JumpIfRoot(r4, RootIndex::kNullValue, &no_arguments); + __ JumpIfRoot(r4, RootIndex::kUndefinedValue, &no_arguments); // 4a. Apply the receiver to the given argArray. __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), @@ -1456,7 +1439,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { Label done; __ CmpP(r2, Operand::Zero()); __ bne(&done, Label::kNear); - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ AddP(r2, Operand(1)); __ bind(&done); } @@ -1511,7 +1494,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { Register scratch = r6; __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2)); __ AddP(new_sp, sp, arg_size); - __ LoadRoot(r3, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r3, RootIndex::kUndefinedValue); __ LoadRR(scratch, r3); __ LoadRR(r4, r3); __ CmpP(arg_size, Operand(kPointerSize)); @@ -1561,7 +1544,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { Register new_sp = r6; __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2)); __ AddP(new_sp, sp, arg_size); - __ LoadRoot(r3, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r3, RootIndex::kUndefinedValue); __ LoadRR(r4, r3); __ LoadRR(r5, r3); __ StoreP(r3, MemOperand(new_sp, 0)); // receiver (undefined) @@ -1670,21 +1653,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, } // Check for stack overflow. - { - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack limit". - Label done; - __ LoadRoot(ip, Heap::kRealStackLimitRootIndex); - // Make ip the space we have left. The stack might already be overflowed - // here which will cause ip to become negative. - __ SubP(ip, sp, ip); - // Check if the arguments will overflow the stack. - __ ShiftLeftP(r0, r6, Operand(kPointerSizeLog2)); - __ CmpP(ip, r0); // Signed comparison. - __ bgt(&done); - __ TailCallRuntime(Runtime::kThrowStackOverflow); - __ bind(&done); - } + Label stack_overflow; + Generate_StackOverflowCheck(masm, r6, ip, &stack_overflow); // Push arguments onto the stack (thisArgument is already on the stack). 
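The last hunk above replaces an open-coded stack check in Generate_CallOrConstructVarargs with the shared Generate_StackOverflowCheck plus a deferred stack_overflow label, which is bound at the end of the builtin after the tail call. The logic being deduplicated, as a standalone C++ sketch rather than MacroAssembler code -- register names and widths here are illustrative:

#include <cstdint>
#include <cstdio>

// The stack grows downward, so the space left is sp - real_stack_limit.
// The stack may already be overflowed, making that difference negative,
// which is why the comparison must be signed.
bool WouldOverflowStack(uintptr_t sp, uintptr_t real_stack_limit,
                        intptr_t num_args, int pointer_size_log2) {
  intptr_t space_left = static_cast<intptr_t>(sp) -
                        static_cast<intptr_t>(real_stack_limit);
  intptr_t bytes_needed = num_args << pointer_size_log2;
  return space_left <= bytes_needed;  // true -> jump to &stack_overflow
}

int main() {
  // 64 pointer-sized args need 512 bytes; only 256 remain -> overflow.
  printf("%d\n", WouldOverflowStack(0x1000, 0x0F00, 64, 3));
}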
{ @@ -1697,9 +1667,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ bind(&loop); __ LoadP(ip, MemOperand(r4, kPointerSize)); __ la(r4, MemOperand(r4, kPointerSize)); - __ CompareRoot(ip, Heap::kTheHoleValueRootIndex); + __ CompareRoot(ip, RootIndex::kTheHoleValue); __ bne(&skip, Label::kNear); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ LoadRoot(ip, RootIndex::kUndefinedValue); __ bind(&skip); __ push(ip); __ BranchOnCount(r1, &loop); @@ -1709,6 +1679,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Tail-call to the actual Call or Construct builtin. __ Jump(code, RelocInfo::CODE_TARGET); + + __ bind(&stack_overflow); + __ TailCallRuntime(Runtime::kThrowStackOverflow); } // static @@ -1845,9 +1818,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ bge(&done_convert); if (mode != ConvertReceiverMode::kNotNullOrUndefined) { Label convert_global_proxy; - __ JumpIfRoot(r5, Heap::kUndefinedValueRootIndex, - &convert_global_proxy); - __ JumpIfNotRoot(r5, Heap::kNullValueRootIndex, &convert_to_object); + __ JumpIfRoot(r5, RootIndex::kUndefinedValue, &convert_global_proxy); + __ JumpIfNotRoot(r5, RootIndex::kNullValue, &convert_to_object); __ bind(&convert_global_proxy); { // Patch receiver to global proxy. @@ -1936,7 +1908,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack // limit". - __ CompareRoot(sp, Heap::kRealStackLimitRootIndex); + __ CompareRoot(sp, RootIndex::kRealStackLimit); __ bgt(&done); // Signed comparison. // Restore the stack pointer. __ LoadRR(sp, r8); @@ -2069,7 +2041,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // Calling convention for function specific ConstructStubs requires // r4 to contain either an AllocationSite or undefined. - __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r4, RootIndex::kUndefinedValue); Label call_generic_stub; @@ -2251,7 +2223,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // Fill the remaining expected arguments with undefined. // r3: function // r4: expected number of arguments - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r0, RootIndex::kUndefinedValue); __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2)); __ SubP(r6, fp, r6); // Adjust for frame. @@ -2408,6 +2380,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ LoadRR(r3, r2); __ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize)); isolate_reg = r5; + // Clang doesn't preserve r2 (the result buffer); + // save it to r8 (preserved) before the call. + __ LoadRR(r8, r2); } // Call C built-in. __ Move(isolate_reg, ExternalReference::isolate_address(masm->isolate())); @@ -2433,13 +2408,14 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // If return value is on the stack, pop it to registers. if (needs_return_buffer) { + __ LoadRR(r2, r8); __ LoadP(r3, MemOperand(r2, kPointerSize)); __ LoadP(r2, MemOperand(r2)); } // Check result for exception sentinel. 
Label exception_returned; - __ CompareRoot(r2, Heap::kExceptionRootIndex); + __ CompareRoot(r2, RootIndex::kException); __ beq(&exception_returned, Label::kNear); // Check that there is no pending exception, otherwise we @@ -2450,7 +2426,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, IsolateAddressId::kPendingExceptionAddress, masm->isolate()); __ Move(r1, pending_exception_address); __ LoadP(r1, MemOperand(r1)); - __ CompareRoot(r1, Heap::kTheHoleValueRootIndex); + __ CompareRoot(r1, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. __ beq(&okay, Label::kNear); __ stop("Unexpected pending exception"); @@ -2729,6 +2705,10 @@ namespace { void GenerateInternalArrayConstructorCase(MacroAssembler* masm, ElementsKind kind) { + // Load undefined into the allocation site parameter as required by + // ArrayNArgumentsConstructor. + __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue); + __ CmpLogicalP(r2, Operand(1)); __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind) diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc index 93a2b8b5f3f08c..630473f407ad67 100644 --- a/deps/v8/src/builtins/setup-builtins-internal.cc +++ b/deps/v8/src/builtins/setup-builtins-internal.cc @@ -12,6 +12,7 @@ #include "src/interface-descriptors.h" #include "src/interpreter/bytecodes.h" #include "src/interpreter/interpreter-generator.h" +#include "src/interpreter/interpreter.h" #include "src/isolate.h" #include "src/objects-inl.h" #include "src/objects/shared-function-info.h" @@ -26,6 +27,7 @@ BUILTIN_LIST_C(FORWARD_DECLARE) #undef FORWARD_DECLARE namespace { + void PostBuildProfileAndTracing(Isolate* isolate, Code* code, const char* name) { PROFILE(isolate, CodeCreateEvent(CodeEventListener::BUILTIN_TAG, @@ -48,10 +50,11 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate, return options; } - CodeRange* code_range = isolate->heap()->memory_allocator()->code_range(); + const base::AddressRegion& code_range = + isolate->heap()->memory_allocator()->code_range(); bool pc_relative_calls_fit_in_code_range = - code_range->valid() && - code_range->size() <= kMaxPCRelativeCodeRangeInMB * MB; + !code_range.is_empty() && + code_range.size() <= kMaxPCRelativeCodeRangeInMB * MB; options.isolate_independent_code = true; options.use_pc_relative_calls_and_jumps = pc_relative_calls_fit_in_code_range; @@ -180,6 +183,7 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index, PostBuildProfileAndTracing(isolate, *code, name); return *code; } + } // anonymous namespace // static @@ -246,26 +250,36 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) { } } -#ifdef V8_EMBEDDED_BYTECODE_HANDLERS namespace { + Code* GenerateBytecodeHandler(Isolate* isolate, int builtin_index, - const char* name, interpreter::Bytecode bytecode, - interpreter::OperandScale operand_scale) { - if (!interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) { - // TODO(v8:8068): Consider returning something else to avoid placeholders - // being serialized with the snapshot. 
- return nullptr; - } + const char* name, + interpreter::OperandScale operand_scale, + interpreter::Bytecode bytecode) { + DCHECK(interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)); Handle code = interpreter::GenerateBytecodeHandler( - isolate, bytecode, operand_scale, builtin_index); + isolate, bytecode, operand_scale, builtin_index, + BuiltinAssemblerOptions(isolate, builtin_index)); PostBuildProfileAndTracing(isolate, *code, name); return *code; } + +Code* GenerateLazyBytecodeHandler(Isolate* isolate, int builtin_index, + const char* name, + interpreter::OperandScale operand_scale) { + Handle code = interpreter::GenerateDeserializeLazyHandler( + isolate, operand_scale, builtin_index, + BuiltinAssemblerOptions(isolate, builtin_index)); + + PostBuildProfileAndTracing(isolate, *code, name); + + return *code; +} + } // namespace -#endif // static void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) { @@ -309,19 +323,15 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) { CallDescriptors::InterfaceDescriptor, #Name, 1); \ AddBuiltin(builtins, index++, code); -#define BUILD_BCH_WITH_SCALE(Code, Scale) \ +#define BUILD_BCH(Name, OperandScale, Bytecode) \ code = GenerateBytecodeHandler(isolate, index, Builtins::name(index), \ - interpreter::Bytecode::k##Code, \ - interpreter::OperandScale::k##Scale); \ - if (code) { \ - AddBuiltin(builtins, index, code); \ - } \ - ++index; - -#define BUILD_BCH(Code, ...) \ - BUILD_BCH_WITH_SCALE(Code, Single) \ - BUILD_BCH_WITH_SCALE(Code, Double) \ - BUILD_BCH_WITH_SCALE(Code, Quadruple) + OperandScale, Bytecode); \ + AddBuiltin(builtins, index++, code); + +#define BUILD_DLH(Name, OperandScale) \ + code = GenerateLazyBytecodeHandler(isolate, index, Builtins::name(index), \ + OperandScale); \ + AddBuiltin(builtins, index++, code); #define BUILD_ASM(Name) \ code = BuildWithMacroAssembler(isolate, index, Builtins::Generate_##Name, \ @@ -329,7 +339,7 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) { AddBuiltin(builtins, index++, code); BUILTIN_LIST(BUILD_CPP, BUILD_API, BUILD_TFJ, BUILD_TFC, BUILD_TFS, BUILD_TFH, - BUILD_BCH, BUILD_ASM); + BUILD_BCH, BUILD_DLH, BUILD_ASM); #undef BUILD_CPP #undef BUILD_API @@ -338,7 +348,7 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) { #undef BUILD_TFS #undef BUILD_TFH #undef BUILD_BCH -#undef BUILD_BCH_WITH_SCALE +#undef BUILD_DLH #undef BUILD_ASM CHECK_EQ(Builtins::builtin_count, index); diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq index 7552b094e7da38..66d0d34bfcc40f 100644 --- a/deps/v8/src/builtins/typed-array.tq +++ b/deps/v8/src/builtins/typed-array.tq @@ -61,7 +61,7 @@ module typed_array { context: Context, array: JSTypedArray, index: Smi, value: Object): Object { const elements: FixedTypedArrayBase = - unsafe_cast(array.elements); + UnsafeCast(array.elements); StoreFixedTypedArrayElementFromTagged( context, elements, index, value, KindForArrayType(), SMI_PARAMETERS); return Undefined; @@ -86,37 +86,37 @@ module typed_array { // InsertionSort is used for smaller arrays. 
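These macro changes follow from BUILTIN_LIST being an X-macro: the list is expanded once, and each BUILD_* macro passed in generates one builtin and advances the running index. Eager bytecode handlers are now enumerated per (bytecode, operand scale) pair, so BUILD_BCH no longer loops over the three scales itself, and the lazy DeserializeLazy handlers get their own BUILD_DLH entry. A toy sketch of the dispatch pattern (list contents and signatures invented, far smaller than the real BUILTIN_LIST):

#include <cstdio>

// Each list entry names the macro that should expand it.
#define DEMO_BUILTIN_LIST(BCH, DLH) \
  BCH(LdaZero, Single)              \
  BCH(LdaZero, Double)              \
  DLH(DeserializeLazy, Single)

static int index = 0;

#define BUILD_BCH(Name, Scale) \
  printf("%d: eager handler %s/%s\n", index++, #Name, #Scale);
#define BUILD_DLH(Name, Scale) \
  printf("%d: lazy handler %s/%s\n", index++, #Name, #Scale);

int main() {
  DEMO_BUILTIN_LIST(BUILD_BCH, BUILD_DLH)  // expands to three generators
}

#undef BUILD_BCH
#undef BUILD_DLH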
macro TypedArrayInsertionSort( - context: Context, array: JSTypedArray, from_arg: Smi, to_arg: Smi, - comparefn: Callable, Load: LoadFn, Store: StoreFn) + context: Context, array: JSTypedArray, fromArg: Smi, toArg: Smi, + comparefn: Callable, load: LoadFn, store: StoreFn) labels Detached { - let from: Smi = from_arg; - let to: Smi = to_arg; + let from: Smi = fromArg; + let to: Smi = toArg; if (IsDetachedBuffer(array.buffer)) goto Detached; for (let i: Smi = from + 1; i < to; ++i) { - const element: Object = Load(context, array, i); + const element: Object = load(context, array, i); let j: Smi = i - 1; for (; j >= from; --j) { - const tmp: Object = Load(context, array, j); + const tmp: Object = load(context, array, j); const order: Number = CallCompareWithDetachedCheck( context, array, comparefn, tmp, element) otherwise Detached; if (order > 0) { - Store(context, array, j + 1, tmp); + store(context, array, j + 1, tmp); } else { break; } } - Store(context, array, j + 1, element); + store(context, array, j + 1, element); } } macro TypedArrayQuickSortImpl( - context: Context, array: JSTypedArray, from_arg: Smi, to_arg: Smi, - comparefn: Callable, Load: LoadFn, Store: StoreFn) + context: Context, array: JSTypedArray, fromArg: Smi, toArg: Smi, + comparefn: Callable, load: LoadFn, store: StoreFn) labels Detached { - let from: Smi = from_arg; - let to: Smi = to_arg; + let from: Smi = fromArg; + let to: Smi = toArg; while (to - from > 1) { if (to - from <= 10) { @@ -124,21 +124,21 @@ module typed_array { // Currently it does not make any difference when the // benchmarks are run locally. TypedArrayInsertionSort( - context, array, from, to, comparefn, Load, Store) + context, array, from, to, comparefn, load, store) otherwise Detached; break; } - // TODO(szuend): Check if a more involved third_index calculation is + // TODO(szuend): Check if a more involved thirdIndex calculation is // worth it for very large arrays. - const third_index: Smi = from + ((to - from) >>> 1); + const thirdIndex: Smi = from + ((to - from) >>> 1); if (IsDetachedBuffer(array.buffer)) goto Detached; // Find a pivot as the median of first, last and middle element. - let v0: Object = Load(context, array, from); - let v1: Object = Load(context, array, to - 1); - let v2: Object = Load(context, array, third_index); + let v0: Object = load(context, array, from); + let v1: Object = load(context, array, to - 1); + let v2: Object = load(context, array, thirdIndex); const c01: Number = CallCompareWithDetachedCheck( context, array, comparefn, v0, v1) otherwise Detached; @@ -170,80 +170,80 @@ module typed_array { } // v0 <= v1 <= v2. - Store(context, array, from, v0); - Store(context, array, to - 1, v2); + store(context, array, from, v0); + store(context, array, to - 1, v2); const pivot: Object = v1; - let low_end: Smi = from + 1; // Upper bound of elems lower than pivot. - let high_start: Smi = to - 1; // Lower bound of elems greater than pivot. + let lowEnd: Smi = from + 1; // Upper bound of elems lower than pivot. + let highStart: Smi = to - 1; // Lower bound of elems greater than pivot. - let low_end_value: Object = Load(context, array, low_end); - Store(context, array, third_index, low_end_value); - Store(context, array, low_end, pivot); + let lowEndValue: Object = load(context, array, lowEnd); + store(context, array, thirdIndex, lowEndValue); + store(context, array, lowEnd, pivot); - // From low_end to idx are elements equal to pivot. - // From idx to high_start are elements that haven"t been compared yet. 
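For reference, the algorithm these Torque macros implement (the renames below do not change it) is a quicksort that takes the median of the first, middle and last element as pivot, falls back to insertion sort for ranges of at most ten elements, and recurses only into the smaller partition so the stack stays O(log n). A plain C++ rendering over int, with the detached-buffer checks and the Load/Store indirection omitted:

#include <algorithm>
#include <cstdio>
#include <vector>

void InsertionSort(std::vector<int>& a, int from, int to) {
  for (int i = from + 1; i < to; ++i) {
    int element = a[i];
    int j = i - 1;
    for (; j >= from && a[j] > element; --j) a[j + 1] = a[j];
    a[j + 1] = element;
  }
}

void QuickSort(std::vector<int>& a, int from, int to) {
  while (to - from > 1) {
    if (to - from <= 10) {  // small ranges: insertion sort
      InsertionSort(a, from, to);
      return;
    }
    int mid = from + ((to - from) >> 1);
    // Median of first, middle and last element becomes the pivot.
    if (a[from] > a[to - 1]) std::swap(a[from], a[to - 1]);
    if (a[mid] < a[from]) std::swap(a[mid], a[from]);
    if (a[mid] > a[to - 1]) std::swap(a[mid], a[to - 1]);
    int pivot = a[mid];
    // Hoare-style partition.
    int i = from, j = to - 1;
    while (i <= j) {
      while (a[i] < pivot) ++i;
      while (a[j] > pivot) --j;
      if (i <= j) std::swap(a[i++], a[j--]);
    }
    // Recurse into the smaller range; iterate on the larger one.
    if (to - i < j + 1 - from) {
      QuickSort(a, i, to);
      to = j + 1;
    } else {
      QuickSort(a, from, j + 1);
      from = i;
    }
  }
}

int main() {
  std::vector<int> v = {5, 3, 8, 1, 9, 2, 7, 4, 6, 0, 15, 13, 11, 14, 12};
  QuickSort(v, 0, static_cast<int>(v.size()));
  for (int x : v) printf("%d ", x);
  printf("\n");
}

(The Torque version's partition additionally tracks a run of pivot-equal elements, per the lowEnd/highStart comments; the sketch settles for the classic two-index form.)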
- for (let idx: Smi = low_end + 1; idx < high_start; idx++) { - let element: Object = Load(context, array, idx); + // From lowEnd to idx are elements equal to pivot. + // From idx to highStart are elements that haven't been compared yet. + for (let idx: Smi = lowEnd + 1; idx < highStart; idx++) { + let element: Object = load(context, array, idx); let order: Number = CallCompareWithDetachedCheck( context, array, comparefn, element, pivot) otherwise Detached; if (order < 0) { - low_end_value = Load(context, array, low_end); - Store(context, array, idx, low_end_value); - Store(context, array, low_end, element); - low_end++; + lowEndValue = load(context, array, lowEnd); + store(context, array, idx, lowEndValue); + store(context, array, lowEnd, element); + lowEnd++; } else if (order > 0) { - let break_for: bool = false; + let breakFor: bool = false; while (order > 0) { - high_start--; - if (high_start == idx) { - break_for = true; + highStart--; + if (highStart == idx) { + breakFor = true; break; } - const top_elem: Object = Load(context, array, high_start); + const topElement: Object = load(context, array, highStart); order = CallCompareWithDetachedCheck( - context, array, comparefn, top_elem, pivot) otherwise Detached; + context, array, comparefn, topElement, pivot) otherwise Detached; } - if (break_for) { + if (breakFor) { break; } - const high_start_value: Object = Load(context, array, high_start); - Store(context, array, idx, high_start_value); - Store(context, array, high_start, element); + const highStartValue: Object = load(context, array, highStart); + store(context, array, idx, highStartValue); + store(context, array, highStart, element); if (order < 0) { - element = Load(context, array, idx); + element = load(context, array, idx); - low_end_value = Load(context, array, low_end); - Store(context, array, idx, low_end_value); - Store(context, array, low_end, element); - low_end++; + lowEndValue = load(context, array, lowEnd); + store(context, array, idx, lowEndValue); + store(context, array, lowEnd, element); + lowEnd++; } } } - if ((to - high_start) < (low_end - from)) { + if ((to - highStart) < (lowEnd - from)) { TypedArrayQuickSort( - context, array, high_start, to, comparefn, Load, Store); - to = low_end; + context, array, highStart, to, comparefn, load, store); + to = lowEnd; } else { TypedArrayQuickSort( - context, array, from, low_end, comparefn, Load, Store); - from = high_start; + context, array, from, lowEnd, comparefn, load, store); + from = highStart; } } } builtin TypedArrayQuickSort( context: Context, array: JSTypedArray, from: Smi, to: Smi, - comparefn: Callable, Load: LoadFn, Store: StoreFn): JSTypedArray { + comparefn: Callable, load: LoadFn, store: StoreFn): JSTypedArray { try { - TypedArrayQuickSortImpl(context, array, from, to, comparefn, Load, Store) + TypedArrayQuickSortImpl(context, array, from, to, comparefn, load, store) otherwise Detached; } label Detached { @@ -258,10 +258,10 @@ module typed_array { context: Context, receiver: Object, ...arguments): JSTypedArray { // 1. If comparefn is not undefined and IsCallable(comparefn) is false, // throw a TypeError exception. - const comparefn_obj: Object = + const comparefnObj: Object = arguments.length > 0 ? arguments[0] : Undefined; - if (comparefn_obj != Undefined && !TaggedIsCallable(comparefn_obj)) { - ThrowTypeError(context, kBadSortComparisonFunction, comparefn_obj); + if (comparefnObj != Undefined && !TaggedIsCallable(comparefnObj)) { + ThrowTypeError(context, kBadSortComparisonFunction, comparefnObj); } // 2. 
Let obj be the this value. @@ -273,7 +273,7 @@ module typed_array { ValidateTypedArray(context, obj, '%TypedArray%.prototype.sort'); // Default sorting is done in C++ using std::sort - if (comparefn_obj == Undefined) { + if (comparefnObj == Undefined) { return TypedArraySortFast(context, obj); } @@ -282,48 +282,48 @@ module typed_array { try { const comparefn: Callable = - cast(comparefn_obj) otherwise CastError; + Cast(comparefnObj) otherwise CastError; let loadfn: LoadFn; let storefn: StoreFn; - let elements_kind: ElementsKind = array.elements_kind; + let elementsKind: ElementsKind = array.elements_kind; - if (IsElementsKindGreaterThan(elements_kind, UINT32_ELEMENTS)) { - if (elements_kind == INT32_ELEMENTS) { + if (IsElementsKindGreaterThan(elementsKind, UINT32_ELEMENTS)) { + if (elementsKind == INT32_ELEMENTS) { loadfn = LoadFixedElement; storefn = StoreFixedElement; - } else if (elements_kind == FLOAT32_ELEMENTS) { + } else if (elementsKind == FLOAT32_ELEMENTS) { loadfn = LoadFixedElement; storefn = StoreFixedElement; - } else if (elements_kind == FLOAT64_ELEMENTS) { + } else if (elementsKind == FLOAT64_ELEMENTS) { loadfn = LoadFixedElement; storefn = StoreFixedElement; - } else if (elements_kind == UINT8_CLAMPED_ELEMENTS) { + } else if (elementsKind == UINT8_CLAMPED_ELEMENTS) { loadfn = LoadFixedElement; storefn = StoreFixedElement; - } else if (elements_kind == BIGUINT64_ELEMENTS) { + } else if (elementsKind == BIGUINT64_ELEMENTS) { loadfn = LoadFixedElement; storefn = StoreFixedElement; - } else if (elements_kind == BIGINT64_ELEMENTS) { + } else if (elementsKind == BIGINT64_ELEMENTS) { loadfn = LoadFixedElement; storefn = StoreFixedElement; } else { unreachable; } } else { - if (elements_kind == UINT8_ELEMENTS) { + if (elementsKind == UINT8_ELEMENTS) { loadfn = LoadFixedElement; storefn = StoreFixedElement; - } else if (elements_kind == INT8_ELEMENTS) { + } else if (elementsKind == INT8_ELEMENTS) { loadfn = LoadFixedElement; storefn = StoreFixedElement; - } else if (elements_kind == UINT16_ELEMENTS) { + } else if (elementsKind == UINT16_ELEMENTS) { loadfn = LoadFixedElement; storefn = StoreFixedElement; - } else if (elements_kind == INT16_ELEMENTS) { + } else if (elementsKind == INT16_ELEMENTS) { loadfn = LoadFixedElement; storefn = StoreFixedElement; - } else if (elements_kind == UINT32_ELEMENTS) { + } else if (elementsKind == UINT32_ELEMENTS) { loadfn = LoadFixedElement; storefn = StoreFixedElement; } else { diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index 5c2094105c3c0c..50e3b9df88134c 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -87,7 +87,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ Push(rcx); // The receiver for the builtin/api call. - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); // Set up pointer to last argument. 
__ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset)); @@ -157,7 +157,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Push(rsi); __ Push(rcx); __ Push(rdi); - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); __ Push(rdx); // ----------- S t a t e ------------- @@ -181,7 +181,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Else: use TheHoleValue as receiver for constructor call __ bind(¬_create_implicit_receiver); - __ LoadRoot(rax, Heap::kTheHoleValueRootIndex); + __ LoadRoot(rax, RootIndex::kTheHoleValue); // ----------- S t a t e ------------- // -- rax implicit receiver @@ -269,8 +269,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { Label use_receiver, do_throw, leave_frame; // If the result is undefined, we jump out to using the implicit receiver. - __ JumpIfRoot(rax, Heap::kUndefinedValueRootIndex, &use_receiver, - Label::kNear); + __ JumpIfRoot(rax, RootIndex::kUndefinedValue, &use_receiver, Label::kNear); // Otherwise we do a smi check and fall through to check if the return value // is a valid receiver. @@ -292,7 +291,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // on-stack receiver as the result. __ bind(&use_receiver); __ movp(rax, Operand(rsp, 0 * kPointerSize)); - __ JumpIfRoot(rax, Heap::kTheHoleValueRootIndex, &do_throw, Label::kNear); + __ JumpIfRoot(rax, RootIndex::kTheHoleValue, &do_throw, Label::kNear); __ bind(&leave_frame); // Restore the arguments count. @@ -324,7 +323,7 @@ static void Generate_StackOverflowCheck( // Check the stack for overflow. We are not trying to catch // interruptions (e.g. debug break and preemption) here, so the "real stack // limit" is checked. - __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex); + __ LoadRoot(kScratchRegister, RootIndex::kRealStackLimit); __ movp(scratch, rsp); // Make scratch the space we have left. The stack might already be overflowed // here which will cause scratch to become negative. @@ -533,7 +532,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". Label stack_overflow; - __ CompareRoot(rsp, Heap::kRealStackLimitRootIndex); + __ CompareRoot(rsp, RootIndex::kRealStackLimit); __ j(below, &stack_overflow); // Pop return address. @@ -602,7 +601,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Push(rdx); __ Push(rdi); // Push hole as receiver since we do not use it for stepping. - __ PushRoot(Heap::kTheHoleValueRootIndex); + __ PushRoot(RootIndex::kTheHoleValue); __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(rdx); __ movp(rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset)); @@ -905,7 +904,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { Label ok; __ movp(rax, rsp); __ subp(rax, rcx); - __ CompareRoot(rax, Heap::kRealStackLimitRootIndex); + __ CompareRoot(rax, RootIndex::kRealStackLimit); __ j(above_equal, &ok, Label::kNear); __ CallRuntime(Runtime::kThrowStackOverflow); __ bind(&ok); @@ -913,7 +912,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // If ok, push undefined as the initial value for all register file entries. 
Label loop_header; Label loop_check; - __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); + __ LoadRoot(rax, RootIndex::kUndefinedValue); __ j(always, &loop_check, Label::kNear); __ bind(&loop_header); // TODO(rmcilroy): Consider doing more than one push per loop iteration. @@ -937,7 +936,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ bind(&no_incoming_new_target_or_generator_register); // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. @@ -1026,7 +1025,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( // Push "undefined" as the receiver arg if we need to. if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ decl(rcx); // Subtract one for receiver. } @@ -1251,7 +1250,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { rbp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize)); } for (int i = 0; i < 3 - j; ++i) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); } if (j < 3) { __ jmp(&args_done, Label::kNear); @@ -1370,7 +1369,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { { Label no_arg_array, no_this_arg; StackArgumentsAccessor args(rsp, rax); - __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); + __ LoadRoot(rdx, RootIndex::kUndefinedValue); __ movp(rbx, rdx); __ movp(rdi, args.GetReceiverOperand()); __ testp(rax, rax); @@ -1402,9 +1401,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // 3. Tail call with no arguments if argArray is null or undefined. Label no_arguments; - __ JumpIfRoot(rbx, Heap::kNullValueRootIndex, &no_arguments, Label::kNear); - __ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &no_arguments, - Label::kNear); + __ JumpIfRoot(rbx, RootIndex::kNullValue, &no_arguments, Label::kNear); + __ JumpIfRoot(rbx, RootIndex::kUndefinedValue, &no_arguments, Label::kNear); // 4a. Apply the receiver to the given argArray. 
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), @@ -1438,7 +1436,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { __ testp(rax, rax); __ j(not_zero, &done, Label::kNear); __ PopReturnAddressTo(rbx); - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ PushReturnAddressFrom(rbx); __ incp(rax); __ bind(&done); @@ -1488,7 +1486,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { { Label done; StackArgumentsAccessor args(rsp, rax); - __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex); + __ LoadRoot(rdi, RootIndex::kUndefinedValue); __ movp(rdx, rdi); __ movp(rbx, rdi); __ cmpp(rax, Immediate(1)); @@ -1539,7 +1537,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { { Label done; StackArgumentsAccessor args(rsp, rax); - __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex); + __ LoadRoot(rdi, RootIndex::kUndefinedValue); __ movp(rdx, rdi); __ movp(rbx, rdi); __ cmpp(rax, Immediate(1)); @@ -1554,7 +1552,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ bind(&done); __ PopReturnAddressTo(rcx); __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize)); - __ PushRoot(Heap::kUndefinedValueRootIndex); + __ PushRoot(RootIndex::kUndefinedValue); __ PushReturnAddressFrom(rcx); } @@ -1601,7 +1599,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { // Run the native code for the InternalArray function called as a normal // function. - __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl), RelocInfo::CODE_TARGET); } @@ -1701,7 +1698,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // Fill remaining expected arguments with undefined values. Label fill; - __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); + __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue); __ bind(&fill); __ incp(r8); __ Push(kScratchRegister); @@ -1777,23 +1774,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ bind(&ok); } - // Check for stack overflow. - { - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack limit". - Label done; - __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex); - __ movp(r8, rsp); - // Make r8 the space we have left. The stack might already be overflowed - // here which will cause r8 to become negative. - __ subp(r8, kScratchRegister); - __ sarp(r8, Immediate(kPointerSizeLog2)); - // Check if the arguments will overflow the stack. - __ cmpp(r8, rcx); - __ j(greater, &done, Label::kNear); // Signed comparison. - __ TailCallRuntime(Runtime::kThrowStackOverflow); - __ bind(&done); - } + Label stack_overflow; + Generate_StackOverflowCheck(masm, rcx, r8, &stack_overflow, Label::kNear); // Push additional arguments onto the stack. { @@ -1806,9 +1788,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Turn the hole into undefined as we go. 
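The loop that follows this comment substitutes undefined for every hole it copies out of the backing FixedArray, so the callee never observes the engine-internal sentinel. The same idea as a standalone sketch, with invented integer tags standing in for the real TheHole and undefined objects:

#include <cstdio>
#include <vector>

enum : int { kTheHole = -1, kUndefined = 0 };  // illustrative tags only

std::vector<int> PushArguments(const std::vector<int>& fixed_array) {
  std::vector<int> stack;
  for (int value : fixed_array) {
    // Holes become undefined on the way to the stack.
    stack.push_back(value == kTheHole ? kUndefined : value);
  }
  return stack;
}

int main() {
  for (int v : PushArguments({1, kTheHole, 3})) printf("%d ", v);  // 1 0 3
  printf("\n");
}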
__ movp(r11, FieldOperand(rbx, r9, times_pointer_size, FixedArray::kHeaderSize)); - __ CompareRoot(r11, Heap::kTheHoleValueRootIndex); + __ CompareRoot(r11, RootIndex::kTheHoleValue); __ j(not_equal, &push, Label::kNear); - __ LoadRoot(r11, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r11, RootIndex::kUndefinedValue); __ bind(&push); __ Push(r11); __ incl(r9); @@ -1820,6 +1802,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Tail-call to the actual Call or Construct builtin. __ Jump(code, RelocInfo::CODE_TARGET); + + __ bind(&stack_overflow); + __ TailCallRuntime(Runtime::kThrowStackOverflow); } // static @@ -1957,9 +1942,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ j(above_equal, &done_convert); if (mode != ConvertReceiverMode::kNotNullOrUndefined) { Label convert_global_proxy; - __ JumpIfRoot(rcx, Heap::kUndefinedValueRootIndex, - &convert_global_proxy, Label::kNear); - __ JumpIfNotRoot(rcx, Heap::kNullValueRootIndex, &convert_to_object, + __ JumpIfRoot(rcx, RootIndex::kUndefinedValue, &convert_global_proxy, + Label::kNear); + __ JumpIfNotRoot(rcx, RootIndex::kNullValue, &convert_to_object, Label::kNear); __ bind(&convert_global_proxy); { @@ -2049,8 +2034,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack // limit". - __ CompareRoot(rsp, Heap::kRealStackLimitRootIndex); - __ j(greater, &done, Label::kNear); // Signed comparison. + __ CompareRoot(rsp, RootIndex::kRealStackLimit); + __ j(above_equal, &done, Label::kNear); // Restore the stack pointer. __ leap(rsp, Operand(rsp, rbx, times_pointer_size, 0)); { @@ -2183,7 +2168,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // Calling convention for function specific ConstructStubs requires // rbx to contain either an AllocationSite or undefined. - __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); + __ LoadRoot(rbx, RootIndex::kUndefinedValue); // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); @@ -2277,15 +2262,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { RelocInfo::CODE_TARGET); } -static void Generate_OnStackReplacementHelper(MacroAssembler* masm, - bool has_handler_frame) { +void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame. - if (has_handler_frame) { - __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - __ movp(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset)); - } else { - __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); - } + __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); + __ movp(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset)); { FrameScope scope(masm, StackFrame::INTERNAL); @@ -2302,11 +2282,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ bind(&skip); - // Drop any potential handler frame that is be sitting on top of the actual + // Drop the handler frame that is sitting on top of the actual // JavaScript frame. This is the case when OSR is triggered from bytecode. - if (has_handler_frame) { - __ leave(); - } + __ leave(); // Load deoptimization data from the code object. 
__ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag)); @@ -2326,14 +2304,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm, __ ret(0); } -void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, false); -} - -void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - Generate_OnStackReplacementHelper(masm, true); -} - void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was pushed to the stack by the caller as int32. __ Pop(r11); @@ -2486,14 +2456,14 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Check result for exception sentinel. Label exception_returned; - __ CompareRoot(rax, Heap::kExceptionRootIndex); + __ CompareRoot(rax, RootIndex::kException); __ j(equal, &exception_returned); // Check that there is no pending exception, otherwise we // should have returned the exception sentinel. if (FLAG_debug_code) { Label okay; - __ LoadRoot(r14, Heap::kTheHoleValueRootIndex); + __ LoadRoot(r14, RootIndex::kTheHoleValue); ExternalReference pending_exception_address = ExternalReference::Create( IsolateAddressId::kPendingExceptionAddress, masm->isolate()); Operand pending_exception_operand = @@ -2789,6 +2759,9 @@ void GenerateInternalArrayConstructorCase(MacroAssembler* masm, RelocInfo::CODE_TARGET); __ bind(¬_one_case); + // Load undefined into the allocation site parameter as required by + // ArrayNArgumentsConstructor. + __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue); Handle code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor); __ Jump(code, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/callable.h b/deps/v8/src/callable.h index 3d9eb274b02246..c24c9ae5549387 100644 --- a/deps/v8/src/callable.h +++ b/deps/v8/src/callable.h @@ -14,7 +14,7 @@ namespace internal { class Code; // Associates a body of code with an interface descriptor. 
-class Callable final BASE_EMBEDDED { +class Callable final { public: Callable(Handle code, CallInterfaceDescriptor descriptor) : code_(code), descriptor_(descriptor) {} diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h index 7e198d58089d37..3662514bcae017 100644 --- a/deps/v8/src/char-predicates-inl.h +++ b/deps/v8/src/char-predicates-inl.h @@ -18,23 +18,14 @@ inline int AsciiAlphaToLower(uc32 c) { return c | 0x20; } - inline bool IsCarriageReturn(uc32 c) { return c == 0x000D; } - inline bool IsLineFeed(uc32 c) { return c == 0x000A; } - -inline bool IsInRange(int value, int lower_limit, int higher_limit) { - DCHECK(lower_limit <= higher_limit); - return static_cast(value - lower_limit) <= - static_cast(higher_limit - lower_limit); -} - inline bool IsAsciiIdentifier(uc32 c) { return IsAlphaNumeric(c) || c == '$' || c == '_'; } @@ -58,6 +49,8 @@ inline bool IsOctalDigit(uc32 c) { return IsInRange(c, '0', '7'); } +inline bool IsNonOctalDecimalDigit(uc32 c) { return IsInRange(c, '8', '9'); } + inline bool IsBinaryDigit(uc32 c) { // ECMA-262, 6th, 7.8.3 return c == '0' || c == '1'; diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h index 8eb10f43dd7e97..4a0afa4f571a03 100644 --- a/deps/v8/src/checks.h +++ b/deps/v8/src/checks.h @@ -5,7 +5,7 @@ #ifndef V8_CHECKS_H_ #define V8_CHECKS_H_ -#include "include/v8.h" +#include "include/v8-internal.h" #include "src/base/logging.h" #include "src/globals.h" diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h index ec07a2e107917b..07a883be0db7d9 100644 --- a/deps/v8/src/code-events.h +++ b/deps/v8/src/code-events.h @@ -66,7 +66,7 @@ class CodeEventListener { }; #undef DECLARE_ENUM - virtual ~CodeEventListener() {} + virtual ~CodeEventListener() = default; virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code, const char* comment) = 0; @@ -98,7 +98,7 @@ class CodeEventDispatcher { public: using LogEventsAndTags = CodeEventListener::LogEventsAndTags; - CodeEventDispatcher() {} + CodeEventDispatcher() = default; bool AddListener(CodeEventListener* listener) { base::LockGuard guard(&mutex_); diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc index b6eb03f81babd9..cffb16b7d4e1e5 100644 --- a/deps/v8/src/code-factory.cc +++ b/deps/v8/src/code-factory.cc @@ -166,27 +166,15 @@ Callable CodeFactory::OrdinaryToPrimitive(Isolate* isolate, } // static -Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags, - PretenureFlag pretenure_flag) { - if (pretenure_flag == NOT_TENURED) { - switch (flags) { - case STRING_ADD_CHECK_NONE: - return Builtins::CallableFor(isolate, - Builtins::kStringAdd_CheckNone_NotTenured); - case STRING_ADD_CONVERT_LEFT: - return Builtins::CallableFor( - isolate, Builtins::kStringAdd_ConvertLeft_NotTenured); - case STRING_ADD_CONVERT_RIGHT: - return Builtins::CallableFor( - isolate, Builtins::kStringAdd_ConvertRight_NotTenured); - } - } else { - CHECK_EQ(TENURED, pretenure_flag); - CHECK_EQ(STRING_ADD_CHECK_NONE, flags); - return Builtins::CallableFor(isolate, - Builtins::kStringAdd_CheckNone_Tenured); +Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags) { + switch (flags) { + case STRING_ADD_CHECK_NONE: + return Builtins::CallableFor(isolate, Builtins::kStringAdd_CheckNone); + case STRING_ADD_CONVERT_LEFT: + return Builtins::CallableFor(isolate, Builtins::kStringAdd_ConvertLeft); + case STRING_ADD_CONVERT_RIGHT: + return Builtins::CallableFor(isolate, Builtins::kStringAdd_ConvertRight); } - UNREACHABLE(); } @@ -218,7 
+206,7 @@ Callable CodeFactory::FastNewFunctionContext(Isolate* isolate, // static Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) { return Callable(BUILTIN_CODE(isolate, ArgumentsAdaptorTrampoline), - ArgumentAdaptorDescriptor{}); + ArgumentsAdaptorDescriptor{}); } // static diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h index cba6136a386f82..3e8bc3790ca153 100644 --- a/deps/v8/src/code-factory.h +++ b/deps/v8/src/code-factory.h @@ -59,8 +59,7 @@ class V8_EXPORT_PRIVATE CodeFactory final { OrdinaryToPrimitiveHint hint); static Callable StringAdd(Isolate* isolate, - StringAddFlags flags = STRING_ADD_CHECK_NONE, - PretenureFlag pretenure_flag = NOT_TENURED); + StringAddFlags flags = STRING_ADD_CHECK_NONE); static Callable FastNewFunctionContext(Isolate* isolate, ScopeType scope_type); diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc index 2527e89a25061e..039090623b5645 100644 --- a/deps/v8/src/code-stub-assembler.cc +++ b/deps/v8/src/code-stub-assembler.cc @@ -225,7 +225,7 @@ TNode CodeStubAssembler::NoContextConstant() { CodeStubAssembler::name##Constant() { \ return UncheckedCast().rootAccessorName())>::type>( \ - LoadRoot(Heap::k##rootIndexName##RootIndex)); \ + LoadRoot(RootIndex::k##rootIndexName)); \ } HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR); #undef HEAP_CONSTANT_ACCESSOR @@ -236,7 +236,7 @@ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR); CodeStubAssembler::name##Constant() { \ return UncheckedCast().rootAccessorName())>::type>( \ - LoadRoot(Heap::k##rootIndexName##RootIndex)); \ + LoadRoot(RootIndex::k##rootIndexName)); \ } HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR); #undef HEAP_CONSTANT_ACCESSOR @@ -253,40 +253,6 @@ HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR); HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST); #undef HEAP_CONSTANT_TEST -TNode CodeStubAssembler::HashSeed() { - DCHECK(Is64()); - TNode hash_seed_root = - TNode::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex)); - return TNode::UncheckedCast(LoadObjectField( - hash_seed_root, ByteArray::kHeaderSize, MachineType::Int64())); -} - -TNode CodeStubAssembler::HashSeedHigh() { - DCHECK(!Is64()); -#ifdef V8_TARGET_BIG_ENDIAN - static int kOffset = 0; -#else - static int kOffset = kInt32Size; -#endif - TNode hash_seed_root = - TNode::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex)); - return TNode::UncheckedCast(LoadObjectField( - hash_seed_root, ByteArray::kHeaderSize + kOffset, MachineType::Int32())); -} - -TNode CodeStubAssembler::HashSeedLow() { - DCHECK(!Is64()); -#ifdef V8_TARGET_BIG_ENDIAN - static int kOffset = kInt32Size; -#else - static int kOffset = 0; -#endif - TNode hash_seed_root = - TNode::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex)); - return TNode::UncheckedCast(LoadObjectField( - hash_seed_root, ByteArray::kHeaderSize + kOffset, MachineType::Int32())); -} - Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) { if (mode == SMI_PARAMETERS) { return SmiConstant(value); @@ -940,6 +906,17 @@ TNode CodeStubAssembler::TrySmiDiv(TNode dividend, TNode divisor, return SmiFromInt32(untagged_result); } +TNode CodeStubAssembler::SmiLexicographicCompare(TNode x, + TNode y) { + TNode smi_lexicographic_compare = + ExternalConstant(ExternalReference::smi_lexicographic_compare_function()); + TNode isolate_ptr = + ExternalConstant(ExternalReference::isolate_address(isolate())); + return CAST(CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(), + 
MachineType::AnyTagged(), MachineType::AnyTagged(), + smi_lexicographic_compare, isolate_ptr, x, y)); +} + TNode CodeStubAssembler::TruncateIntPtrToInt32( SloppyTNode value) { if (Is64()) { @@ -996,9 +973,9 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements( CSA_SLOW_ASSERT(this, IsMap(receiver_map)); VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map); Label loop_body(this, &var_map); - Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex); + Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray); Node* empty_slow_element_dictionary = - LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex); + LoadRoot(RootIndex::kEmptySlowElementDictionary); Goto(&loop_body); BIND(&loop_body); @@ -1073,7 +1050,7 @@ TNode CodeStubAssembler::IsFastJSArrayWithNoCustomIteration( { // Check that the Array.prototype hasn't been modified in a way that would // affect iteration. - Node* protector_cell = LoadRoot(Heap::kArrayIteratorProtectorRootIndex); + Node* protector_cell = LoadRoot(RootIndex::kArrayIteratorProtector); DCHECK(isolate()->heap()->array_iterator_protector()->IsPropertyCell()); var_result = WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), @@ -1132,6 +1109,19 @@ void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) { #endif } +void CodeStubAssembler::GotoIfDebugExecutionModeChecksSideEffects( + Label* if_true) { + STATIC_ASSERT(sizeof(DebugInfo::ExecutionMode) >= sizeof(int32_t)); + + TNode execution_mode_address = ExternalConstant( + ExternalReference::debug_execution_mode_address(isolate())); + TNode execution_mode = + UncheckedCast(Load(MachineType::Int32(), execution_mode_address)); + + GotoIf(Word32Equal(execution_mode, Int32Constant(DebugInfo::kSideEffects)), + if_true); +} + Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags, Node* top_address, Node* limit_address) { // TODO(jgruber, chromium:848672): TNodeify AllocateRaw. @@ -1226,7 +1216,7 @@ Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags, BIND(&needs_filler); // Store a filler and increase the address by kPointerSize. 
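AllocateRaw, modified in this hunk, is a bump-pointer allocator: the fast path claims size_in_bytes by advancing top, bails out to the runtime when top would pass limit, and -- in the double-alignment path that the filler store resuming below belongs to -- plugs the alignment gap with a one-pointer filler object so the heap stays iterable. A standalone model of the fast path, with the filler and runtime fallback reduced to comments:

#include <cstdint>
#include <cstdio>

struct NewSpace {
  uintptr_t top;    // next free address
  uintptr_t limit;  // end of the current allocation area
};

// Returns the start of the new object, or 0 when the caller must take the
// runtime slow path. (The real code may also write a one-pointer filler map
// at `result` and bump by kPointerSize when double alignment is required.)
uintptr_t AllocateRaw(NewSpace& space, uintptr_t size_in_bytes) {
  uintptr_t result = space.top;
  uintptr_t new_top = result + size_in_bytes;
  if (new_top > space.limit) return 0;  // go to runtime
  space.top = new_top;
  return result;
}

int main() {
  NewSpace space{0x1000, 0x1020};
  printf("%#lx\n", static_cast<unsigned long>(AllocateRaw(space, 0x10)));  // 0x1000
  printf("%#lx\n", static_cast<unsigned long>(AllocateRaw(space, 0x20)));  // 0 -> slow path
}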
StoreNoWriteBarrier(MachineRepresentation::kTagged, top, - LoadRoot(Heap::kOnePointerFillerMapRootIndex)); + LoadRoot(RootIndex::kOnePointerFillerMap)); address.Bind(IntPtrAdd(no_runtime_result, IntPtrConstant(4))); Goto(&done_filling); @@ -1457,19 +1447,19 @@ TNode CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) { } TNode CodeStubAssembler::LoadAndUntagToWord32Root( - Heap::RootListIndex root_index) { + RootIndex root_index) { Node* roots_array_start = ExternalConstant(ExternalReference::roots_array_start(isolate())); - int index = root_index * kPointerSize; + int offset = static_cast(root_index) * kPointerSize; if (SmiValuesAre32Bits()) { #if V8_TARGET_LITTLE_ENDIAN - index += kPointerSize / 2; + offset += kPointerSize / 2; #endif return UncheckedCast( - Load(MachineType::Int32(), roots_array_start, IntPtrConstant(index))); + Load(MachineType::Int32(), roots_array_start, IntPtrConstant(offset))); } else { return SmiToInt32(Load(MachineType::AnyTagged(), roots_array_start, - IntPtrConstant(index))); + IntPtrConstant(offset))); } } @@ -1591,11 +1581,6 @@ TNode CodeStubAssembler::LoadAndUntagWeakFixedArrayLength( return LoadAndUntagObjectField(array, WeakFixedArray::kLengthOffset); } -TNode CodeStubAssembler::LoadTypedArrayLength( - TNode typed_array) { - return CAST(LoadObjectField(typed_array, JSTypedArray::kLengthOffset)); -} - TNode CodeStubAssembler::LoadMapBitField(SloppyTNode map) { CSA_SLOW_ASSERT(this, IsMap(map)); return UncheckedCast( @@ -1654,7 +1639,7 @@ TNode CodeStubAssembler::LoadMapPrototypeInfo( BIND(&if_strong_heap_object); GotoIfNot(WordEqual(LoadMap(CAST(prototype_info.value())), - LoadRoot(Heap::kPrototypeInfoMapRootIndex)), + LoadRoot(RootIndex::kPrototypeInfoMap)), if_no_proto_info); return CAST(prototype_info.value()); } @@ -1720,6 +1705,19 @@ TNode CodeStubAssembler::LoadMapBackPointer(SloppyTNode map) { [=] { return UndefinedConstant(); }); } +TNode CodeStubAssembler::EnsureOnlyHasSimpleProperties( + TNode map, TNode instance_type, Label* bailout) { + // This check can have false positives, since it applies to any JSValueType. 
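EnsureOnlyHasSimpleProperties, whose body continues below, rejects two map properties with one masked test: IsSetWord32 with an OR-ed mask is true when any bit of the mask is set, so a single AND bails out for dictionary maps and hidden prototypes alike. The same check in plain C++, with invented bit positions:

#include <cstdint>
#include <cstdio>

constexpr uint32_t kIsDictionaryMapBit = 1u << 5;  // positions illustrative
constexpr uint32_t kHasHiddenPrototypeBit = 1u << 6;

bool HasOnlySimpleProperties(uint32_t bit_field3) {
  // One AND tests both bits; any set bit means "bail out".
  return (bit_field3 & (kIsDictionaryMapBit | kHasHiddenPrototypeBit)) == 0;
}

int main() {
  printf("%d\n", HasOnlySimpleProperties(0));                    // 1
  printf("%d\n", HasOnlySimpleProperties(kIsDictionaryMapBit));  // 0
}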
+ GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout); + + TNode bit_field3 = LoadMapBitField3(map); + GotoIf(IsSetWord32(bit_field3, Map::IsDictionaryMapBit::kMask | + Map::HasHiddenPrototypeBit::kMask), + bailout); + + return bit_field3; +} + TNode CodeStubAssembler::LoadJSReceiverIdentityHash( SloppyTNode receiver, Label* if_no_hash) { TVARIABLE(IntPtrT, var_hash); @@ -1792,16 +1790,20 @@ TNode CodeStubAssembler::LoadNameHash(SloppyTNode name, return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift))); } +TNode CodeStubAssembler::LoadStringLengthAsSmi( + SloppyTNode string) { + return SmiFromIntPtr(LoadStringLengthAsWord(string)); +} + TNode CodeStubAssembler::LoadStringLengthAsWord( - SloppyTNode object) { - return SmiUntag(LoadStringLengthAsSmi(object)); + SloppyTNode string) { + return Signed(ChangeUint32ToWord(LoadStringLengthAsWord32(string))); } -TNode CodeStubAssembler::LoadStringLengthAsSmi( - SloppyTNode object) { - CSA_ASSERT(this, IsString(object)); - return CAST(LoadObjectField(object, String::kLengthOffset, - MachineType::TaggedPointer())); +TNode CodeStubAssembler::LoadStringLengthAsWord32( + SloppyTNode string) { + CSA_ASSERT(this, IsString(string)); + return LoadObjectField(string, String::kLengthOffset); } Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) { @@ -2522,7 +2524,7 @@ TNode CodeStubAssembler::LoadNativeContext( TNode CodeStubAssembler::LoadModuleContext( SloppyTNode context) { - Node* module_map = LoadRoot(Heap::kModuleContextMapRootIndex); + Node* module_map = LoadRoot(RootIndex::kModuleContextMap); Variable cur_context(this, MachineRepresentation::kTaggedPointer); cur_context.Bind(context); @@ -2705,8 +2707,8 @@ Node* CodeStubAssembler::StoreMap(Node* object, Node* map) { object, IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map); } -Node* CodeStubAssembler::StoreMapNoWriteBarrier( - Node* object, Heap::RootListIndex map_root_index) { +Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, + RootIndex map_root_index) { return StoreMapNoWriteBarrier(object, LoadRoot(map_root_index)); } @@ -2718,7 +2720,7 @@ Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) { } Node* CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset, - Heap::RootListIndex root_index) { + RootIndex root_index) { if (Heap::RootIsImmortalImmovable(root_index)) { return StoreObjectFieldNoWriteBarrier(object, offset, LoadRoot(root_index)); } else { @@ -2834,7 +2836,7 @@ void CodeStubAssembler::EnsureArrayLengthWritable(TNode map, TNode maybe_length = CAST(LoadWeakFixedArrayElement( descriptors, DescriptorArray::ToKeyIndex(length_index))); CSA_ASSERT(this, - WordEqual(maybe_length, LoadRoot(Heap::klength_stringRootIndex))); + WordEqual(maybe_length, LoadRoot(RootIndex::klength_string))); #endif TNode details = LoadDetailsByKeyIndex( @@ -2975,7 +2977,7 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array, Node* CodeStubAssembler::AllocateCellWithValue(Node* value, WriteBarrierMode mode) { Node* result = Allocate(Cell::kSize, kNone); - StoreMapNoWriteBarrier(result, Heap::kCellMapRootIndex); + StoreMapNoWriteBarrier(result, RootIndex::kCellMap); StoreCellValue(result, value, mode); return result; } @@ -2999,7 +3001,7 @@ Node* CodeStubAssembler::StoreCellValue(Node* cell, Node* value, TNode CodeStubAssembler::AllocateHeapNumber() { Node* result = Allocate(HeapNumber::kSize, kNone); - Heap::RootListIndex heap_map_index = Heap::kHeapNumberMapRootIndex; + RootIndex heap_map_index 
= RootIndex::kHeapNumberMap; StoreMapNoWriteBarrier(result, heap_map_index); return UncheckedCast(result); } @@ -3013,7 +3015,7 @@ TNode CodeStubAssembler::AllocateHeapNumberWithValue( TNode CodeStubAssembler::AllocateMutableHeapNumber() { Node* result = Allocate(MutableHeapNumber::kSize, kNone); - Heap::RootListIndex heap_map_index = Heap::kMutableHeapNumberMapRootIndex; + RootIndex heap_map_index = RootIndex::kMutableHeapNumberMap; StoreMapNoWriteBarrier(result, heap_map_index); return UncheckedCast(result); } @@ -3039,7 +3041,7 @@ TNode CodeStubAssembler::AllocateRawBigInt(TNode length) { TNode size = IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize), Signed(WordShl(length, kPointerSizeLog2))); Node* raw_result = Allocate(size, kNone); - StoreMapNoWriteBarrier(raw_result, Heap::kBigIntMapRootIndex); + StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap); return UncheckedCast(raw_result); } @@ -3069,20 +3071,20 @@ TNode CodeStubAssembler::LoadBigIntDigit(TNode bigint, } TNode CodeStubAssembler::AllocateSeqOneByteString( - int length, AllocationFlags flags) { + uint32_t length, AllocationFlags flags) { Comment("AllocateSeqOneByteString"); if (length == 0) { - return CAST(LoadRoot(Heap::kempty_stringRootIndex)); + return CAST(LoadRoot(RootIndex::kempty_string)); } Node* result = Allocate(SeqOneByteString::SizeFor(length), flags); - DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex)); - StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex); + DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kOneByteStringMap)); + StoreMapNoWriteBarrier(result, RootIndex::kOneByteStringMap); StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset, - SmiConstant(length), - MachineRepresentation::kTagged); - StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot, - IntPtrConstant(String::kEmptyHashField), - MachineType::PointerRepresentation()); + Uint32Constant(length), + MachineRepresentation::kWord32); + StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset, + Int32Constant(String::kEmptyHashField), + MachineRepresentation::kWord32); return CAST(result); } @@ -3093,7 +3095,7 @@ TNode CodeStubAssembler::IsZeroOrContext(SloppyTNode object) { } TNode CodeStubAssembler::AllocateSeqOneByteString( - Node* context, TNode length, AllocationFlags flags) { + Node* context, TNode length, AllocationFlags flags) { Comment("AllocateSeqOneByteString"); CSA_SLOW_ASSERT(this, IsZeroOrContext(context)); VARIABLE(var_result, MachineRepresentation::kTagged); @@ -3101,10 +3103,10 @@ TNode CodeStubAssembler::AllocateSeqOneByteString( // Compute the SeqOneByteString size and check if it fits into new space. Label if_lengthiszero(this), if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred), if_join(this); - GotoIf(SmiEqual(length, SmiConstant(0)), &if_lengthiszero); + GotoIf(Word32Equal(length, Uint32Constant(0)), &if_lengthiszero); Node* raw_size = GetArrayAllocationSize( - SmiUntag(length), UINT8_ELEMENTS, INTPTR_PARAMETERS, + Signed(ChangeUint32ToWord(length)), UINT8_ELEMENTS, INTPTR_PARAMETERS, SeqOneByteString::kHeaderSize + kObjectAlignmentMask); Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)), @@ -3114,13 +3116,13 @@ TNode CodeStubAssembler::AllocateSeqOneByteString( { // Just allocate the SeqOneByteString in new space. 
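The string-allocation hunks that follow do two things: the length and hash fields shrink from tagged/pointer-width stores to raw 32-bit stores, and the object size is still rounded up to the allocation granularity with the (size + mask) & ~mask idiom visible in the CSA code. A sketch of that size computation, using illustrative constants for a 64-bit build (V8's real values live in its headers):

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kHeaderSize = 16;  // map + 32-bit length + 32-bit hash
constexpr uintptr_t kObjectAlignmentMask = 8 - 1;

uintptr_t SizeForOneByteString(uint32_t length) {
  // One byte per character, rounded up to the 8-byte granularity.
  uintptr_t raw_size = kHeaderSize + length + kObjectAlignmentMask;
  return raw_size & ~kObjectAlignmentMask;
}

int main() {
  printf("%lu\n", static_cast<unsigned long>(SizeForOneByteString(0)));   // 16
  printf("%lu\n", static_cast<unsigned long>(SizeForOneByteString(13)));  // 32
}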
Node* result = AllocateInNewSpace(size, flags); - DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex)); - StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex); + DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kOneByteStringMap)); + StoreMapNoWriteBarrier(result, RootIndex::kOneByteStringMap); StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset, - length, MachineRepresentation::kTagged); - StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot, - IntPtrConstant(String::kEmptyHashField), - MachineType::PointerRepresentation()); + length, MachineRepresentation::kWord32); + StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset, + Int32Constant(String::kEmptyHashField), + MachineRepresentation::kWord32); var_result.Bind(result); Goto(&if_join); } @@ -3128,15 +3130,15 @@ TNode CodeStubAssembler::AllocateSeqOneByteString( BIND(&if_notsizeissmall); { // We might need to allocate in large object space, go to the runtime. - Node* result = - CallRuntime(Runtime::kAllocateSeqOneByteString, context, length); + Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context, + ChangeUint32ToTagged(length)); var_result.Bind(result); Goto(&if_join); } BIND(&if_lengthiszero); { - var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex)); + var_result.Bind(LoadRoot(RootIndex::kempty_string)); Goto(&if_join); } @@ -3145,25 +3147,25 @@ TNode CodeStubAssembler::AllocateSeqOneByteString( } TNode CodeStubAssembler::AllocateSeqTwoByteString( - int length, AllocationFlags flags) { + uint32_t length, AllocationFlags flags) { Comment("AllocateSeqTwoByteString"); if (length == 0) { - return CAST(LoadRoot(Heap::kempty_stringRootIndex)); + return CAST(LoadRoot(RootIndex::kempty_string)); } Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags); - DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex)); - StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex); + DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kStringMap)); + StoreMapNoWriteBarrier(result, RootIndex::kStringMap); StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset, - SmiConstant(Smi::FromInt(length)), - MachineRepresentation::kTagged); - StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot, - IntPtrConstant(String::kEmptyHashField), - MachineType::PointerRepresentation()); + Uint32Constant(length), + MachineRepresentation::kWord32); + StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset, + Int32Constant(String::kEmptyHashField), + MachineRepresentation::kWord32); return CAST(result); } TNode CodeStubAssembler::AllocateSeqTwoByteString( - Node* context, TNode length, AllocationFlags flags) { + Node* context, TNode length, AllocationFlags flags) { CSA_SLOW_ASSERT(this, IsZeroOrContext(context)); Comment("AllocateSeqTwoByteString"); VARIABLE(var_result, MachineRepresentation::kTagged); @@ -3171,10 +3173,10 @@ TNode CodeStubAssembler::AllocateSeqTwoByteString( // Compute the SeqTwoByteString size and check if it fits into new space. 
Label if_lengthiszero(this), if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred), if_join(this); - GotoIf(SmiEqual(length, SmiConstant(0)), &if_lengthiszero); + GotoIf(Word32Equal(length, Uint32Constant(0)), &if_lengthiszero); Node* raw_size = GetArrayAllocationSize( - SmiUntag(length), UINT16_ELEMENTS, INTPTR_PARAMETERS, + Signed(ChangeUint32ToWord(length)), UINT16_ELEMENTS, INTPTR_PARAMETERS, SeqOneByteString::kHeaderSize + kObjectAlignmentMask); Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)), @@ -3184,13 +3186,13 @@ TNode CodeStubAssembler::AllocateSeqTwoByteString( { // Just allocate the SeqTwoByteString in new space. Node* result = AllocateInNewSpace(size, flags); - DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex)); - StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex); + DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kStringMap)); + StoreMapNoWriteBarrier(result, RootIndex::kStringMap); StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset, - length, MachineRepresentation::kTagged); - StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot, - IntPtrConstant(String::kEmptyHashField), - MachineType::PointerRepresentation()); + length, MachineRepresentation::kWord32); + StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset, + Int32Constant(String::kEmptyHashField), + MachineRepresentation::kWord32); var_result.Bind(result); Goto(&if_join); } @@ -3198,15 +3200,15 @@ TNode CodeStubAssembler::AllocateSeqTwoByteString( BIND(&if_notsizeissmall); { // We might need to allocate in large object space, go to the runtime. - Node* result = - CallRuntime(Runtime::kAllocateSeqTwoByteString, context, length); + Node* result = CallRuntime(Runtime::kAllocateSeqTwoByteString, context, + ChangeUint32ToTagged(length)); var_result.Bind(result); Goto(&if_join); } BIND(&if_lengthiszero); { - var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex)); + var_result.Bind(LoadRoot(RootIndex::kempty_string)); Goto(&if_join); } @@ -3214,19 +3216,20 @@ TNode CodeStubAssembler::AllocateSeqTwoByteString( return CAST(var_result.value()); } -TNode CodeStubAssembler::AllocateSlicedString( - Heap::RootListIndex map_root_index, TNode length, TNode parent, - TNode offset) { - DCHECK(map_root_index == Heap::kSlicedOneByteStringMapRootIndex || - map_root_index == Heap::kSlicedStringMapRootIndex); +TNode CodeStubAssembler::AllocateSlicedString(RootIndex map_root_index, + TNode length, + TNode parent, + TNode offset) { + DCHECK(map_root_index == RootIndex::kSlicedOneByteStringMap || + map_root_index == RootIndex::kSlicedStringMap); Node* result = Allocate(SlicedString::kSize); DCHECK(Heap::RootIsImmortalImmovable(map_root_index)); StoreMapNoWriteBarrier(result, map_root_index); + StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset, + Int32Constant(String::kEmptyHashField), + MachineRepresentation::kWord32); StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length, - MachineRepresentation::kTagged); - StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldSlot, - IntPtrConstant(String::kEmptyHashField), - MachineType::PointerRepresentation()); + MachineRepresentation::kWord32); StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent, MachineRepresentation::kTagged); StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset, @@ -3235,30 +3238,32 @@ TNode 
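
AllocateSlicedString above materializes a substring as a (parent, offset, length) view over the original characters instead of copying them. The representation, reduced to plain C++ (illustrative, not V8's object model):

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // A "sliced string": shares the parent's characters and records a
    // window into them; no character data is copied.
    struct Slice {
      const std::string* parent;  // backing store, must outlive the slice
      uint32_t offset;
      uint32_t length;
      char CharAt(uint32_t i) const { return (*parent)[offset + i]; }
    };

    int main() {
      std::string parent = "CodeStubAssembler";
      Slice sub{&parent, 4, 4};  // "Stub", created in O(1)
      for (uint32_t i = 0; i < sub.length; i++) putchar(sub.CharAt(i));
      putchar('\n');
      return 0;
    }
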
CodeStubAssembler::AllocateSlicedString( } TNode CodeStubAssembler::AllocateSlicedOneByteString( - TNode length, TNode parent, TNode offset) { - return AllocateSlicedString(Heap::kSlicedOneByteStringMapRootIndex, length, + TNode length, TNode parent, TNode offset) { + return AllocateSlicedString(RootIndex::kSlicedOneByteStringMap, length, parent, offset); } TNode CodeStubAssembler::AllocateSlicedTwoByteString( - TNode length, TNode parent, TNode offset) { - return AllocateSlicedString(Heap::kSlicedStringMapRootIndex, length, parent, + TNode length, TNode parent, TNode offset) { + return AllocateSlicedString(RootIndex::kSlicedStringMap, length, parent, offset); } -TNode CodeStubAssembler::AllocateConsString( - Heap::RootListIndex map_root_index, TNode length, TNode first, - TNode second, AllocationFlags flags) { - DCHECK(map_root_index == Heap::kConsOneByteStringMapRootIndex || - map_root_index == Heap::kConsStringMapRootIndex); +TNode CodeStubAssembler::AllocateConsString(RootIndex map_root_index, + TNode length, + TNode first, + TNode second, + AllocationFlags flags) { + DCHECK(map_root_index == RootIndex::kConsOneByteStringMap || + map_root_index == RootIndex::kConsStringMap); Node* result = Allocate(ConsString::kSize, flags); DCHECK(Heap::RootIsImmortalImmovable(map_root_index)); StoreMapNoWriteBarrier(result, map_root_index); StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length, - MachineRepresentation::kTagged); - StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldSlot, - IntPtrConstant(String::kEmptyHashField), - MachineType::PointerRepresentation()); + MachineRepresentation::kWord32); + StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset, + Int32Constant(String::kEmptyHashField), + MachineRepresentation::kWord32); bool const new_space = !(flags & kPretenured); if (new_space) { StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, first, @@ -3273,24 +3278,23 @@ TNode CodeStubAssembler::AllocateConsString( } TNode CodeStubAssembler::AllocateOneByteConsString( - TNode length, TNode first, TNode second, + TNode length, TNode first, TNode second, AllocationFlags flags) { - return AllocateConsString(Heap::kConsOneByteStringMapRootIndex, length, first, + return AllocateConsString(RootIndex::kConsOneByteStringMap, length, first, second, flags); } TNode CodeStubAssembler::AllocateTwoByteConsString( - TNode length, TNode first, TNode second, + TNode length, TNode first, TNode second, AllocationFlags flags) { - return AllocateConsString(Heap::kConsStringMapRootIndex, length, first, - second, flags); + return AllocateConsString(RootIndex::kConsStringMap, length, first, second, + flags); } -TNode CodeStubAssembler::NewConsString(Node* context, TNode length, +TNode CodeStubAssembler::NewConsString(TNode length, TNode left, TNode right, AllocationFlags flags) { - CSA_ASSERT(this, IsContext(context)); // Added string can be a cons string. Comment("Allocating ConsString"); Node* left_instance_type = LoadInstanceType(left); @@ -3367,8 +3371,8 @@ TNode CodeStubAssembler::AllocateNameDictionaryWithCapacity( UncheckedCast(AllocateInNewSpace(store_size)); Comment("Initialize NameDictionary"); // Initialize FixedArray fields. 
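
AllocateConsString likewise makes concatenation O(1): it allocates a two-pointer (first, second) node and defers building the flat character sequence. A hedged sketch of the idea:

    #include <cstdio>
    #include <memory>
    #include <string>

    // A cons node defers concatenation: the length is known eagerly,
    // the characters are only materialized on Flatten().
    struct Cons {
      std::shared_ptr<std::string> first, second;
      size_t length() const { return first->size() + second->size(); }
      std::string Flatten() const { return *first + *second; }
    };

    int main() {
      auto a = std::make_shared<std::string>("Code");
      auto b = std::make_shared<std::string>("Stub");
      Cons c{a, b};  // O(1): just two pointer stores, no copying
      printf("%zu %s\n", c.length(), c.Flatten().c_str());
      return 0;
    }
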
- DCHECK(Heap::RootIsImmortalImmovable(Heap::kNameDictionaryMapRootIndex)); - StoreMapNoWriteBarrier(result, Heap::kNameDictionaryMapRootIndex); + DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kNameDictionaryMap)); + StoreMapNoWriteBarrier(result, RootIndex::kNameDictionaryMap); StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset, SmiFromIntPtr(length)); // Initialized HashTable fields. @@ -3432,8 +3436,8 @@ Node* CodeStubAssembler::AllocateOrderedHashTable() { // Allocate the table and add the proper map. const ElementsKind elements_kind = HOLEY_ELEMENTS; TNode length_intptr = IntPtrConstant(kFixedArrayLength); - TNode fixed_array_map = CAST(LoadRoot( - static_cast(CollectionType::GetMapRootIndex()))); + TNode fixed_array_map = + CAST(LoadRoot(static_cast(CollectionType::GetMapRootIndex()))); TNode table = CAST(AllocateFixedArray(elements_kind, length_intptr, kAllowLargeObjectAllocation, fixed_array_map)); @@ -3506,8 +3510,8 @@ TNode CodeStubAssembler::AllocateSmallOrderedHashTable( UncheckedCast(TimesPointerSize(total_size_word_aligned)); // Allocate the table and add the proper map. - TNode small_ordered_hash_map = CAST(LoadRoot( - static_cast(CollectionType::GetMapRootIndex()))); + TNode small_ordered_hash_map = + CAST(LoadRoot(static_cast(CollectionType::GetMapRootIndex()))); TNode table_obj = CAST(AllocateInNewSpace(total_size_word_aligned)); StoreMapNoWriteBarrier(table_obj, small_ordered_hash_map); TNode table = UncheckedCast(table_obj); @@ -3555,7 +3559,7 @@ CodeStubAssembler::AllocateSmallOrderedHashTable( template void CodeStubAssembler::FindOrderedHashTableEntry( Node* table, Node* hash, - std::function key_compare, + const std::function& key_compare, Variable* entry_start_position, Label* entry_found, Label* not_found) { // Get the index of the bucket. 
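
The FindOrderedHashTableEntry hunks change key_compare from a by-value std::function to a const reference, so the type-erased callable and its captured state are no longer copied on every call. The same signature change in a standalone helper:

    #include <cstdio>
    #include <functional>

    // Pass-by-value would copy the std::function (and anything it
    // captures) at each call site; a const reference does not.
    int CountMatches(const int* xs, int n,
                     const std::function<bool(int)>& pred) {
      int count = 0;
      for (int i = 0; i < n; i++)
        if (pred(xs[i])) count++;
      return count;
    }

    int main() {
      int xs[] = {1, 2, 3, 4, 5};
      int limit = 3;
      printf("%d\n", CountMatches(xs, 5, [=](int x) { return x <= limit; }));
      return 0;  // prints 3
    }
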
Node* const number_of_buckets = SmiUntag(CAST(LoadFixedArrayElement( @@ -3622,11 +3626,11 @@ void CodeStubAssembler::FindOrderedHashTableEntry( template void CodeStubAssembler::FindOrderedHashTableEntry( Node* table, Node* hash, - std::function key_compare, + const std::function& key_compare, Variable* entry_start_position, Label* entry_found, Label* not_found); template void CodeStubAssembler::FindOrderedHashTableEntry( Node* table, Node* hash, - std::function key_compare, + const std::function& key_compare, Variable* entry_start_position, Label* entry_found, Label* not_found); Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) { @@ -3677,7 +3681,7 @@ void CodeStubAssembler::InitializeJSObjectFromMap( if (properties == nullptr) { CSA_ASSERT(this, Word32BinaryNot(IsDictionaryMap((map)))); StoreObjectFieldRoot(object, JSObject::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); } else { CSA_ASSERT(this, Word32Or(Word32Or(IsPropertyArray(properties), IsNameDictionary(properties)), @@ -3687,7 +3691,7 @@ void CodeStubAssembler::InitializeJSObjectFromMap( } if (elements == nullptr) { StoreObjectFieldRoot(object, JSObject::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); } else { CSA_ASSERT(this, IsFixedArray(elements)); StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset, elements); @@ -3706,7 +3710,7 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking( CSA_ASSERT( this, IsClearWord32(LoadMapBitField3(map))); InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), instance_size, - Heap::kUndefinedValueRootIndex); + RootIndex::kUndefinedValue); } void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking( @@ -3745,11 +3749,11 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking( Comment("iInitialize filler fields"); InitializeFieldsWithRoot(object, used_size, instance_size, - Heap::kOnePointerFillerMapRootIndex); + RootIndex::kOnePointerFillerMap); Comment("Initialize undefined fields"); InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), used_size, - Heap::kUndefinedValueRootIndex); + RootIndex::kUndefinedValue); STATIC_ASSERT(Map::kNoSlackTracking == 0); GotoIf(IsClearWord32(new_bit_field3), @@ -3826,9 +3830,9 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements( StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset, elements); // Setup elements object. STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize); - Heap::RootListIndex elements_map_index = - IsDoubleElementsKind(kind) ? Heap::kFixedDoubleArrayMapRootIndex - : Heap::kFixedArrayMapRootIndex; + RootIndex elements_map_index = IsDoubleElementsKind(kind) + ? 
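
With in-object slack tracking (InitializeJSObjectBodyWithSlackTracking above), only the used prefix of the instance is filled with undefined while the unused tail is stamped with a one-word filler so the heap stays walkable. The two-range initialization pattern, reduced to a sketch:

    #include <cstdio>

    enum Sentinel { kUndefined, kFiller };

    // Fill [start, used) with undefined and [used, end) with filler,
    // mirroring the two InitializeFieldsWithRoot calls above.
    void InitializeWithSlack(Sentinel* fields, int start, int used, int end) {
      for (int i = start; i < used; i++) fields[i] = kUndefined;
      for (int i = used; i < end; i++) fields[i] = kFiller;
    }

    int main() {
      Sentinel fields[8];
      InitializeWithSlack(fields, 0, 5, 8);
      for (int i = 0; i < 8; i++)
        printf("%d:%s ", i, fields[i] == kUndefined ? "undef" : "filler");
      printf("\n");
      return 0;
    }
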
RootIndex::kFixedDoubleArrayMap + : RootIndex::kFixedArrayMap; DCHECK(Heap::RootIsImmortalImmovable(elements_map_index)); StoreMapNoWriteBarrier(elements, elements_map_index); TNode capacity_smi = ParameterToTagged(capacity, capacity_mode); @@ -3854,7 +3858,7 @@ Node* CodeStubAssembler::AllocateUninitializedJSArray(Node* array_map, StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length); StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); if (allocation_site != nullptr) { InitializeAllocationMemento(array, IntPtrConstant(JSArray::kSize), @@ -3879,7 +3883,7 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map, array = AllocateUninitializedJSArrayWithoutElements(array_map, length, allocation_site); StoreObjectFieldRoot(array, JSArray::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); } else if (TryGetIntPtrOrSmiConstantValue(capacity, &capacity_as_constant, capacity_mode) && capacity_as_constant > 0) { @@ -3889,7 +3893,7 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map, // Fill in the elements with holes. FillFixedArrayWithValue(kind, elements, IntPtrOrSmiConstant(0, capacity_mode), capacity, - Heap::kTheHoleValueRootIndex, capacity_mode); + RootIndex::kTheHoleValue, capacity_mode); } else { Label out(this), empty(this), nonempty(this); VARIABLE(var_array, MachineRepresentation::kTagged); @@ -3904,7 +3908,7 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map, var_array.Bind(AllocateUninitializedJSArrayWithoutElements( array_map, length, allocation_site)); StoreObjectFieldRoot(var_array.value(), JSArray::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); Goto(&out); } @@ -3918,7 +3922,7 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map, // Fill in the elements with holes. FillFixedArrayWithValue(kind, elements, IntPtrOrSmiConstant(0, capacity_mode), capacity, - Heap::kTheHoleValueRootIndex, capacity_mode); + RootIndex::kTheHoleValue, capacity_mode); Goto(&out); } @@ -3953,6 +3957,10 @@ Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array, Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array, ParameterMode mode, Node* allocation_site) { + // TODO(dhai): we should be able to assert IsFastJSArray(array) here, but this + // function is also used to copy boilerplates even when the no-elements + // protector is invalid. This function should be renamed to reflect its uses. + CSA_ASSERT(this, IsJSArray(array)); Node* original_array_map = LoadMap(array); Node* elements_kind = LoadMapElementsKind(original_array_map); @@ -3997,9 +4005,9 @@ TNode CodeStubAssembler::AllocateFixedArray( StoreMap(array, fixed_array_map); } } else { - Heap::RootListIndex map_index = IsDoubleElementsKind(kind) - ? Heap::kFixedDoubleArrayMapRootIndex - : Heap::kFixedArrayMapRootIndex; + RootIndex map_index = IsDoubleElementsKind(kind) + ? 
RootIndex::kFixedDoubleArrayMap + : RootIndex::kFixedArrayMap; DCHECK(Heap::RootIsImmortalImmovable(map_index)); StoreMapNoWriteBarrier(array, map_index); } @@ -4008,62 +4016,58 @@ TNode CodeStubAssembler::AllocateFixedArray( return UncheckedCast(array); } -TNode CodeStubAssembler::ExtractFixedArray( - Node* fixed_array, Node* first, Node* count, Node* capacity, +TNode CodeStubAssembler::ExtractToFixedArray( + Node* source, Node* first, Node* count, Node* capacity, Node* source_map, + ElementsKind from_kind, AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode) { + DCHECK_NE(first, nullptr); + DCHECK_NE(count, nullptr); + DCHECK_NE(capacity, nullptr); + DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays); + CSA_ASSERT(this, + WordNotEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity)); + CSA_ASSERT(this, WordEqual(source_map, LoadMap(source))); + VARIABLE(var_result, MachineRepresentation::kTagged); - VARIABLE(var_fixed_array_map, MachineRepresentation::kTagged); - const AllocationFlags flags = - (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly) - ? CodeStubAssembler::kNone - : CodeStubAssembler::kAllowLargeObjectAllocation; - if (first == nullptr) { - first = IntPtrOrSmiConstant(0, parameter_mode); - } - if (count == nullptr) { - count = - IntPtrOrSmiSub(TaggedToParameter(LoadFixedArrayBaseLength(fixed_array), - parameter_mode), - first, parameter_mode); + VARIABLE(var_target_map, MachineRepresentation::kTagged, source_map); - CSA_ASSERT( - this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant(0, parameter_mode), - count, parameter_mode)); - } - if (capacity == nullptr) { - capacity = count; - } else { - CSA_ASSERT(this, Word32BinaryNot(IntPtrOrSmiGreaterThan( - IntPtrOrSmiAdd(first, count, parameter_mode), capacity, - parameter_mode))); - } + Label done(this, {&var_result}), is_cow(this), + new_space_check(this, {&var_target_map}); - Label if_fixed_double_array(this), empty(this), cow(this), - done(this, {&var_result, &var_fixed_array_map}); - var_fixed_array_map.Bind(LoadMap(fixed_array)); - GotoIf(WordEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity), &empty); + // If source_map is either FixedDoubleArrayMap, or FixedCOWArrayMap but + // we can't just use COW, use FixedArrayMap as the target map. Otherwise, use + // source_map as the target map. + if (IsDoubleElementsKind(from_kind)) { + CSA_ASSERT(this, IsFixedDoubleArrayMap(source_map)); + var_target_map.Bind(LoadRoot(RootIndex::kFixedArrayMap)); + Goto(&new_space_check); + } else { + CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArrayMap(source_map))); + Branch(WordEqual(var_target_map.value(), + LoadRoot(RootIndex::kFixedCOWArrayMap)), + &is_cow, &new_space_check); - if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) { - if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) { - GotoIf(IsFixedDoubleArrayMap(var_fixed_array_map.value()), - &if_fixed_double_array); - } else { - CSA_ASSERT(this, IsFixedDoubleArrayMap(var_fixed_array_map.value())); + BIND(&is_cow); + { + // |source| is a COW array, so we don't actually need to allocate a new + // array unless: + // 1) |extract_flags| forces us to, or + // 2) we're asked to extract only part of the |source| (|first| != 0). 
+ if (extract_flags & ExtractFixedArrayFlag::kDontCopyCOW) { + Branch(WordNotEqual(IntPtrOrSmiConstant(0, parameter_mode), first), + &new_space_check, [&] { + var_result.Bind(source); + Goto(&done); + }); + } else { + var_target_map.Bind(LoadRoot(RootIndex::kFixedArrayMap)); + Goto(&new_space_check); + } } - } else { - DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays); - CSA_ASSERT(this, Word32BinaryNot( - IsFixedDoubleArrayMap(var_fixed_array_map.value()))); } - if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) { - Label new_space_check(this, {&var_fixed_array_map}); - Branch(WordEqual(var_fixed_array_map.value(), - LoadRoot(Heap::kFixedCOWArrayMapRootIndex)), - &cow, &new_space_check); - - BIND(&new_space_check); - + BIND(&new_space_check); + { bool handle_old_space = true; if (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly) { handle_old_space = false; @@ -4084,59 +4088,102 @@ TNode CodeStubAssembler::ExtractFixedArray( capacity, &old_space, FixedArray::kHeaderSize, parameter_mode); } - Comment("Copy PACKED_ELEMENTS new space"); - - ElementsKind kind = PACKED_ELEMENTS; + Comment("Copy FixedArray new space"); + // We use PACKED_ELEMENTS to tell AllocateFixedArray and + // CopyFixedArrayElements that we want a FixedArray. + ElementsKind to_kind = PACKED_ELEMENTS; Node* to_elements = - AllocateFixedArray(kind, capacity, parameter_mode, - AllocationFlag::kNone, var_fixed_array_map.value()); + AllocateFixedArray(to_kind, capacity, parameter_mode, + AllocationFlag::kNone, var_target_map.value()); var_result.Bind(to_elements); - CopyFixedArrayElements(kind, fixed_array, kind, to_elements, first, count, - capacity, SKIP_WRITE_BARRIER, parameter_mode); + CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first, + count, capacity, SKIP_WRITE_BARRIER, parameter_mode); Goto(&done); if (handle_old_space) { BIND(&old_space); { - Comment("Copy PACKED_ELEMENTS old space"); + Comment("Copy FixedArray old space"); - to_elements = AllocateFixedArray(kind, capacity, parameter_mode, flags, - var_fixed_array_map.value()); + to_elements = + AllocateFixedArray(to_kind, capacity, parameter_mode, + allocation_flags, var_target_map.value()); var_result.Bind(to_elements); - CopyFixedArrayElements(kind, fixed_array, kind, to_elements, first, + CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first, count, capacity, UPDATE_WRITE_BARRIER, parameter_mode); Goto(&done); } } + } - BIND(&cow); - { - if (extract_flags & ExtractFixedArrayFlag::kDontCopyCOW) { - Branch(WordNotEqual(IntPtrOrSmiConstant(0, parameter_mode), first), - &new_space_check, [&] { - var_result.Bind(fixed_array); - Goto(&done); - }); - } else { - var_fixed_array_map.Bind(LoadRoot(Heap::kFixedArrayMapRootIndex)); - Goto(&new_space_check); - } - } + BIND(&done); + return UncheckedCast(var_result.value()); +} + +TNode CodeStubAssembler::ExtractFixedArray( + Node* source, Node* first, Node* count, Node* capacity, + ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode) { + DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays || + extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays); + VARIABLE(var_result, MachineRepresentation::kTagged); + const AllocationFlags allocation_flags = + (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly) + ? 
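
The is_cow block above returns the copy-on-write source array as-is when the kDontCopyCOW flag permits sharing and the extraction starts at element 0; any other case falls through to a real copy. A standalone rendering of that decision (names are illustrative):

    #include <cstdio>
    #include <vector>

    struct Array {
      std::vector<int> elements;
      bool copy_on_write;  // shared backing store, must not be mutated
    };

    // Returns the source itself when a COW share is permitted; otherwise
    // copies the requested window into |scratch|.
    const Array* Extract(const Array& source, size_t first, size_t count,
                         bool dont_copy_cow, Array* scratch) {
      if (source.copy_on_write && dont_copy_cow && first == 0) {
        return &source;  // share: no allocation, no copying
      }
      scratch->elements.assign(source.elements.begin() + first,
                               source.elements.begin() + first + count);
      scratch->copy_on_write = false;
      return scratch;
    }

    int main() {
      Array a{{1, 2, 3, 4}, true}, scratch{{}, false};
      const Array* shared = Extract(a, 0, 4, true, &scratch);
      printf("shared=%d\n", shared == &a);  // 1: COW share taken
      const Array* copied = Extract(a, 1, 2, true, &scratch);
      printf("copied=%zu\n", copied->elements.size());  // 2: real copy
      return 0;
    }
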
CodeStubAssembler::kNone + : CodeStubAssembler::kAllowLargeObjectAllocation; + if (first == nullptr) { + first = IntPtrOrSmiConstant(0, parameter_mode); + } + if (count == nullptr) { + count = IntPtrOrSmiSub( + TaggedToParameter(LoadFixedArrayBaseLength(source), parameter_mode), + first, parameter_mode); + + CSA_ASSERT( + this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant(0, parameter_mode), + count, parameter_mode)); + } + if (capacity == nullptr) { + capacity = count; } else { - Goto(&if_fixed_double_array); + CSA_ASSERT(this, Word32BinaryNot(IntPtrOrSmiGreaterThan( + IntPtrOrSmiAdd(first, count, parameter_mode), capacity, + parameter_mode))); + } + + Label if_fixed_double_array(this), empty(this), done(this, {&var_result}); + Node* source_map = LoadMap(source); + GotoIf(WordEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity), &empty); + + if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) { + if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) { + GotoIf(IsFixedDoubleArrayMap(source_map), &if_fixed_double_array); + } else { + CSA_ASSERT(this, IsFixedDoubleArrayMap(source_map)); + } + } + + if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) { + // Here we can only get source as FixedArray, never FixedDoubleArray. + // PACKED_ELEMENTS is used to signify that the source is a FixedArray. + Node* to_elements = ExtractToFixedArray( + source, first, count, capacity, source_map, PACKED_ELEMENTS, + allocation_flags, extract_flags, parameter_mode); + var_result.Bind(to_elements); + Goto(&done); } if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) { BIND(&if_fixed_double_array); - Comment("Copy PACKED_DOUBLE_ELEMENTS"); - + Comment("Copy FixedDoubleArray"); + // We use PACKED_DOUBLE_ELEMENTS to signify that the source is + // FixedDoubleArray. That it is PACKED or HOLEY does not matter. 
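
The refactored ExtractFixedArray keeps the convenience defaults: a null first becomes 0, a null count becomes length minus first, and a null capacity becomes count, with the same bounds assertions. The contract in plain C++, using -1 as the "not supplied" marker instead of nullptr:

    #include <cassert>
    #include <cstdio>

    // first/count/capacity of -1 mean "not supplied", mirroring the
    // nullptr defaults in the hunk above.
    void ResolveExtractArgs(int length, int* first, int* count, int* capacity) {
      if (*first < 0) *first = 0;
      if (*count < 0) *count = length - *first;
      assert(*count >= 0);
      if (*capacity < 0) *capacity = *count;
      assert(*first + *count <= *capacity);
    }

    int main() {
      int first = -1, count = -1, capacity = -1;
      ResolveExtractArgs(10, &first, &count, &capacity);
      printf("first=%d count=%d capacity=%d\n", first, count, capacity);
      return 0;  // first=0 count=10 capacity=10
    }
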
ElementsKind kind = PACKED_DOUBLE_ELEMENTS; Node* to_elements = AllocateFixedArray(kind, capacity, parameter_mode, - flags, var_fixed_array_map.value()); + allocation_flags, source_map); var_result.Bind(to_elements); - CopyFixedArrayElements(kind, fixed_array, kind, to_elements, first, count, + CopyFixedArrayElements(kind, source, kind, to_elements, first, count, capacity, SKIP_WRITE_BARRIER, parameter_mode); Goto(&done); @@ -4179,7 +4226,7 @@ Node* CodeStubAssembler::AllocatePropertyArray(Node* capacity_node, Node* total_size = GetPropertyArrayAllocationSize(capacity_node, mode); Node* array = Allocate(total_size, flags); - Heap::RootListIndex map_index = Heap::kPropertyArrayMapRootIndex; + RootIndex map_index = RootIndex::kPropertyArrayMap; DCHECK(Heap::RootIsImmortalImmovable(map_index)); StoreMapNoWriteBarrier(array, map_index); InitializePropertyArrayLength(array, capacity_node, mode); @@ -4204,14 +4251,15 @@ void CodeStubAssembler::FillPropertyArrayWithUndefined(Node* array, mode); } -void CodeStubAssembler::FillFixedArrayWithValue( - ElementsKind kind, Node* array, Node* from_node, Node* to_node, - Heap::RootListIndex value_root_index, ParameterMode mode) { +void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind, Node* array, + Node* from_node, Node* to_node, + RootIndex value_root_index, + ParameterMode mode) { CSA_SLOW_ASSERT(this, MatchesParameterMode(from_node, mode)); CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode)); CSA_SLOW_ASSERT(this, IsFixedArrayWithKind(array, kind)); - DCHECK(value_root_index == Heap::kTheHoleValueRootIndex || - value_root_index == Heap::kUndefinedValueRootIndex); + DCHECK(value_root_index == RootIndex::kTheHoleValue || + value_root_index == RootIndex::kUndefinedValue); // Determine the value to initialize the {array} based // on the {value_root_index} and the elements {kind}. @@ -4234,6 +4282,33 @@ void CodeStubAssembler::FillFixedArrayWithValue( mode); } +void CodeStubAssembler::StoreFixedDoubleArrayHole( + TNode array, Node* index, ParameterMode parameter_mode) { + CSA_SLOW_ASSERT(this, MatchesParameterMode(index, parameter_mode)); + Node* offset = + ElementOffsetFromIndex(index, PACKED_DOUBLE_ELEMENTS, parameter_mode, + FixedArray::kHeaderSize - kHeapObjectTag); + CSA_ASSERT(this, IsOffsetInBounds( + offset, LoadAndUntagFixedArrayBaseLength(array), + FixedDoubleArray::kHeaderSize, PACKED_DOUBLE_ELEMENTS)); + Node* double_hole = + Is64() ? ReinterpretCast(Int64Constant(kHoleNanInt64)) + : ReinterpretCast(Int32Constant(kHoleNanLower32)); + // TODO(danno): When we have a Float32/Float64 wrapper class that + // preserves double bits during manipulation, remove this code/change + // this to an indexed Float64 store. + if (Is64()) { + StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset, + double_hole); + } else { + StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset, + double_hole); + StoreNoWriteBarrier(MachineRepresentation::kWord32, array, + IntPtrAdd(offset, IntPtrConstant(kPointerSize)), + double_hole); + } +} + void CodeStubAssembler::FillFixedArrayWithSmiZero(TNode array, TNode length) { CSA_ASSERT(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array))); @@ -4312,10 +4387,10 @@ void CodeStubAssembler::CopyFixedArrayElements( // pre-initialized with holes to make sure that it's always in a // consistent state. 
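
StoreFixedDoubleArrayHole writes "the hole" into a double array as one specific NaN bit pattern, stored as a single 64-bit word on 64-bit targets and as two 32-bit halves otherwise. A self-contained sketch of storing and detecting such a sentinel NaN (the payload below is an arbitrary stand-in, not V8's kHoleNanInt64):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Arbitrary quiet-NaN payload used as the "hole" marker (illustrative).
    constexpr uint64_t kHoleNanBits = 0x7FF8000000001234ULL;

    void StoreHole(double* slot) {
      std::memcpy(slot, &kHoleNanBits, sizeof(kHoleNanBits));
    }

    bool IsHole(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      // Bitwise check is required: NaN never compares equal numerically.
      return bits == kHoleNanBits;
    }

    int main() {
      double elements[2] = {1.5, 2.5};
      StoreHole(&elements[1]);
      printf("isnan=%d hole=%d\n", std::isnan(elements[1]), IsHole(elements[1]));
      return 0;  // isnan=1 hole=1
    }
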
FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0, mode), - capacity, Heap::kTheHoleValueRootIndex, mode); + capacity, RootIndex::kTheHoleValue, mode); } else if (element_count != capacity) { FillFixedArrayWithValue(to_kind, to_array, element_count, capacity, - Heap::kTheHoleValueRootIndex, mode); + RootIndex::kTheHoleValue, mode); } Node* first_from_element_offset = @@ -4427,9 +4502,8 @@ TNode CodeStubAssembler::HeapObjectToFixedArray( TNode base, Label* cast_fail) { Label fixed_array(this); TNode map = LoadMap(base); - GotoIf(WordEqual(map, LoadRoot(Heap::kFixedArrayMapRootIndex)), &fixed_array); - GotoIf(WordNotEqual(map, LoadRoot(Heap::kFixedCOWArrayMapRootIndex)), - cast_fail); + GotoIf(WordEqual(map, LoadRoot(RootIndex::kFixedArrayMap)), &fixed_array); + GotoIf(WordNotEqual(map, LoadRoot(RootIndex::kFixedCOWArrayMap)), cast_fail); Goto(&fixed_array); BIND(&fixed_array); return UncheckedCast(base); @@ -4637,7 +4711,7 @@ void CodeStubAssembler::InitializeAllocationMemento(Node* base, Node* allocation_site) { Comment("[Initialize AllocationMemento"); Node* memento = InnerAllocate(base, base_allocation_size); - StoreMapNoWriteBarrier(memento, Heap::kAllocationMementoMapRootIndex); + StoreMapNoWriteBarrier(memento, RootIndex::kAllocationMementoMap); StoreObjectFieldNoWriteBarrier( memento, AllocationMemento::kAllocationSiteOffset, allocation_site); if (FLAG_allocation_site_pretenuring) { @@ -4925,28 +4999,13 @@ TNode CodeStubAssembler::ChangeUint32ToTagged( if_join(this); TVARIABLE(Number, var_result); // If {value} > 2^31 - 1, we need to store it in a HeapNumber. - Branch(Uint32LessThan(Int32Constant(Smi::kMaxValue), value), &if_overflow, + Branch(Uint32LessThan(Uint32Constant(Smi::kMaxValue), value), &if_overflow, &if_not_overflow); BIND(&if_not_overflow); { - if (SmiValuesAre32Bits()) { - var_result = - SmiTag(ReinterpretCast(ChangeUint32ToUint64(value))); - } else { - DCHECK(SmiValuesAre31Bits()); - // If tagging {value} results in an overflow, we need to use a HeapNumber - // to represent it. - // TODO(tebbi): This overflow can never happen. - TNode> pair = Int32AddWithOverflow( - UncheckedCast(value), UncheckedCast(value)); - TNode overflow = Projection<1>(pair); - GotoIf(overflow, &if_overflow); - - TNode almost_tagged_value = - ChangeInt32ToIntPtr(Projection<0>(pair)); - var_result = BitcastWordToTaggedSigned(almost_tagged_value); - } + // The {value} is definitely in valid Smi range. + var_result = SmiTag(Signed(ChangeUint32ToWord(value))); } Goto(&if_join); @@ -4961,6 +5020,32 @@ TNode CodeStubAssembler::ChangeUint32ToTagged( return var_result.value(); } +TNode CodeStubAssembler::ChangeUintPtrToTagged(TNode value) { + Label if_overflow(this, Label::kDeferred), if_not_overflow(this), + if_join(this); + TVARIABLE(Number, var_result); + // If {value} > 2^31 - 1, we need to store it in a HeapNumber. + Branch(UintPtrLessThan(UintPtrConstant(Smi::kMaxValue), value), &if_overflow, + &if_not_overflow); + + BIND(&if_not_overflow); + { + // The {value} is definitely in valid Smi range. 
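
The simplified ChangeUint32ToTagged takes the Smi fast path whenever value <= Smi::kMaxValue and boxes a HeapNumber only on overflow; as the deleted TODO noted, the old 31-bit add-with-overflow dance could never actually overflow. The same split, with an illustrative 31-bit Smi range and shift-by-one tagging:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kSmiMaxValue = (1u << 30) - 1;  // illustrative range

    struct Number {
      bool is_smi;
      intptr_t smi;       // valid when is_smi
      double heap_value;  // stand-in for the boxed HeapNumber path
    };

    Number ChangeUint32ToTagged(uint32_t value) {
      if (value <= kSmiMaxValue) {
        // Definitely in Smi range: widen to word size, then tag.
        return {true, static_cast<intptr_t>(value) << 1, 0.0};
      }
      return {false, 0, static_cast<double>(value)};  // needs a HeapNumber
    }

    int main() {
      Number a = ChangeUint32ToTagged(42);
      Number b = ChangeUint32ToTagged(0xFFFFFFFFu);
      printf("a smi=%d value=%ld\n", a.is_smi, static_cast<long>(a.smi >> 1));
      printf("b smi=%d value=%.0f\n", b.is_smi, b.heap_value);
      return 0;
    }
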
+ var_result = SmiTag(Signed(value)); + } + Goto(&if_join); + + BIND(&if_overflow); + { + TNode float64_value = ChangeUintPtrToFloat64(value); + var_result = AllocateHeapNumberWithValue(float64_value); + } + Goto(&if_join); + + BIND(&if_join); + return var_result.value(); +} + TNode CodeStubAssembler::ToThisString(Node* context, Node* value, char const* method_name) { VARIABLE(var_value, MachineRepresentation::kTagged, value); @@ -5263,6 +5348,13 @@ TNode CodeStubAssembler::IsExtensibleMap(SloppyTNode map) { return IsSetWord32(LoadMapBitField2(map)); } +TNode CodeStubAssembler::IsExtensibleNonPrototypeMap(TNode map) { + int kMask = Map::IsExtensibleBit::kMask | Map::IsPrototypeMapBit::kMask; + int kExpected = Map::IsExtensibleBit::kMask; + return Word32Equal(Word32And(LoadMapBitField2(map), Int32Constant(kMask)), + Int32Constant(kExpected)); +} + TNode CodeStubAssembler::IsCallableMap(SloppyTNode map) { CSA_ASSERT(this, IsMap(map)); return IsSetWord32(LoadMapBitField(map)); @@ -5280,42 +5372,42 @@ TNode CodeStubAssembler::IsUndetectableMap(SloppyTNode map) { TNode CodeStubAssembler::IsNoElementsProtectorCellInvalid() { Node* invalid = SmiConstant(Isolate::kProtectorInvalid); - Node* cell = LoadRoot(Heap::kNoElementsProtectorRootIndex); + Node* cell = LoadRoot(RootIndex::kNoElementsProtector); Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return WordEqual(cell_value, invalid); } TNode CodeStubAssembler::IsPromiseResolveProtectorCellInvalid() { Node* invalid = SmiConstant(Isolate::kProtectorInvalid); - Node* cell = LoadRoot(Heap::kPromiseResolveProtectorRootIndex); + Node* cell = LoadRoot(RootIndex::kPromiseResolveProtector); Node* cell_value = LoadObjectField(cell, Cell::kValueOffset); return WordEqual(cell_value, invalid); } TNode CodeStubAssembler::IsPromiseThenProtectorCellInvalid() { Node* invalid = SmiConstant(Isolate::kProtectorInvalid); - Node* cell = LoadRoot(Heap::kPromiseThenProtectorRootIndex); + Node* cell = LoadRoot(RootIndex::kPromiseThenProtector); Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return WordEqual(cell_value, invalid); } TNode CodeStubAssembler::IsArraySpeciesProtectorCellInvalid() { Node* invalid = SmiConstant(Isolate::kProtectorInvalid); - Node* cell = LoadRoot(Heap::kArraySpeciesProtectorRootIndex); + Node* cell = LoadRoot(RootIndex::kArraySpeciesProtector); Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return WordEqual(cell_value, invalid); } TNode CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() { Node* invalid = SmiConstant(Isolate::kProtectorInvalid); - Node* cell = LoadRoot(Heap::kTypedArraySpeciesProtectorRootIndex); + Node* cell = LoadRoot(RootIndex::kTypedArraySpeciesProtector); Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return WordEqual(cell_value, invalid); } TNode CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() { Node* invalid = SmiConstant(Isolate::kProtectorInvalid); - Node* cell = LoadRoot(Heap::kPromiseSpeciesProtectorRootIndex); + Node* cell = LoadRoot(RootIndex::kPromiseSpeciesProtector); Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return WordEqual(cell_value, invalid); } @@ -5354,7 +5446,7 @@ TNode CodeStubAssembler::IsCallable(SloppyTNode object) { } TNode CodeStubAssembler::IsCell(SloppyTNode object) { - return WordEqual(LoadMap(object), LoadRoot(Heap::kCellMapRootIndex)); + return WordEqual(LoadMap(object), LoadRoot(RootIndex::kCellMap)); } TNode CodeStubAssembler::IsCode(SloppyTNode object) { 
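
IsExtensibleNonPrototypeMap above answers a two-bit predicate with a single masked compare: AND the bit field with both masks, then require exactly the extensible bit set and the prototype bit clear. Standalone, with made-up bit positions:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kIsExtensibleBit = 1u << 2;  // illustrative positions
    constexpr uint32_t kIsPrototypeBit = 1u << 5;

    // One masked compare answers "extensible AND not a prototype map".
    bool IsExtensibleNonPrototype(uint32_t bit_field) {
      constexpr uint32_t kMask = kIsExtensibleBit | kIsPrototypeBit;
      constexpr uint32_t kExpected = kIsExtensibleBit;
      return (bit_field & kMask) == kExpected;
    }

    int main() {
      printf("%d\n", IsExtensibleNonPrototype(kIsExtensibleBit));  // 1
      printf("%d\n", IsExtensibleNonPrototype(kIsExtensibleBit |
                                              kIsPrototypeBit));   // 0
      printf("%d\n", IsExtensibleNonPrototype(0));                 // 0
      return 0;
    }
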
@@ -5436,11 +5528,11 @@ TNode CodeStubAssembler::IsExternalStringInstanceType( Int32Constant(kExternalStringTag)); } -TNode CodeStubAssembler::IsShortExternalStringInstanceType( +TNode CodeStubAssembler::IsUncachedExternalStringInstanceType( SloppyTNode instance_type) { CSA_ASSERT(this, IsStringInstanceType(instance_type)); - STATIC_ASSERT(kShortExternalStringTag != 0); - return IsSetWord32(instance_type, kShortExternalStringMask); + STATIC_ASSERT(kUncachedExternalStringTag != 0); + return IsSetWord32(instance_type, kUncachedExternalStringMask); } TNode CodeStubAssembler::IsJSReceiverInstanceType( @@ -5653,6 +5745,16 @@ TNode CodeStubAssembler::IsHeapNumber(SloppyTNode object) { return IsHeapNumberMap(LoadMap(object)); } +TNode CodeStubAssembler::IsHeapNumberInstanceType( + SloppyTNode instance_type) { + return InstanceTypeEqual(instance_type, HEAP_NUMBER_TYPE); +} + +TNode CodeStubAssembler::IsOddballInstanceType( + SloppyTNode instance_type) { + return InstanceTypeEqual(instance_type, ODDBALL_TYPE); +} + TNode CodeStubAssembler::IsMutableHeapNumber( SloppyTNode object) { return IsMutableHeapNumberMap(LoadMap(object)); @@ -5668,8 +5770,12 @@ TNode CodeStubAssembler::IsFeedbackVector( } TNode CodeStubAssembler::IsName(SloppyTNode object) { - return Int32LessThanOrEqual(LoadInstanceType(object), - Int32Constant(LAST_NAME_TYPE)); + return IsNameInstanceType(LoadInstanceType(object)); +} + +TNode CodeStubAssembler::IsNameInstanceType( + SloppyTNode instance_type) { + return Int32LessThanOrEqual(instance_type, Int32Constant(LAST_NAME_TYPE)); } TNode CodeStubAssembler::IsString(SloppyTNode object) { @@ -5702,20 +5808,19 @@ TNode CodeStubAssembler::IsPrimitiveInstanceType( TNode CodeStubAssembler::IsPrivateSymbol( SloppyTNode object) { - return Select( - IsSymbol(object), - [=] { - TNode symbol = CAST(object); - TNode flags = - SmiToInt32(LoadObjectField(symbol, Symbol::kFlagsOffset)); - return IsSetWord32(flags, 1 << Symbol::kPrivateBit); - }, - [=] { return Int32FalseConstant(); }); + return Select(IsSymbol(object), + [=] { + TNode symbol = CAST(object); + TNode flags = LoadObjectField( + symbol, Symbol::kFlagsOffset); + return IsSetWord32(flags); + }, + [=] { return Int32FalseConstant(); }); } TNode CodeStubAssembler::IsNativeContext( SloppyTNode object) { - return WordEqual(LoadMap(object), LoadRoot(Heap::kNativeContextMapRootIndex)); + return WordEqual(LoadMap(object), LoadRoot(RootIndex::kNativeContextMap)); } TNode CodeStubAssembler::IsFixedDoubleArray( @@ -6002,7 +6107,7 @@ TNode CodeStubAssembler::StringFromSingleCharCode(TNode code) { { // Load the isolate wide single character string cache. TNode cache = - CAST(LoadRoot(Heap::kSingleCharacterStringCacheRootIndex)); + CAST(LoadRoot(RootIndex::kSingleCharacterStringCache)); TNode code_index = Signed(ChangeUint32ToWord(code)); // Check if we have an entry for the {code} in the single character string @@ -6055,7 +6160,7 @@ TNode CodeStubAssembler::StringFromSingleCharCode(TNode code) { // 0 <= |from_index| <= |from_index| + |character_count| < from_string.length. TNode CodeStubAssembler::AllocAndCopyStringCharacters( Node* from, Node* from_instance_type, TNode from_index, - TNode character_count) { + TNode character_count) { Label end(this), one_byte_sequential(this), two_byte_sequential(this); TVARIABLE(String, var_result); @@ -6065,10 +6170,10 @@ TNode CodeStubAssembler::AllocAndCopyStringCharacters( // The subject string is a sequential one-byte string. 
BIND(&one_byte_sequential); { - TNode result = - AllocateSeqOneByteString(NoContextConstant(), character_count); + TNode result = AllocateSeqOneByteString( + NoContextConstant(), Unsigned(TruncateIntPtrToInt32(character_count))); CopyStringCharacters(from, result, from_index, IntPtrConstant(0), - SmiUntag(character_count), String::ONE_BYTE_ENCODING, + character_count, String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING); var_result = result; Goto(&end); @@ -6077,10 +6182,10 @@ TNode CodeStubAssembler::AllocAndCopyStringCharacters( // The subject string is a sequential two-byte string. BIND(&two_byte_sequential); { - TNode result = - AllocateSeqTwoByteString(NoContextConstant(), character_count); + TNode result = AllocateSeqTwoByteString( + NoContextConstant(), Unsigned(TruncateIntPtrToInt32(character_count))); CopyStringCharacters(from, result, from_index, IntPtrConstant(0), - SmiUntag(character_count), String::TWO_BYTE_ENCODING, + character_count, String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING); var_result = result; Goto(&end); @@ -6143,15 +6248,17 @@ TNode CodeStubAssembler::SubString(TNode string, BIND(&one_byte_slice); { - var_result = AllocateSlicedOneByteString(SmiTag(substr_length), - direct_string, SmiTag(offset)); + var_result = AllocateSlicedOneByteString( + Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string, + SmiTag(offset)); Goto(&end); } BIND(&two_byte_slice); { - var_result = AllocateSlicedTwoByteString(SmiTag(substr_length), - direct_string, SmiTag(offset)); + var_result = AllocateSlicedTwoByteString( + Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string, + SmiTag(offset)); Goto(&end); } @@ -6163,7 +6270,7 @@ TNode CodeStubAssembler::SubString(TNode string, GotoIf(to_direct.is_external(), &external_string); var_result = AllocAndCopyStringCharacters(direct_string, instance_type, - offset, SmiTag(substr_length)); + offset, substr_length); Counters* counters = isolate()->counters(); IncrementCounter(counters->sub_string_native(), 1); @@ -6177,7 +6284,7 @@ TNode CodeStubAssembler::SubString(TNode string, Node* const fake_sequential_string = to_direct.PointerToString(&runtime); var_result = AllocAndCopyStringCharacters( - fake_sequential_string, instance_type, offset, SmiTag(substr_length)); + fake_sequential_string, instance_type, offset, substr_length); Counters* counters = isolate()->counters(); IncrementCounter(counters->sub_string_native(), 1); @@ -6348,7 +6455,7 @@ TNode ToDirectStringAssembler::TryToSequential( BIND(&if_isexternal); { - GotoIf(IsShortExternalStringInstanceType(var_instance_type_.value()), + GotoIf(IsUncachedExternalStringInstanceType(var_instance_type_.value()), if_bailout); TNode string = CAST(var_string_.value()); @@ -6458,36 +6565,37 @@ TNode CodeStubAssembler::StringAdd(Node* context, TNode left, done(this, &result), done_native(this, &result); Counters* counters = isolate()->counters(); - TNode left_length = LoadStringLengthAsSmi(left); - GotoIf(SmiNotEqual(SmiConstant(0), left_length), &check_right); + TNode left_length = LoadStringLengthAsWord32(left); + GotoIfNot(Word32Equal(left_length, Uint32Constant(0)), &check_right); result = right; Goto(&done_native); BIND(&check_right); - TNode right_length = LoadStringLengthAsSmi(right); - GotoIf(SmiNotEqual(SmiConstant(0), right_length), &cons); + TNode right_length = LoadStringLengthAsWord32(right); + GotoIfNot(Word32Equal(right_length, Uint32Constant(0)), &cons); result = left; Goto(&done_native); BIND(&cons); { - TNode new_length = SmiAdd(left_length, right_length); + TNode 
new_length = Uint32Add(left_length, right_length); // If new length is greater than String::kMaxLength, goto runtime to // throw. Note: we also need to invalidate the string length protector, so // can't just throw here directly. - GotoIf(SmiAbove(new_length, SmiConstant(String::kMaxLength)), &runtime); + GotoIf(Uint32GreaterThan(new_length, Uint32Constant(String::kMaxLength)), + &runtime); TVARIABLE(String, var_left, left); TVARIABLE(String, var_right, right); Variable* input_vars[2] = {&var_left, &var_right}; Label non_cons(this, 2, input_vars); Label slow(this, Label::kDeferred); - GotoIf(SmiLessThan(new_length, SmiConstant(ConsString::kMinLength)), + GotoIf(Uint32LessThan(new_length, Uint32Constant(ConsString::kMinLength)), &non_cons); - result = NewConsString(context, new_length, var_left.value(), - var_right.value(), flags); + result = + NewConsString(new_length, var_left.value(), var_right.value(), flags); Goto(&done_native); BIND(&non_cons); @@ -6506,8 +6614,8 @@ TNode CodeStubAssembler::StringAdd(Node* context, TNode left, GotoIf(IsSetWord32(xored_instance_types, kStringEncodingMask), &runtime); GotoIf(IsSetWord32(ored_instance_types, kStringRepresentationMask), &slow); - TNode word_left_length = SmiUntag(left_length); - TNode word_right_length = SmiUntag(right_length); + TNode word_left_length = Signed(ChangeUint32ToWord(left_length)); + TNode word_right_length = Signed(ChangeUint32ToWord(right_length)); Label two_byte(this); GotoIf(Word32Equal(Word32And(ored_instance_types, @@ -6647,7 +6755,7 @@ TNode CodeStubAssembler::NumberToString(TNode input) { done(this, &result); // Load the number string cache. - Node* number_string_cache = LoadRoot(Heap::kNumberStringCacheRootIndex); + Node* number_string_cache = LoadRoot(RootIndex::kNumberStringCache); // Make the hash mask from the length of the number string cache. It // contains two elements (number and string) for each cache entry. 
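
StringAdd now does its length math in uint32: both operands are at most String::kMaxLength, which is well below 2^31, so Uint32Add cannot wrap and one unsigned compare against kMaxLength catches the must-go-to-runtime case. A sketch with an illustrative cap:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kMaxLength = (1u << 30) - 25;  // illustrative cap < 2^31

    // Returns false when the concatenation must be handled by a slow path.
    // The sum cannot wrap: both inputs are <= kMaxLength, so it stays < 2^31.
    bool CheckedConcatLength(uint32_t left, uint32_t right, uint32_t* out) {
      uint32_t total = left + right;
      if (total > kMaxLength) return false;
      *out = total;
      return true;
    }

    int main() {
      uint32_t n;
      printf("%d\n", CheckedConcatLength(10, 20, &n));                 // 1
      printf("%d\n", CheckedConcatLength(kMaxLength, kMaxLength, &n)); // 0
      return 0;
    }
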
@@ -6722,52 +6830,6 @@ TNode CodeStubAssembler::NumberToString(TNode input) { return result.value(); } -TNode CodeStubAssembler::ToName(SloppyTNode context, - SloppyTNode value) { - Label end(this); - TVARIABLE(Name, var_result); - - Label is_number(this); - GotoIf(TaggedIsSmi(value), &is_number); - - Label not_name(this); - TNode value_instance_type = LoadInstanceType(CAST(value)); - STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE); - GotoIf(Int32GreaterThan(value_instance_type, Int32Constant(LAST_NAME_TYPE)), - ¬_name); - - var_result = CAST(value); - Goto(&end); - - BIND(&is_number); - { - var_result = CAST(CallBuiltin(Builtins::kNumberToString, context, value)); - Goto(&end); - } - - BIND(¬_name); - { - GotoIf(InstanceTypeEqual(value_instance_type, HEAP_NUMBER_TYPE), - &is_number); - - Label not_oddball(this); - GotoIfNot(InstanceTypeEqual(value_instance_type, ODDBALL_TYPE), - ¬_oddball); - - var_result = LoadObjectField(CAST(value), Oddball::kToStringOffset); - Goto(&end); - - BIND(¬_oddball); - { - var_result = CAST(CallRuntime(Runtime::kToName, context, value)); - Goto(&end); - } - } - - BIND(&end); - return var_result.value(); -} - Node* CodeStubAssembler::NonNumberToNumberOrNumeric( Node* context, Node* input, Object::Conversion mode, BigIntHandling bigint_handling) { @@ -7709,14 +7771,9 @@ template void CodeStubAssembler::NameDictionaryLookup( TNode, TNode, Label*, TVariable*, Label*, int, LookupMode); -Node* CodeStubAssembler::ComputeIntegerHash(Node* key) { - return ComputeIntegerHash(key, IntPtrConstant(kZeroHashSeed)); -} - -Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) { - // See v8::internal::ComputeIntegerHash() +Node* CodeStubAssembler::ComputeUnseededHash(Node* key) { + // See v8::internal::ComputeUnseededHash() Node* hash = TruncateIntPtrToInt32(key); - hash = Word32Xor(hash, seed); hash = Int32Add(Word32Xor(hash, Int32Constant(0xFFFFFFFF)), Word32Shl(hash, Int32Constant(15))); hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(12))); @@ -7727,6 +7784,21 @@ Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) { return Word32And(hash, Int32Constant(0x3FFFFFFF)); } +Node* CodeStubAssembler::ComputeSeededHash(Node* key) { + Node* const function_addr = + ExternalConstant(ExternalReference::compute_integer_hash()); + Node* const isolate_ptr = + ExternalConstant(ExternalReference::isolate_address(isolate())); + + MachineType type_ptr = MachineType::Pointer(); + MachineType type_uint32 = MachineType::Uint32(); + + Node* const result = + CallCFunction2(type_uint32, type_ptr, type_uint32, function_addr, + isolate_ptr, TruncateIntPtrToInt32(key)); + return result; +} + void CodeStubAssembler::NumberDictionaryLookup( TNode dictionary, TNode intptr_index, Label* if_found, TVariable* var_entry, Label* if_not_found) { @@ -7737,16 +7809,7 @@ void CodeStubAssembler::NumberDictionaryLookup( TNode capacity = SmiUntag(GetCapacity(dictionary)); TNode mask = IntPtrSub(capacity, IntPtrConstant(1)); - TNode int32_seed; - - if (Is64()) { - int32_seed = TruncateInt64ToInt32(HashSeed()); - } else { - int32_seed = HashSeedLow(); - } - - TNode hash = - ChangeUint32ToWord(ComputeIntegerHash(intptr_index, int32_seed)); + TNode hash = ChangeUint32ToWord(ComputeSeededHash(intptr_index)); Node* key_as_float64 = RoundIntPtrToFloat64(intptr_index); // See Dictionary::FirstProbe(). 
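
ComputeUnseededHash above is a Jenkins-style 32-bit integer mix; the hunk shows its first two steps and the final 30-bit mask, and the elided middle steps below follow the standard variant this helper is based on, so treat them as an approximation:

    #include <cstdint>
    #include <cstdio>

    // The first two steps and the final mask appear verbatim in the hunk
    // above; the middle steps are the standard Jenkins-style mix.
    uint32_t ComputeUnseededHash(uint32_t key) {
      uint32_t hash = key;
      hash = (hash ^ 0xFFFFFFFFu) + (hash << 15);  // ~hash + (hash << 15)
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash & 0x3FFFFFFFu;  // hash values are 30-bit
    }

    int main() {
      for (uint32_t key : {0u, 1u, 2u, 12345u})
        printf("%u -> %u\n", key, ComputeUnseededHash(key));
      return 0;
    }
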
@@ -8147,6 +8210,125 @@ void CodeStubAssembler::DescriptorArrayForEach( IndexAdvanceMode::kPost); } +void CodeStubAssembler::ForEachEnumerableOwnProperty( + TNode context, TNode map, TNode object, + const ForEachKeyValueFunction& body, Label* bailout) { + TNode type = LoadMapInstanceType(map); + TNode bit_field3 = EnsureOnlyHasSimpleProperties(map, type, bailout); + + TNode descriptors = LoadMapDescriptors(map); + TNode nof_descriptors = + DecodeWord32(bit_field3); + + TVARIABLE(BoolT, var_stable, Int32TrueConstant()); + VariableList list({&var_stable}, zone()); + + DescriptorArrayForEach( + list, Unsigned(Int32Constant(0)), nof_descriptors, + [=, &var_stable](TNode descriptor_key_index) { + TNode next_key = + CAST(LoadWeakFixedArrayElement(descriptors, descriptor_key_index)); + + TVARIABLE(Object, var_value, SmiConstant(0)); + Label callback(this), next_iteration(this); + + { + TVARIABLE(Map, var_map); + TVARIABLE(HeapObject, var_meta_storage); + TVARIABLE(IntPtrT, var_entry); + TVARIABLE(Uint32T, var_details); + Label if_found(this); + + Label if_found_fast(this), if_found_dict(this); + + Label if_stable(this), if_not_stable(this); + Branch(var_stable.value(), &if_stable, &if_not_stable); + BIND(&if_stable); + { + // Directly decode from the descriptor array if |object| did not + // change shape. + var_map = map; + var_meta_storage = descriptors; + var_entry = Signed(descriptor_key_index); + Goto(&if_found_fast); + } + BIND(&if_not_stable); + { + // If the map did change, do a slower lookup. We are still + // guaranteed that the object has a simple shape, and that the key + // is a name. + var_map = LoadMap(object); + TryLookupPropertyInSimpleObject( + object, var_map.value(), next_key, &if_found_fast, + &if_found_dict, &var_meta_storage, &var_entry, &next_iteration); + } + + BIND(&if_found_fast); + { + TNode descriptors = CAST(var_meta_storage.value()); + TNode name_index = var_entry.value(); + + // Skip non-enumerable properties. + var_details = LoadDetailsByKeyIndex(descriptors, name_index); + GotoIf(IsSetWord32(var_details.value(), + PropertyDetails::kAttributesDontEnumMask), + &next_iteration); + + LoadPropertyFromFastObject(object, var_map.value(), descriptors, + name_index, var_details.value(), + &var_value); + Goto(&if_found); + } + BIND(&if_found_dict); + { + TNode dictionary = CAST(var_meta_storage.value()); + TNode entry = var_entry.value(); + + TNode details = + LoadDetailsByKeyIndex(dictionary, entry); + // Skip non-enumerable properties. + GotoIf( + IsSetWord32(details, PropertyDetails::kAttributesDontEnumMask), + &next_iteration); + + var_details = details; + var_value = LoadValueByKeyIndex(dictionary, entry); + Goto(&if_found); + } + + // Here we have details and value which could be an accessor. + BIND(&if_found); + { + Label slow_load(this, Label::kDeferred); + + var_value = CallGetterIfAccessor(var_value.value(), + var_details.value(), context, + object, &slow_load, kCallJSGetter); + Goto(&callback); + + BIND(&slow_load); + var_value = + CallRuntime(Runtime::kGetProperty, context, object, next_key); + Goto(&callback); + + BIND(&callback); + body(next_key, var_value.value()); + + // Check if |object| is still stable, i.e. we can proceed using + // property details from preloaded |descriptors|. 
+ var_stable = + Select(var_stable.value(), + [=] { return WordEqual(LoadMap(object), map); }, + [=] { return Int32FalseConstant(); }); + + Goto(&next_iteration); + } + } + + BIND(&next_iteration); + }); +} + void CodeStubAssembler::DescriptorLookup( SloppyTNode unique_name, SloppyTNode descriptors, SloppyTNode bitfield3, Label* if_found, @@ -8741,7 +8923,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset); GotoIf(IsDetachedBuffer(buffer), if_absent); - Node* length = SmiUntag(LoadTypedArrayLength(CAST(object))); + Node* length = SmiUntag(LoadJSTypedArrayLength(CAST(object))); Branch(UintPtrLessThan(intptr_index, length), if_found, if_absent); } BIND(&if_oob); @@ -9144,25 +9326,22 @@ void CodeStubAssembler::CheckForAssociatedProtector(Node* name, Label* if_protector) { // This list must be kept in sync with LookupIterator::UpdateProtector! // TODO(jkummerow): Would it be faster to have a bit in Symbol::flags()? - GotoIf(WordEqual(name, LoadRoot(Heap::kconstructor_stringRootIndex)), - if_protector); - GotoIf(WordEqual(name, LoadRoot(Heap::kiterator_symbolRootIndex)), - if_protector); - GotoIf(WordEqual(name, LoadRoot(Heap::knext_stringRootIndex)), if_protector); - GotoIf(WordEqual(name, LoadRoot(Heap::kspecies_symbolRootIndex)), + GotoIf(WordEqual(name, LoadRoot(RootIndex::kconstructor_string)), if_protector); - GotoIf(WordEqual(name, LoadRoot(Heap::kis_concat_spreadable_symbolRootIndex)), + GotoIf(WordEqual(name, LoadRoot(RootIndex::kiterator_symbol)), if_protector); + GotoIf(WordEqual(name, LoadRoot(RootIndex::knext_string)), if_protector); + GotoIf(WordEqual(name, LoadRoot(RootIndex::kspecies_symbol)), if_protector); + GotoIf(WordEqual(name, LoadRoot(RootIndex::kis_concat_spreadable_symbol)), if_protector); - GotoIf(WordEqual(name, LoadRoot(Heap::kresolve_stringRootIndex)), - if_protector); - GotoIf(WordEqual(name, LoadRoot(Heap::kthen_stringRootIndex)), if_protector); + GotoIf(WordEqual(name, LoadRoot(RootIndex::kresolve_string)), if_protector); + GotoIf(WordEqual(name, LoadRoot(RootIndex::kthen_string)), if_protector); // Fall through if no case matched. } TNode CodeStubAssembler::LoadReceiverMap(SloppyTNode receiver) { return Select( TaggedIsSmi(receiver), - [=] { return CAST(LoadRoot(Heap::kHeapNumberMapRootIndex)); }, + [=] { return CAST(LoadRoot(RootIndex::kHeapNumberMap)); }, [=] { return LoadMap(UncheckedCast(receiver)); }); } @@ -9581,7 +9760,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, // Bounds check. 
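
ForEachEnumerableOwnProperty walks the preloaded descriptor array for as long as the object keeps its original map, and downgrades to a per-key lookup once a callback changes the object's shape. The stability-downgrade pattern, loosely transplanted onto a std::map (hypothetical names, not V8 semantics):

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    using Object = std::map<std::string, int>;

    // Iterate a snapshot of the keys; while the object keeps its original
    // "shape" (here: size) the snapshot is trusted, otherwise each key is
    // re-checked first, mirroring the var_stable downgrade above.
    template <typename Body>
    void ForEachOwnProperty(Object& object, const Body& body) {
      std::vector<std::string> snapshot;
      for (const auto& p : object) snapshot.push_back(p.first);
      const size_t shape = object.size();
      bool stable = true;
      for (const auto& key : snapshot) {
        if (!stable && object.find(key) == object.end()) continue;  // slow path
        body(key, object[key]);
        stable = stable && object.size() == shape;  // once unstable, stays so
      }
    }

    int main() {
      Object o{{"a", 1}, {"b", 2}, {"c", 3}};
      ForEachOwnProperty(o, [&](const std::string& key, int value) {
        if (key == "a") o.erase("b");  // mutation flips the loop to slow path
        printf("%s=%d\n", key.c_str(), value);
      });
      return 0;  // prints a=1 then c=3; the erased "b" is skipped
    }
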
Node* length = - TaggedToParameter(LoadTypedArrayLength(CAST(object)), parameter_mode); + TaggedToParameter(LoadJSTypedArrayLength(CAST(object)), parameter_mode); if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) { // Skip the store if we write beyond the length or @@ -9826,9 +10005,8 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object, BIND(&map_check); { TNode memento_map = LoadObjectField(object, kMementoMapOffset); - Branch( - WordEqual(memento_map, LoadRoot(Heap::kAllocationMementoMapRootIndex)), - memento_found, &no_memento_found); + Branch(WordEqual(memento_map, LoadRoot(RootIndex::kAllocationMementoMap)), + memento_found, &no_memento_found); } BIND(&no_memento_found); Comment("] TrapAllocationMemento"); @@ -9842,7 +10020,7 @@ TNode CodeStubAssembler::CreateAllocationSiteInFeedbackVector( SloppyTNode feedback_vector, TNode slot) { TNode size = IntPtrConstant(AllocationSite::kSizeWithWeakNext); Node* site = Allocate(size, CodeStubAssembler::kPretenured); - StoreMapNoWriteBarrier(site, Heap::kAllocationSiteWithWeakNextMapRootIndex); + StoreMapNoWriteBarrier(site, RootIndex::kAllocationSiteWithWeakNextMap); // Should match AllocationSite::Initialize. TNode field = UpdateWord( IntPtrConstant(0), IntPtrConstant(GetInitialFastElementsKind())); @@ -9866,7 +10044,7 @@ TNode CodeStubAssembler::CreateAllocationSiteInFeedbackVector( // Store an empty fixed array for the code dependency. StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset, - Heap::kEmptyWeakFixedArrayRootIndex); + RootIndex::kEmptyWeakFixedArray); // Link the object to the allocation site list TNode site_list = ExternalConstant( @@ -10034,9 +10212,10 @@ void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace( doesnt_fit); } -void CodeStubAssembler::InitializeFieldsWithRoot( - Node* object, Node* start_offset, Node* end_offset, - Heap::RootListIndex root_index) { +void CodeStubAssembler::InitializeFieldsWithRoot(Node* object, + Node* start_offset, + Node* end_offset, + RootIndex root_index) { CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object)); start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag)); end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag)); @@ -10070,6 +10249,10 @@ void CodeStubAssembler::BranchIfNumberRelationalComparison( // Both {left} and {right} are Smi, so just perform a fast // Smi comparison. switch (op) { + case Operation::kEqual: + BranchIfSmiEqual(smi_left, smi_right, if_true, + if_false); + break; case Operation::kLessThan: BranchIfSmiLessThan(smi_left, smi_right, if_true, if_false); @@ -10116,6 +10299,10 @@ void CodeStubAssembler::BranchIfNumberRelationalComparison( BIND(&do_float_comparison); { switch (op) { + case Operation::kEqual: + Branch(Float64Equal(var_left_float.value(), var_right_float.value()), + if_true, if_false); + break; case Operation::kLessThan: Branch(Float64LessThan(var_left_float.value(), var_right_float.value()), if_true, if_false); @@ -10526,15 +10713,16 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left, } // If {left} is a receiver, call ToPrimitive(left, hint Number). - // Otherwise call ToNumeric(left) and then ToNumeric(right). + // Otherwise call ToNumeric(right) and then ToNumeric(left), the + // order here is important as it's observable by user code. 
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); Label if_left_receiver(this, Label::kDeferred); GotoIf(IsJSReceiverInstanceType(left_instance_type), &if_left_receiver); + var_right.Bind(CallBuiltin(Builtins::kToNumeric, context, right)); var_left.Bind( CallBuiltin(Builtins::kNonNumberToNumeric, context, left)); - var_right.Bind(CallBuiltin(Builtins::kToNumeric, context, right)); Goto(&loop); BIND(&if_left_receiver); @@ -11481,7 +11669,7 @@ TNode CodeStubAssembler::HasProperty(SloppyTNode context, BIND(&if_proxy); { - TNode name = ToName(context, key); + TNode name = CAST(CallBuiltin(Builtins::kToName, context, key)); switch (mode) { case kHasProperty: GotoIf(IsPrivateSymbol(name), &return_false); @@ -11947,9 +12135,9 @@ TNode CodeStubAssembler::CreateArrayIterator( Node* iterator = Allocate(JSArrayIterator::kSize); StoreMapNoWriteBarrier(iterator, iterator_map); StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(iterator, JSArrayIterator::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldNoWriteBarrier( iterator, JSArrayIterator::kIteratedObjectOffset, object); StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset, @@ -11969,9 +12157,9 @@ Node* CodeStubAssembler::AllocateJSIteratorResult(Node* context, Node* value, Node* result = Allocate(JSIteratorResult::kSize); StoreMapNoWriteBarrier(result, map); StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, value); StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset, done); return result; @@ -11986,7 +12174,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context, TNode elements = UncheckedCast( Allocate(elements_size + JSArray::kSize + JSIteratorResult::kSize)); StoreObjectFieldRoot(elements, FixedArray::kMapOffset, - Heap::kFixedArrayMapRootIndex); + RootIndex::kFixedArrayMap); StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length); StoreFixedArrayElement(elements, 0, key); StoreFixedArrayElement(elements, 1, value); @@ -11995,7 +12183,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context, Node* array = InnerAllocate(elements, elements_size); StoreMapNoWriteBarrier(array, array_map); StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset, elements); StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length); Node* iterator_map = @@ -12003,12 +12191,12 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context, Node* result = InnerAllocate(array, JSArray::kSize); StoreMapNoWriteBarrier(result, iterator_map); StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, array); StoreObjectFieldRoot(result, JSIteratorResult::kDoneOffset, - 
Heap::kFalseValueRootIndex); + RootIndex::kFalseValue); return result; } @@ -12021,12 +12209,19 @@ Node* CodeStubAssembler::ArraySpeciesCreate(TNode context, len); } +Node* CodeStubAssembler::InternalArrayCreate(TNode context, + TNode len) { + Node* native_context = LoadNativeContext(context); + Node* const constructor = LoadContextElement( + native_context, Context::INTERNAL_ARRAY_FUNCTION_INDEX); + return ConstructJS(CodeFactory::Construct(isolate()), context, constructor, + len); +} + Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) { CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE)); - - Node* buffer_bit_field = LoadObjectField( - buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32()); - return IsSetWord32(buffer_bit_field); + TNode buffer_bit_field = LoadJSArrayBufferBitField(CAST(buffer)); + return IsSetWord32(buffer_bit_field); } void CodeStubAssembler::ThrowIfArrayBufferIsDetached( @@ -12042,22 +12237,44 @@ void CodeStubAssembler::ThrowIfArrayBufferIsDetached( void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached( SloppyTNode context, TNode array_buffer_view, const char* method_name) { - TNode buffer = LoadArrayBufferViewBuffer(array_buffer_view); + TNode buffer = LoadJSArrayBufferViewBuffer(array_buffer_view); ThrowIfArrayBufferIsDetached(context, buffer, method_name); } -TNode CodeStubAssembler::LoadArrayBufferViewBuffer( - TNode array_buffer_view) { - return LoadObjectField(array_buffer_view, - JSArrayBufferView::kBufferOffset); +TNode CodeStubAssembler::LoadJSArrayBufferBitField( + TNode array_buffer) { + return LoadObjectField(array_buffer, JSArrayBuffer::kBitFieldOffset); } -TNode CodeStubAssembler::LoadArrayBufferBackingStore( +TNode CodeStubAssembler::LoadJSArrayBufferBackingStore( TNode array_buffer) { return LoadObjectField(array_buffer, JSArrayBuffer::kBackingStoreOffset); } +TNode CodeStubAssembler::LoadJSArrayBufferViewBuffer( + TNode array_buffer_view) { + return LoadObjectField(array_buffer_view, + JSArrayBufferView::kBufferOffset); +} + +TNode CodeStubAssembler::LoadJSArrayBufferViewByteLength( + TNode array_buffer_view) { + return LoadObjectField(array_buffer_view, + JSArrayBufferView::kByteLengthOffset); +} + +TNode CodeStubAssembler::LoadJSArrayBufferViewByteOffset( + TNode array_buffer_view) { + return LoadObjectField(array_buffer_view, + JSArrayBufferView::kByteOffsetOffset); +} + +TNode CodeStubAssembler::LoadJSTypedArrayLength( + TNode typed_array) { + return LoadObjectField(typed_array, JSTypedArray::kLengthOffset); +} + CodeStubArguments::CodeStubArguments( CodeStubAssembler* assembler, Node* argc, Node* fp, CodeStubAssembler::ParameterMode param_mode, ReceiverMode receiver_mode) @@ -12400,11 +12617,11 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map, STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize); StoreMapNoWriteBarrier(fun, map); StoreObjectFieldRoot(fun, JSObject::kPropertiesOrHashOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(fun, JSObject::kElementsOffset, - Heap::kEmptyFixedArrayRootIndex); + RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(fun, JSFunction::kFeedbackCellOffset, - Heap::kManyClosuresCellRootIndex); + RootIndex::kManyClosuresCell); StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset, shared_info); StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context); @@ -12555,7 +12772,7 @@ void CodeStubAssembler::PerformStackCheck(TNode context) { void 
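IsDetachedBuffer above now routes through the new typed accessor LoadJSArrayBufferBitField and tests a bit of the result instead of open-coding the field load. A freestanding model of the predicate follows; the struct layout and the bit position are assumptions of this sketch, not V8's actual object layout.

#include <cstdint>

struct JSArrayBuffer {
  uint32_t bit_field;                                   // packed flags word
  static constexpr uint32_t kWasDetachedBit = 1u << 0;  // assumed position
};

uint32_t LoadJSArrayBufferBitField(const JSArrayBuffer& buffer) {
  return buffer.bit_field;  // single typed load, shared by all flag checks
}

bool IsDetachedBuffer(const JSArrayBuffer& buffer) {
  return (LoadJSArrayBufferBitField(buffer) & JSArrayBuffer::kWasDetachedBit) != 0;
}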
CodeStubAssembler::InitializeFunctionContext(Node* native_context, Node* context, int slots) { DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS); - StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex); + StoreMapNoWriteBarrier(context, RootIndex::kFunctionContextMap); StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset, SmiConstant(slots)); diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h index 51ed647412b97f..21bcd045ac6932 100644 --- a/deps/v8/src/code-stub-assembler.h +++ b/deps/v8/src/code-stub-assembler.h @@ -450,10 +450,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST) #undef HEAP_CONSTANT_TEST - TNode HashSeed(); - TNode HashSeedHigh(); - TNode HashSeedLow(); - Node* IntPtrOrSmiConstant(int value, ParameterMode mode); TNode LanguageModeConstant(LanguageMode mode) { return SmiConstant(static_cast(mode)); @@ -596,6 +592,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { // if the division needs to be performed as a floating point operation. TNode TrySmiDiv(TNode dividend, TNode divisor, Label* bailout); + // Compares two Smis a and b as if they were converted to strings and then + // compared lexicographically. Returns: + // -1 iff x < y. + // 0 iff x == y. + // 1 iff x > y. + TNode SmiLexicographicCompare(TNode x, TNode y); + // Smi | HeapNumber operations. TNode NumberInc(SloppyTNode value); TNode NumberDec(SloppyTNode value); @@ -769,6 +772,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { // Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise. void GotoIfForceSlowPath(Label* if_true); + // Branches to {if_true} when Debug::ExecutionMode is DebugInfo::kSideEffect. + void GotoIfDebugExecutionModeChecksSideEffects(Label* if_true); + // Load value from current frame by given offset in bytes. Node* LoadFromFrame(int offset, MachineType rep = MachineType::AnyTagged()); // Load value from current parent frame by given offset in bytes. @@ -821,7 +827,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { // Load a SMI and untag it. TNode LoadAndUntagSmi(Node* base, int index); // Load a SMI root, untag it, and convert to Word32. - TNode LoadAndUntagToWord32Root(Heap::RootListIndex root_index); + TNode LoadAndUntagToWord32Root(RootIndex root_index); TNode LoadMaybeWeakObjectField(SloppyTNode object, int offset) { @@ -863,8 +869,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode LoadWeakFixedArrayLength(TNode array); TNode LoadAndUntagWeakFixedArrayLength( SloppyTNode array); - // Load the length of a JSTypedArray instance. - TNode LoadTypedArrayLength(TNode typed_array); // Load the bit field of a Map. TNode LoadMapBitField(SloppyTNode map); // Load bit field 2 of a map. @@ -896,6 +900,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { Node* LoadMapEnumLength(SloppyTNode map); // Load the back-pointer of a Map. TNode LoadMapBackPointer(SloppyTNode map); + // Checks that |map| has only simple properties, returns bitfield3. + TNode EnsureOnlyHasSimpleProperties(TNode map, + TNode instance_type, + Label* bailout); // Load the identity hash of a JSRececiver. 
TNode LoadJSReceiverIdentityHash(SloppyTNode receiver, Label* if_no_hash = nullptr); @@ -916,10 +924,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode LoadNameHash(SloppyTNode name, Label* if_hash_not_computed = nullptr); - // Load length field of a String object as intptr_t value. - TNode LoadStringLengthAsWord(SloppyTNode object); // Load length field of a String object as Smi value. - TNode LoadStringLengthAsSmi(SloppyTNode object); + TNode LoadStringLengthAsSmi(SloppyTNode string); + // Load length field of a String object as intptr_t value. + TNode LoadStringLengthAsWord(SloppyTNode string); + // Load length field of a String object as uint32_t value. + TNode LoadStringLengthAsWord32(SloppyTNode string); // Loads a pointer to the sequential String char array. Node* PointerToSeqStringData(Node* seq_string); // Load value field of a JSValue object. @@ -1155,11 +1165,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { MachineRepresentation rep = MachineRepresentation::kTagged); // Store the Map of an HeapObject. Node* StoreMap(Node* object, Node* map); - Node* StoreMapNoWriteBarrier(Node* object, - Heap::RootListIndex map_root_index); + Node* StoreMapNoWriteBarrier(Node* object, RootIndex map_root_index); Node* StoreMapNoWriteBarrier(Node* object, Node* map); - Node* StoreObjectFieldRoot(Node* object, int offset, - Heap::RootListIndex root); + Node* StoreObjectFieldRoot(Node* object, int offset, RootIndex root); // Store an array element to a FixedArray. void StoreFixedArrayElement( TNode object, int index, SloppyTNode value, @@ -1207,6 +1215,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode object, Node* index, TNode value, ParameterMode parameter_mode = INTPTR_PARAMETERS); + void StoreFixedDoubleArrayElementSmi(TNode object, + TNode index, + TNode value) { + StoreFixedDoubleArrayElement(object, index, value, SMI_PARAMETERS); + } + + void StoreFixedDoubleArrayHole(TNode array, Node* index, + ParameterMode mode = INTPTR_PARAMETERS); + void StoreFixedDoubleArrayHoleSmi(TNode array, + TNode index) { + StoreFixedDoubleArrayHole(array, index, SMI_PARAMETERS); + } + Node* StoreFeedbackVectorSlot( Node* object, Node* index, Node* value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, @@ -1271,47 +1292,47 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode LoadBigIntDigit(TNode bigint, int digit_index); // Allocate a SeqOneByteString with the given length. - TNode AllocateSeqOneByteString(int length, + TNode AllocateSeqOneByteString(uint32_t length, AllocationFlags flags = kNone); - TNode AllocateSeqOneByteString(Node* context, TNode length, + TNode AllocateSeqOneByteString(Node* context, TNode length, AllocationFlags flags = kNone); // Allocate a SeqTwoByteString with the given length. - TNode AllocateSeqTwoByteString(int length, + TNode AllocateSeqTwoByteString(uint32_t length, AllocationFlags flags = kNone); - TNode AllocateSeqTwoByteString(Node* context, TNode length, + TNode AllocateSeqTwoByteString(Node* context, TNode length, AllocationFlags flags = kNone); // Allocate a SlicedOneByteString with the given length, parent and offset. // |length| and |offset| are expected to be tagged. - TNode AllocateSlicedOneByteString(TNode length, + TNode AllocateSlicedOneByteString(TNode length, TNode parent, TNode offset); // Allocate a SlicedTwoByteString with the given length, parent and offset. // |length| and |offset| are expected to be tagged. 
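The new StoreFixedDoubleArrayHole helper above writes the sentinel "hole" value into a double-typed backing store. FixedDoubleArray encodes holes as a dedicated NaN bit pattern, which is why the value has to be written and compared as raw bits (NaN never compares equal to itself as a double). The constant below is a placeholder payload for the sketch, not V8's actual hole pattern.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

constexpr uint64_t kAssumedHoleNanBits = 0x7FF8DEADBEEF0001ull;  // a quiet NaN

void StoreDoubleHole(std::vector<double>& backing, std::size_t index) {
  // Copy raw bits so the payload survives; assigning through a double could
  // canonicalize the NaN on some targets.
  std::memcpy(&backing[index], &kAssumedHoleNanBits, sizeof(double));
}

bool IsHole(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  return bits == kAssumedHoleNanBits;  // bit compare, not v == v
}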
- TNode AllocateSlicedTwoByteString(TNode length, + TNode AllocateSlicedTwoByteString(TNode length, TNode parent, TNode offset); // Allocate a one-byte ConsString with the given length, first and second // parts. |length| is expected to be tagged, and |first| and |second| are // expected to be one-byte strings. - TNode AllocateOneByteConsString(TNode length, + TNode AllocateOneByteConsString(TNode length, TNode first, TNode second, AllocationFlags flags = kNone); // Allocate a two-byte ConsString with the given length, first and second // parts. |length| is expected to be tagged, and |first| and |second| are // expected to be two-byte strings. - TNode AllocateTwoByteConsString(TNode length, + TNode AllocateTwoByteConsString(TNode length, TNode first, TNode second, AllocationFlags flags = kNone); // Allocate an appropriate one- or two-byte ConsString with the first and // second parts specified by |left| and |right|. - TNode NewConsString(Node* context, TNode length, - TNode left, TNode right, + TNode NewConsString(TNode length, TNode left, + TNode right, AllocationFlags flags = kNone); TNode AllocateNameDictionary(int at_least_space_for); @@ -1337,7 +1358,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { template void FindOrderedHashTableEntry( Node* table, Node* hash, - std::function key_compare, + const std::function& key_compare, Variable* entry_start_position, Label* entry_found, Label* not_found); template @@ -1422,7 +1443,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode AllocateZeroedFixedDoubleArray( TNode capacity) { TNode result = UncheckedCast( - AllocateFixedArray(FLOAT64_ELEMENTS, capacity, + AllocateFixedArray(PACKED_DOUBLE_ELEMENTS, capacity, AllocationFlag::kAllowLargeObjectAllocation)); FillFixedDoubleArrayWithZero(result, capacity); return result; @@ -1442,10 +1463,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { Node* ArraySpeciesCreate(TNode context, TNode originalArray, TNode len); + Node* InternalArrayCreate(TNode context, TNode len); void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index, - Node* to_index, - Heap::RootListIndex value_root_index, + Node* to_index, RootIndex value_root_index, ParameterMode mode = INTPTR_PARAMETERS); // Uses memset to effectively initialize the given FixedArray with zeroes. @@ -1511,12 +1532,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode HeapObjectToFixedDoubleArray(TNode base, Label* cast_fail) { - GotoIf(WordNotEqual(LoadMap(base), - LoadRoot(Heap::kFixedDoubleArrayMapRootIndex)), - cast_fail); + GotoIf( + WordNotEqual(LoadMap(base), LoadRoot(RootIndex::kFixedDoubleArrayMap)), + cast_fail); return UncheckedCast(base); } + TNode ConvertElementsKindToInt(TNode elements_kind) { + return UncheckedCast(elements_kind); + } + enum class ExtractFixedArrayFlag { kFixedArrays = 1, kFixedDoubleArrays = 2, @@ -1529,8 +1554,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { typedef base::Flags ExtractFixedArrayFlags; // Copy a portion of an existing FixedArray or FixedDoubleArray into a new - // FixedArray, including special appropriate handling for empty arrays and COW - // arrays. + // array, including special appropriate handling for empty arrays and COW + // arrays. The result array will be of the same type as the original array. // // * |source| is either a FixedArray or FixedDoubleArray from which to copy // elements. 
@@ -1566,6 +1591,33 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { SMI_PARAMETERS); } + // Copy a portion of an existing FixedArray or FixedDoubleArray into a new + // FixedArray, including special appropriate handling for COW arrays. + // * |source| is either a FixedArray or FixedDoubleArray from which to copy + // elements. |source| is assumed to be non-empty. + // * |first| is the starting element index to copy from. + // * |count| is the number of elements to copy out of the source array + // starting from and including the element indexed by |start|. + // * |capacity| determines the size of the allocated result array, with + // |capacity| >= |count|. + // * |source_map| is the map of the |source|. + // * |from_kind| is the elements kind that is consistent with |source| being + // a FixedArray or FixedDoubleArray. This function only cares about double vs. + // non-double, so as to distinguish FixedDoubleArray vs. FixedArray. It does + // not care about holeyness. For example, when |source| is a FixedArray, + // PACKED/HOLEY_ELEMENTS can be used, but not PACKED_DOUBLE_ELEMENTS. + // * The function uses |allocation_flags| and |extract_flags| to decide how to + // allocate the result FixedArray. + // * |parameter_mode| determines the parameter mode of |first|, |count| and + // |capacity|. + TNode ExtractToFixedArray( + Node* source, Node* first, Node* count, Node* capacity, Node* source_map, + ElementsKind from_kind = PACKED_ELEMENTS, + AllocationFlags allocation_flags = AllocationFlag::kNone, + ExtractFixedArrayFlags extract_flags = + ExtractFixedArrayFlag::kAllFixedArrays, + ParameterMode parameter_mode = INTPTR_PARAMETERS); + // Copy the entire contents of a FixedArray or FixedDoubleArray to a new // array, including special appropriate handling for empty arrays and COW // arrays. 
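The comment block above pins down ExtractToFixedArray's contract: copy |count| elements of |source| starting at |first| into a freshly allocated result sized by |capacity|, with |capacity| >= |count|. Here is a container-level model of just that contract (plain C++, ignoring COW handling and the double vs. tagged distinction):

#include <cassert>
#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> ExtractToArray(const std::vector<T>& source, std::size_t first,
                              std::size_t count, std::size_t capacity) {
  assert(first + count <= source.size());
  assert(capacity >= count);                  // extra room for later growth
  std::vector<T> result(capacity);            // sized by capacity, not count
  for (std::size_t i = 0; i < count; ++i) result[i] = source[first + i];
  return result;                              // tail stays default-initialized
}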
@@ -1661,6 +1713,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode ChangeFloat64ToTagged(SloppyTNode value); TNode ChangeInt32ToTagged(SloppyTNode value); TNode ChangeUint32ToTagged(SloppyTNode value); + TNode ChangeUintPtrToTagged(TNode value); TNode ChangeNumberToUint32(TNode value); TNode ChangeNumberToFloat64(SloppyTNode value); TNode ChangeNonnegativeNumberToUintPtr(TNode value); @@ -1740,6 +1793,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode IsNameDictionary(SloppyTNode object); TNode IsGlobalDictionary(SloppyTNode object); TNode IsExtensibleMap(SloppyTNode map); + TNode IsExtensibleNonPrototypeMap(TNode map); TNode IsExternalStringInstanceType(SloppyTNode instance_type); TNode IsFastJSArray(SloppyTNode object, SloppyTNode context); @@ -1760,6 +1814,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode IsHashTable(SloppyTNode object); TNode IsEphemeronHashTable(SloppyTNode object); TNode IsHeapNumber(SloppyTNode object); + TNode IsHeapNumberInstanceType(SloppyTNode instance_type); + TNode IsOddballInstanceType(SloppyTNode instance_type); TNode IsIndirectStringInstanceType(SloppyTNode instance_type); TNode IsJSArrayBuffer(SloppyTNode object); TNode IsJSDataView(TNode object); @@ -1792,6 +1848,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode IsMap(SloppyTNode object); TNode IsMutableHeapNumber(SloppyTNode object); TNode IsName(SloppyTNode object); + TNode IsNameInstanceType(SloppyTNode instance_type); TNode IsNativeContext(SloppyTNode object); TNode IsNullOrJSReceiver(SloppyTNode object); TNode IsNullOrUndefined(SloppyTNode object); @@ -1808,7 +1865,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { SloppyTNode map); TNode IsSequentialStringInstanceType( SloppyTNode instance_type); - TNode IsShortExternalStringInstanceType( + TNode IsUncachedExternalStringInstanceType( SloppyTNode instance_type); TNode IsSpecialReceiverInstanceType(TNode instance_type); TNode IsCustomElementsReceiverInstanceType( @@ -1933,8 +1990,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode StringToNumber(TNode input); // Convert a Number to a String. TNode NumberToString(TNode input); - // Convert an object to a name. - TNode ToName(SloppyTNode context, SloppyTNode value); // Convert a Non-Number object to a Number. 
TNode NonNumberToNumber( SloppyTNode context, SloppyTNode input, @@ -2288,8 +2343,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { int inlined_probes = kInlinedDictionaryProbes, LookupMode mode = kFindExisting); - Node* ComputeIntegerHash(Node* key); - Node* ComputeIntegerHash(Node* key, Node* seed); + Node* ComputeUnseededHash(Node* key); + Node* ComputeSeededHash(Node* key); void NumberDictionaryLookup(TNode dictionary, TNode intptr_index, Label* if_found, @@ -2557,6 +2612,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { ElementsKind to_kind, bool is_jsarray, Label* bailout); + void TransitionElementsKind(TNode object, TNode map, + ElementsKind from_kind, ElementsKind to_kind, + Label* bailout) { + TransitionElementsKind(object, map, from_kind, to_kind, true, bailout); + } + void TrapAllocationMemento(Node* object, Label* memento_found); TNode PageFromAddress(TNode address); @@ -2639,7 +2700,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { ParameterMode mode); void InitializeFieldsWithRoot(Node* object, Node* start_offset, - Node* end_offset, Heap::RootListIndex root); + Node* end_offset, RootIndex root); Node* RelationalComparison(Operation op, Node* left, Node* right, Node* context, @@ -2648,26 +2709,32 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { void BranchIfNumberRelationalComparison(Operation op, Node* left, Node* right, Label* if_true, Label* if_false); - void BranchIfNumberLessThan(Node* left, Node* right, Label* if_true, - Label* if_false) { + void BranchIfNumberEqual(TNode left, TNode right, + Label* if_true, Label* if_false) { + BranchIfNumberRelationalComparison(Operation::kEqual, left, right, if_true, + if_false); + } + + void BranchIfNumberLessThan(TNode left, TNode right, + Label* if_true, Label* if_false) { BranchIfNumberRelationalComparison(Operation::kLessThan, left, right, if_true, if_false); } - void BranchIfNumberLessThanOrEqual(Node* left, Node* right, Label* if_true, - Label* if_false) { + void BranchIfNumberLessThanOrEqual(TNode left, TNode right, + Label* if_true, Label* if_false) { BranchIfNumberRelationalComparison(Operation::kLessThanOrEqual, left, right, if_true, if_false); } - void BranchIfNumberGreaterThan(Node* left, Node* right, Label* if_true, - Label* if_false) { + void BranchIfNumberGreaterThan(TNode left, TNode right, + Label* if_true, Label* if_false) { BranchIfNumberRelationalComparison(Operation::kGreaterThan, left, right, if_true, if_false); } - void BranchIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_true, - Label* if_false) { + void BranchIfNumberGreaterThanOrEqual(TNode left, TNode right, + Label* if_true, Label* if_false) { BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, left, right, if_true, if_false); } @@ -2698,6 +2765,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { SloppyTNode key, HasPropertyLookupMode mode); + // Due to naming conflict with the builtin function namespace. 
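The rename above, ComputeIntegerHash into ComputeSeededHash and ComputeUnseededHash, makes it visible at each call site whether the isolate's hash seed participates. Below is a toy pair with the same split; the mixing steps follow the well-known Jenkins integer hash that V8's unseeded variant is based on, but treat the exact constants and the seed folding as illustrative.

#include <cstdint>

uint32_t ComputeUnseededHash(uint32_t key) {
  key = ~key + (key << 15);
  key ^= key >> 12;
  key += key << 2;
  key ^= key >> 4;
  key *= 2057;
  key ^= key >> 16;
  return key;
}

uint32_t ComputeSeededHash(uint32_t key, uint64_t seed) {
  // Folding in a per-process seed makes hash values unpredictable across
  // runs (hash-flooding mitigation); equal keys still hash equally within
  // one process.
  return ComputeUnseededHash(key ^ static_cast<uint32_t>(seed) ^
                             static_cast<uint32_t>(seed >> 32));
}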
+ TNode HasProperty_Inline(TNode context, + TNode object, + TNode key) { + return HasProperty(context, object, key, + HasPropertyLookupMode::kHasProperty); + } + Node* Typeof(Node* value); TNode GetSuperConstructor(SloppyTNode context, @@ -2714,17 +2789,28 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode IsRuntimeCallStatsEnabled(); - // TypedArray/ArrayBuffer helpers + // JSArrayBuffer helpers + TNode LoadJSArrayBufferBitField(TNode array_buffer); + TNode LoadJSArrayBufferBackingStore( + TNode array_buffer); Node* IsDetachedBuffer(Node* buffer); void ThrowIfArrayBufferIsDetached(SloppyTNode context, TNode array_buffer, const char* method_name); + + // JSArrayBufferView helpers + TNode LoadJSArrayBufferViewBuffer( + TNode array_buffer_view); + TNode LoadJSArrayBufferViewByteLength( + TNode array_buffer_view); + TNode LoadJSArrayBufferViewByteOffset( + TNode array_buffer_view); void ThrowIfArrayBufferViewBufferIsDetached( SloppyTNode context, TNode array_buffer_view, const char* method_name); - TNode LoadArrayBufferViewBuffer( - TNode array_buffer_view); - TNode LoadArrayBufferBackingStore(TNode array_buffer); + + // JSTypedArray helpers + TNode LoadJSTypedArrayLength(TNode typed_array); TNode ElementOffsetFromIndex(Node* index, ElementsKind kind, ParameterMode mode, int base_size = 0); @@ -2855,6 +2941,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode end_descriptor, const ForEachDescriptorBodyFunction& body); + typedef std::function key, TNode value)> + ForEachKeyValueFunction; + + // For each JSObject property (in DescriptorArray order), check if the key is + // enumerable, and if so, load the value from the receiver and evaluate the + // closure. + void ForEachEnumerableOwnProperty(TNode context, TNode map, + TNode object, + const ForEachKeyValueFunction& body, + Label* bailout); + TNode CallGetterIfAccessor(Node* value, Node* details, Node* context, Node* receiver, Label* if_bailout, GetOwnPropertyMode mode = kCallJSGetter); @@ -2893,12 +2990,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { Node* EmitKeyedSloppyArguments(Node* receiver, Node* key, Node* value, Label* bailout); - TNode AllocateSlicedString(Heap::RootListIndex map_root_index, - TNode length, TNode parent, - TNode offset); + TNode AllocateSlicedString(RootIndex map_root_index, + TNode length, + TNode parent, TNode offset); - TNode AllocateConsString(Heap::RootListIndex map_root_index, - TNode length, TNode first, + TNode AllocateConsString(RootIndex map_root_index, + TNode length, TNode first, TNode second, AllocationFlags flags); // Allocate a MutableHeapNumber without initializing its value. 
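ForEachEnumerableOwnProperty, declared above, walks a map's descriptors in order and evaluates a closure for each enumerable key/value pair, per its doc comment. A container-level analogue of that control flow, with std::string and int standing in for Name and Object handles:

#include <functional>
#include <string>
#include <vector>

struct Property {
  std::string key;
  int value;
  bool enumerable;
};

void ForEachEnumerableOwnProperty(
    const std::vector<Property>& object,  // plays the role of the descriptors
    const std::function<void(const std::string&, int)>& body) {
  for (const Property& p : object) {      // DescriptorArray order
    if (!p.enumerable) continue;          // skip non-enumerable entries
    body(p.key, p.value);
  }
}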
@@ -2922,7 +3019,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler { TNode AllocAndCopyStringCharacters(Node* from, Node* from_instance_type, TNode from_index, - TNode character_count); + TNode character_count); static const int kElementLoopUnrollThreshold = 8; diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 745aa1aa24ecb4..94d4f69bea04be 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -114,7 +114,7 @@ class CodeStub : public ZoneObject { static const char* MajorName(Major major_key); explicit CodeStub(Isolate* isolate) : minor_key_(0), isolate_(isolate) {} - virtual ~CodeStub() {} + virtual ~CodeStub() = default; static void GenerateStubsAheadOfTime(Isolate* isolate); @@ -299,7 +299,9 @@ class CodeStubDescriptor { DCHECK(!stack_parameter_count_.is_valid()); } - void set_call_descriptor(CallInterfaceDescriptor d) { call_descriptor_ = d; } + void set_call_descriptor(CallInterfaceDescriptor d) { + call_descriptor_ = std::move(d); + } CallInterfaceDescriptor call_descriptor() const { return call_descriptor_; } int GetRegisterParameterCount() const { diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index 10dfdbbd4adc70..198ee8f57265e7 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -12,19 +12,17 @@ namespace v8 { namespace internal { -#define UNARY_MATH_FUNCTION(name, generator) \ - static UnaryMathFunctionWithIsolate fast_##name##_function = nullptr; \ - double std_##name(double x, Isolate* isolate) { return std::name(x); } \ - void init_fast_##name##_function(Isolate* isolate) { \ - if (FLAG_fast_math) fast_##name##_function = generator(isolate); \ - if (!fast_##name##_function) fast_##name##_function = std_##name; \ - } \ - void lazily_initialize_fast_##name(Isolate* isolate) { \ - if (!fast_##name##_function) init_fast_##name##_function(isolate); \ - } \ - double fast_##name(double x, Isolate* isolate) { \ - return (*fast_##name##_function)(x, isolate); \ - } +#define UNARY_MATH_FUNCTION(name, generator) \ + static UnaryMathFunction fast_##name##_function = nullptr; \ + double std_##name(double x) { return std::name(x); } \ + void init_fast_##name##_function() { \ + if (FLAG_fast_math) fast_##name##_function = generator(); \ + if (!fast_##name##_function) fast_##name##_function = std_##name; \ + } \ + void lazily_initialize_fast_##name() { \ + if (!fast_##name##_function) init_fast_##name##_function(); \ + } \ + double fast_##name(double x) { return (*fast_##name##_function)(x); } UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction) diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index 1b57c744477dc2..3e07c86fc28e3b 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -5,21 +5,19 @@ #ifndef V8_CODEGEN_H_ #define V8_CODEGEN_H_ -#include "src/globals.h" - namespace v8 { namespace internal { // Results of the library implementation of transcendental functions may differ // from the one we use in our generated code. Therefore we use the same // generated code both in runtime and compiled code. -typedef double (*UnaryMathFunctionWithIsolate)(double x, Isolate* isolate); +typedef double (*UnaryMathFunction)(double x); -UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate); +UnaryMathFunction CreateSqrtFunction(); // Custom implementation of math functions. 
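For sqrt, the reworked UNARY_MATH_FUNCTION macro above expands to roughly the following: the Isolate* parameter is gone everywhere, so the fast path is a plain double(double) pointer, lazily initialized and falling back to std::sqrt. The stub CreateSqrtFunction returning nullptr models the "no specialized code generated" case, and FLAG_fast_math is reduced to a bool for the sketch.

#include <cmath>

typedef double (*UnaryMathFunction)(double x);

static bool FLAG_fast_math = true;

UnaryMathFunction CreateSqrtFunction() { return nullptr; }  // sketch stub

static UnaryMathFunction fast_sqrt_function = nullptr;

double std_sqrt(double x) { return std::sqrt(x); }

void init_fast_sqrt_function() {
  if (FLAG_fast_math) fast_sqrt_function = CreateSqrtFunction();
  if (!fast_sqrt_function) fast_sqrt_function = std_sqrt;  // portable fallback
}

void lazily_initialize_fast_sqrt() {
  if (!fast_sqrt_function) init_fast_sqrt_function();
}

double fast_sqrt(double x) { return (*fast_sqrt_function)(x); }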
-double fast_sqrt(double input, Isolate* isolate); -void lazily_initialize_fast_sqrt(Isolate* isolate); +double fast_sqrt(double input); +void lazily_initialize_fast_sqrt(); } // namespace internal } // namespace v8 diff --git a/deps/v8/src/collector.h b/deps/v8/src/collector.h index a3e940663f992f..bfaa9d42ce340a 100644 --- a/deps/v8/src/collector.h +++ b/deps/v8/src/collector.h @@ -184,7 +184,7 @@ class SequenceCollector : public Collector { : Collector(initial_capacity), sequence_start_(kNoSequence) {} - virtual ~SequenceCollector() {} + ~SequenceCollector() override = default; void StartSequence() { DCHECK_EQ(sequence_start_, kNoSequence); @@ -208,7 +208,7 @@ class SequenceCollector : public Collector { sequence_start_ = kNoSequence; } - virtual void Reset() { + void Reset() override { sequence_start_ = kNoSequence; this->Collector::Reset(); } @@ -218,7 +218,7 @@ class SequenceCollector : public Collector { int sequence_start_; // Move the currently active sequence to the new chunk. - virtual void NewChunk(int new_capacity) { + void NewChunk(int new_capacity) override { if (sequence_start_ == kNoSequence) { // Fall back on default behavior if no sequence has been started. this->Collector::NewChunk(new_capacity); diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc index 61b83b1a1853b2..0068c833622303 100644 --- a/deps/v8/src/compilation-cache.cc +++ b/deps/v8/src/compilation-cache.cc @@ -34,8 +34,6 @@ CompilationCache::CompilationCache(Isolate* isolate) } } -CompilationCache::~CompilationCache() {} - Handle CompilationSubCache::GetTable(int generation) { DCHECK(generation < generations_); Handle result; diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h index 3a4fe2e7b5ef56..ed3f1986b6adf4 100644 --- a/deps/v8/src/compilation-cache.h +++ b/deps/v8/src/compilation-cache.h @@ -213,7 +213,7 @@ class CompilationCache { private: explicit CompilationCache(Isolate* isolate); - ~CompilationCache(); + ~CompilationCache() = default; base::HashMap* EagerOptimizingSet(); diff --git a/deps/v8/src/compilation-statistics.h b/deps/v8/src/compilation-statistics.h index cb66f86532262a..bfd9a5c66a3bbf 100644 --- a/deps/v8/src/compilation-statistics.h +++ b/deps/v8/src/compilation-statistics.h @@ -24,7 +24,7 @@ struct AsPrintableStatistics { class CompilationStatistics final : public Malloced { public: - CompilationStatistics() {} + CompilationStatistics() = default; class BasicStats { public: diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h index aed4960119e40f..827a2aa18d6703 100644 --- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h +++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h @@ -21,16 +21,14 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob { enum class Status { kInitial, - kPrepared, - kCompiled, - kHasErrorsToReport, + kReadyToFinalize, kDone, kFailed, }; CompilerDispatcherJob(Type type) : type_(type), status_(Status::kInitial) {} - virtual ~CompilerDispatcherJob() {} + virtual ~CompilerDispatcherJob() = default; Type type() const { return type_; } @@ -48,26 +46,19 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob { // Return true if the next step can be run on any thread. bool NextStepCanRunOnAnyThread() const { - return status() == Status::kPrepared; + return status() == Status::kInitial; } // Casts to implementations. const UnoptimizedCompileJob* AsUnoptimizedCompileJob() const; - // Transition from kInitial to kPrepared. 
Must only be invoked on the - // main thread. - virtual void PrepareOnMainThread(Isolate* isolate) = 0; - - // Transition from kPrepared to kCompiled (or kReportErrors). + // Transition from kInitial to kReadyToFinalize. virtual void Compile(bool on_background_thread) = 0; - // Transition from kCompiled to kDone (or kFailed). Must only be invoked on - // the main thread. - virtual void FinalizeOnMainThread(Isolate* isolate) = 0; - - // Transition from kReportErrors to kFailed. Must only be invoked on the main - // thread. - virtual void ReportErrorsOnMainThread(Isolate* isolate) = 0; + // Transition from kReadyToFinalize to kDone (or kFailed). Must only be + // invoked on the main thread. + virtual void FinalizeOnMainThread(Isolate* isolate, + Handle shared) = 0; // Free all resources. Must only be invoked on the main thread. virtual void ResetOnMainThread(Isolate* isolate) = 0; @@ -75,9 +66,6 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob { // Estimate how long the next step will take using the tracer. virtual double EstimateRuntimeOfNextStepInMs() const = 0; - // Print short description of job. Must only be invoked on the main thread. - virtual void ShortPrintOnMainThread() = 0; - protected: void set_status(Status status) { status_ = status; } diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc index 862efda83e5b56..ab8bc5adec15bf 100644 --- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc +++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc @@ -63,7 +63,7 @@ CompilerDispatcherTracer::CompilerDispatcherTracer(Isolate* isolate) } } -CompilerDispatcherTracer::~CompilerDispatcherTracer() {} +CompilerDispatcherTracer::~CompilerDispatcherTracer() = default; void CompilerDispatcherTracer::RecordPrepare(double duration_ms) { base::LockGuard lock(&mutex_); diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc index 6bbcefa781be14..b8dfaf142f24d7 100644 --- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc +++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc @@ -4,8 +4,7 @@ #include "src/compiler-dispatcher/compiler-dispatcher.h" -#include "include/v8-platform.h" -#include "include/v8.h" +#include "src/ast/ast.h" #include "src/base/platform/time.h" #include "src/base/template-utils.h" #include "src/cancelable-task.h" @@ -13,6 +12,7 @@ #include "src/compiler-dispatcher/compiler-dispatcher-tracer.h" #include "src/compiler-dispatcher/unoptimized-compile-job.h" #include "src/flags.h" +#include "src/global-handles.h" #include "src/objects-inl.h" namespace v8 { @@ -22,47 +22,17 @@ namespace { enum class ExceptionHandling { kSwallow, kThrow }; -bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job, - ExceptionHandling exception_handling) { +void FinalizeJobOnMainThread(Isolate* isolate, CompilerDispatcherJob* job, + Handle shared, + ExceptionHandling exception_handling) { DCHECK(ThreadId::Current().Equals(isolate->thread_id())); - TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), - "V8.CompilerDispatcherForgroundStep"); - switch (job->status()) { - case CompilerDispatcherJob::Status::kInitial: - job->PrepareOnMainThread(isolate); - break; - case CompilerDispatcherJob::Status::kPrepared: - job->Compile(false); - break; - case CompilerDispatcherJob::Status::kCompiled: - job->FinalizeOnMainThread(isolate); - break; - case CompilerDispatcherJob::Status::kHasErrorsToReport: - 
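The CompilerDispatcherJob changes above collapse the job lifecycle to three steps: kInitial, then Compile (runnable on any thread) moves it to kReadyToFinalize, then FinalizeOnMainThread lands it in kDone or kFailed. A reduced model of just the state machine:

#include <cassert>

enum class Status { kInitial, kReadyToFinalize, kDone, kFailed };

class Job {
 public:
  Status status() const { return status_; }

  // Matches the patched predicate: only the compile step is thread-agnostic.
  bool NextStepCanRunOnAnyThread() const { return status_ == Status::kInitial; }

  void Compile(bool /*on_background_thread*/) {
    assert(status_ == Status::kInitial);
    status_ = Status::kReadyToFinalize;
  }

  void FinalizeOnMainThread(bool success) {  // main thread only
    assert(status_ == Status::kReadyToFinalize);
    status_ = success ? Status::kDone : Status::kFailed;
  }

 private:
  Status status_ = Status::kInitial;
};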
job->ReportErrorsOnMainThread(isolate); - break; - case CompilerDispatcherJob::Status::kFailed: - case CompilerDispatcherJob::Status::kDone: - UNREACHABLE(); - } + DCHECK_EQ(job->status(), CompilerDispatcherJob::Status::kReadyToFinalize); + job->FinalizeOnMainThread(isolate, shared); DCHECK_EQ(job->IsFailed(), isolate->has_pending_exception()); if (job->IsFailed() && exception_handling == ExceptionHandling::kSwallow) { isolate->clear_pending_exception(); } - return job->IsFailed(); -} - -void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) { - DCHECK(job->NextStepCanRunOnAnyThread()); - TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), - "V8.CompilerDispatcherBackgroundStep"); - switch (job->status()) { - case CompilerDispatcherJob::Status::kPrepared: - job->Compile(true); - break; - default: - UNREACHABLE(); - } } // Theoretically we get 50ms of idle time max, however it's unlikely that @@ -88,7 +58,7 @@ MemoryPressureTask::MemoryPressureTask(CancelableTaskManager* task_manager, CompilerDispatcher* dispatcher) : CancelableTask(task_manager), dispatcher_(dispatcher) {} -MemoryPressureTask::~MemoryPressureTask() {} +MemoryPressureTask::~MemoryPressureTask() = default; void MemoryPressureTask::RunInternal() { dispatcher_->AbortAll(BlockingBehavior::kDontBlock); @@ -115,7 +85,7 @@ CompilerDispatcher::AbortTask::AbortTask(CancelableTaskManager* task_manager, CompilerDispatcher* dispatcher) : CancelableTask(task_manager), dispatcher_(dispatcher) {} -CompilerDispatcher::AbortTask::~AbortTask() {} +CompilerDispatcher::AbortTask::~AbortTask() = default; void CompilerDispatcher::AbortTask::RunInternal() { dispatcher_->AbortInactiveJobs(); @@ -140,7 +110,7 @@ CompilerDispatcher::WorkerTask::WorkerTask(CancelableTaskManager* task_manager, CompilerDispatcher* dispatcher) : CancelableTask(task_manager), dispatcher_(dispatcher) {} -CompilerDispatcher::WorkerTask::~WorkerTask() {} +CompilerDispatcher::WorkerTask::~WorkerTask() = default; void CompilerDispatcher::WorkerTask::RunInternal() { dispatcher_->DoBackgroundWork(); @@ -164,7 +134,7 @@ CompilerDispatcher::IdleTask::IdleTask(CancelableTaskManager* task_manager, CompilerDispatcher* dispatcher) : CancelableIdleTask(task_manager), dispatcher_(dispatcher) {} -CompilerDispatcher::IdleTask::~IdleTask() {} +CompilerDispatcher::IdleTask::~IdleTask() = default; void CompilerDispatcher::IdleTask::RunInternal(double deadline_in_seconds) { dispatcher_->DoIdleWork(deadline_in_seconds); @@ -173,6 +143,9 @@ void CompilerDispatcher::IdleTask::RunInternal(double deadline_in_seconds) { CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform, size_t max_stack_size) : isolate_(isolate), + allocator_(isolate->allocator()), + worker_thread_runtime_call_stats_( + isolate->counters()->worker_thread_runtime_call_stats()), platform_(platform), max_stack_size_(max_stack_size), trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher), @@ -201,6 +174,8 @@ CompilerDispatcher::~CompilerDispatcher() { bool CompilerDispatcher::CanEnqueue() { if (!IsEnabled()) return false; + // TODO(rmcilroy): Investigate if MemoryPressureLevel::kNone is ever sent on + // Android, if not, remove this check. if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) { return false; } @@ -213,86 +188,64 @@ bool CompilerDispatcher::CanEnqueue() { return true; } -bool CompilerDispatcher::CanEnqueue(Handle function) { - if (!CanEnqueue()) return false; - - // We only handle functions (no eval / top-level code / native) that are - // attached to a script. 
- if (!function->script()->IsScript() || function->is_toplevel() || - function->native()) { - return false; - } - - return true; -} +base::Optional CompilerDispatcher::Enqueue( + const ParseInfo* outer_parse_info, const AstRawString* function_name, + const FunctionLiteral* function_literal) { + TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), + "V8.CompilerDispatcherEnqueue"); + RuntimeCallTimerScope runtimeTimer( + isolate_, RuntimeCallCounterId::kCompileEnqueueOnDispatcher); -CompilerDispatcher::JobId CompilerDispatcher::Enqueue( - std::unique_ptr job) { - DCHECK(!job->IsFinished()); - JobMap::const_iterator it = InsertJob(std::move(job)); - ConsiderJobForBackgroundProcessing(it->second.get()); - ScheduleIdleTaskIfNeeded(); - return it->first; -} + if (!CanEnqueue()) return base::nullopt; -CompilerDispatcher::JobId CompilerDispatcher::EnqueueAndStep( - std::unique_ptr job) { - DCHECK(!job->IsFinished()); + std::unique_ptr job(new UnoptimizedCompileJob( + tracer_.get(), allocator_, outer_parse_info, function_name, + function_literal, worker_thread_runtime_call_stats_, max_stack_size_)); JobMap::const_iterator it = InsertJob(std::move(job)); + JobId id = it->first; if (trace_compiler_dispatcher_) { - PrintF("CompilerDispatcher: stepping "); - it->second->ShortPrintOnMainThread(); - PrintF("\n"); + PrintF("CompilerDispatcher: enqueued job %zu for function literal id %d\n", + id, function_literal->function_literal_id()); } - DoNextStepOnMainThread(isolate_, it->second.get(), - ExceptionHandling::kSwallow); + + // Post a idle task and a background worker task to perform the compilation + // either on the worker thread or during idle time (whichever is first). ConsiderJobForBackgroundProcessing(it->second.get()); - RemoveIfFinished(it); ScheduleIdleTaskIfNeeded(); - return it->first; + return base::make_optional(id); } -bool CompilerDispatcher::Enqueue(Handle function) { - TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), - "V8.CompilerDispatcherEnqueue"); - if (!CanEnqueue(function)) return false; - if (IsEnqueued(function)) return true; - - if (trace_compiler_dispatcher_) { - PrintF("CompilerDispatcher: enqueuing "); - function->ShortPrint(); - PrintF(" for parse and compile\n"); - } +bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; } - std::unique_ptr job(new UnoptimizedCompileJob( - isolate_, tracer_.get(), function, max_stack_size_)); - Enqueue(std::move(job)); - return true; +bool CompilerDispatcher::IsEnqueued(Handle function) const { + if (jobs_.empty()) return false; + return GetJobFor(function) != jobs_.end(); } -bool CompilerDispatcher::EnqueueAndStep(Handle function) { - TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), - "V8.CompilerDispatcherEnqueueAndStep"); - if (!CanEnqueue(function)) return false; - if (IsEnqueued(function)) return true; +bool CompilerDispatcher::IsEnqueued(JobId job_id) const { + return jobs_.find(job_id) != jobs_.end(); +} +void CompilerDispatcher::RegisterSharedFunctionInfo( + JobId job_id, SharedFunctionInfo* function) { + DCHECK_NE(jobs_.find(job_id), jobs_.end()); + DCHECK_EQ(job_id_to_shared_.find(job_id), job_id_to_shared_.end()); if (trace_compiler_dispatcher_) { - PrintF("CompilerDispatcher: enqueuing "); + PrintF("CompilerDispatcher: registering "); function->ShortPrint(); - PrintF(" for parse and compile\n"); + PrintF(" with job id %zu\n", job_id); } - std::unique_ptr job(new UnoptimizedCompileJob( - isolate_, tracer_.get(), function, max_stack_size_)); - EnqueueAndStep(std::move(job)); - return true; -} + 
// Make a global handle to the function. + Handle function_handle = + isolate_->global_handles()->Create(function); -bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; } + // Register mapping. + job_id_to_shared_.insert(std::make_pair(job_id, function_handle)); + shared_to_unoptimized_job_id_.Set(function_handle, job_id); -bool CompilerDispatcher::IsEnqueued(Handle function) const { - if (jobs_.empty()) return false; - return GetJobFor(function) != jobs_.end(); + // Schedule an idle task to finalize job if it is ready. + ScheduleIdleTaskIfNeeded(); } void CompilerDispatcher::WaitForJobIfRunningOnBackground( @@ -316,54 +269,41 @@ void CompilerDispatcher::WaitForJobIfRunningOnBackground( DCHECK(running_background_jobs_.find(job) == running_background_jobs_.end()); } -bool CompilerDispatcher::FinishNow(CompilerDispatcherJob* job) { +bool CompilerDispatcher::FinishNow(Handle function) { + TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), + "V8.CompilerDispatcherFinishNow"); + RuntimeCallTimerScope runtimeTimer( + isolate_, RuntimeCallCounterId::kCompileFinishNowOnDispatcher); if (trace_compiler_dispatcher_) { PrintF("CompilerDispatcher: finishing "); - job->ShortPrintOnMainThread(); + function->ShortPrint(); PrintF(" now\n"); } + + JobMap::const_iterator it = GetJobFor(function); + CHECK(it != jobs_.end()); + CompilerDispatcherJob* job = it->second.get(); WaitForJobIfRunningOnBackground(job); while (!job->IsFinished()) { - DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow); - } - return !job->IsFailed(); -} - -bool CompilerDispatcher::FinishNow(Handle function) { - TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), - "V8.CompilerDispatcherFinishNow"); - JobMap::const_iterator job = GetJobFor(function); - CHECK(job != jobs_.end()); - bool result = FinishNow(job->second.get()); - RemoveIfFinished(job); - return result; -} - -void CompilerDispatcher::FinishAllNow() { - // First finish all jobs not running in background - for (auto it = jobs_.cbegin(); it != jobs_.cend();) { - CompilerDispatcherJob* job = it->second.get(); - bool is_running_in_background; - { - base::LockGuard lock(&mutex_); - is_running_in_background = - running_background_jobs_.find(job) != running_background_jobs_.end(); - pending_background_jobs_.erase(job); - } - if (!is_running_in_background) { - while (!job->IsFinished()) { - DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow); + switch (job->status()) { + case CompilerDispatcherJob::Status::kInitial: + job->Compile(false); + break; + case CompilerDispatcherJob::Status::kReadyToFinalize: { + FinalizeJobOnMainThread(isolate_, job, function, + ExceptionHandling::kThrow); + break; } - it = RemoveIfFinished(it); - } else { - ++it; + case CompilerDispatcherJob::Status::kFailed: + case CompilerDispatcherJob::Status::kDone: + UNREACHABLE(); } } - // Potentially wait for jobs that were running in background - for (auto it = jobs_.cbegin(); it != jobs_.cend(); - it = RemoveIfFinished(it)) { - FinishNow(it->second.get()); - } + DCHECK_EQ(job->IsFailed(), isolate_->has_pending_exception()); + DCHECK(job->IsFinished()); + bool result = !job->IsFailed(); + RemoveJob(it); + return result; } void CompilerDispatcher::AbortAll(BlockingBehavior blocking) { @@ -373,9 +313,7 @@ void CompilerDispatcher::AbortAll(BlockingBehavior blocking) { for (auto& it : jobs_) { WaitForJobIfRunningOnBackground(it.second.get()); if (trace_compiler_dispatcher_) { - PrintF("CompilerDispatcher: aborted "); - it.second->ShortPrintOnMainThread(); - 
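With the new API above, Enqueue returns a job id immediately (or nothing when enqueueing is not possible), and the SharedFunctionInfo is attached later through RegisterSharedFunctionInfo, which is why the dispatcher now keeps a job-id-to-SFI map alongside the SFI-to-job-id map. A reduced model using std::optional and a raw pointer where V8 uses base::Optional and a global handle:

#include <cstddef>
#include <map>
#include <optional>
#include <set>

struct SharedFunctionInfo {};  // stand-in for the V8 object

class Dispatcher {
 public:
  using JobId = std::size_t;

  std::optional<JobId> Enqueue(bool can_enqueue) {
    if (!can_enqueue) return std::nullopt;  // e.g. under memory pressure
    const JobId id = next_job_id_++;
    jobs_.insert(id);
    return id;
  }

  void RegisterSharedFunctionInfo(JobId id, SharedFunctionInfo* sfi) {
    job_id_to_shared_[id] = sfi;   // both directions are kept so a job can be
    shared_to_job_id_[sfi] = id;   // found from either an id or a function
  }

  bool IsEnqueued(JobId id) const { return jobs_.count(id) != 0; }

 private:
  JobId next_job_id_ = 0;
  std::set<JobId> jobs_;
  std::map<JobId, SharedFunctionInfo*> job_id_to_shared_;
  std::map<SharedFunctionInfo*, JobId> shared_to_job_id_;
};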
PrintF("\n"); + PrintF("CompilerDispatcher: aborted job %zu\n", it.first); } it.second->ResetOnMainThread(isolate_); } @@ -394,6 +332,7 @@ void CompilerDispatcher::AbortAll(BlockingBehavior blocking) { base::LockGuard lock(&mutex_); abort_ = true; pending_background_jobs_.clear(); + idle_task_scheduled_ = false; // Idle task cancelled by TryAbortAll. } AbortInactiveJobs(); @@ -421,9 +360,7 @@ void CompilerDispatcher::AbortInactiveJobs() { } } if (trace_compiler_dispatcher_) { - PrintF("CompilerDispatcher: aborted "); - job->second->ShortPrintOnMainThread(); - PrintF("\n"); + PrintF("CompilerDispatcher: aborted job %zu\n", job->first); } it = RemoveJob(job); } @@ -470,8 +407,6 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor( JobMap::const_iterator job = jobs_.end(); if (job_id_ptr) { job = jobs_.find(*job_id_ptr); - DCHECK(job == jobs_.end() || - job->second->AsUnoptimizedCompileJob()->IsAssociatedWith(shared)); } return job; } @@ -481,7 +416,7 @@ void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() { if (!platform_->IdleTasksEnabled(v8_isolate)) return; { base::LockGuard lock(&mutex_); - if (idle_task_scheduled_) return; + if (idle_task_scheduled_ || abort_) return; idle_task_scheduled_ = true; } platform_->CallIdleOnForegroundThread( @@ -525,6 +460,8 @@ void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() { } void CompilerDispatcher::DoBackgroundWork() { + TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), + "V8.CompilerDispatcherDoBackgroundWork"); for (;;) { CompilerDispatcherJob* job = nullptr; { @@ -547,7 +484,10 @@ void CompilerDispatcher::DoBackgroundWork() { PrintF("CompilerDispatcher: doing background work\n"); } - DoNextStepOnBackgroundThread(job); + DCHECK(job->NextStepCanRunOnAnyThread()); + DCHECK_EQ(job->status(), CompilerDispatcherJob::Status::kInitial); + job->Compile(true); + // Unconditionally schedule an idle task, as all background steps have to be // followed by a main thread step. ScheduleIdleTaskFromAnyThread(); @@ -579,6 +519,8 @@ void CompilerDispatcher::DoBackgroundWork() { } void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) { + TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), + "V8.CompilerDispatcherDoIdleWork"); bool aborted = false; { base::LockGuard lock(&mutex_); @@ -593,11 +535,11 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) { // Number of jobs that are unlikely to make progress during any idle callback // due to their estimated duration. - size_t too_long_jobs = 0; + size_t jobs_unlikely_to_progress = 0; // Iterate over all available jobs & remaining time. For each job, decide // whether to 1) skip it (if it would take too long), 2) erase it (if it's - // finished), or 3) make progress on it. + // finished), or 3) make progress on it if possible. double idle_time_in_seconds = deadline_in_seconds - platform_->MonotonicallyIncreasingTime(); @@ -620,6 +562,7 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) { ++job; continue; } + DCHECK(!job->second->IsFinished()); auto it = pending_background_jobs_.find(job->second.get()); double estimate_in_ms = job->second->EstimateRuntimeOfNextStepInMs(); if (idle_time_in_seconds < @@ -628,29 +571,44 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) { // If there's not enough time left, try to estimate whether we would // have managed to finish the job in a large idle task to assess // whether we should ask for another idle callback. 
- if (estimate_in_ms > kMaxIdleTimeToExpectInMs) ++too_long_jobs; + // TODO(rmcilroy): Consider running the job anyway when we have a long + // idle time since this would probably be the best time to run. + if (estimate_in_ms > kMaxIdleTimeToExpectInMs) + ++jobs_unlikely_to_progress; if (it == pending_background_jobs_.end()) { lock.reset(); ConsiderJobForBackgroundProcessing(job->second.get()); } ++job; - } else if (job->second->IsFinished()) { - DCHECK(it == pending_background_jobs_.end()); - lock.reset(); - job = RemoveJob(job); - continue; - } else { - // Do one step, and keep processing the job (as we don't advance the - // iterator). + } else if (job->second->status() == + CompilerDispatcherJob::Status::kInitial) { if (it != pending_background_jobs_.end()) { pending_background_jobs_.erase(it); } lock.reset(); - DoNextStepOnMainThread(isolate_, job->second.get(), - ExceptionHandling::kSwallow); + job->second->Compile(false); + // Don't update job so we can immediately finalize it on the next loop. + } else { + DCHECK_EQ(job->second->status(), + CompilerDispatcherJob::Status::kReadyToFinalize); + DCHECK(it == pending_background_jobs_.end()); + lock.reset(); + + auto shared_it = job_id_to_shared_.find(job->first); + if (shared_it != job_id_to_shared_.end()) { + Handle shared = shared_it->second; + FinalizeJobOnMainThread(isolate_, job->second.get(), shared, + ExceptionHandling::kSwallow); + DCHECK(job->second->IsFinished()); + job = RemoveJob(job); + } else { + // If we can't step the job yet, go to the next job. + ++jobs_unlikely_to_progress; + ++job; + } } } - if (jobs_.size() > too_long_jobs) ScheduleIdleTaskIfNeeded(); + if (jobs_.size() > jobs_unlikely_to_progress) ScheduleIdleTaskIfNeeded(); } CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveIfFinished( @@ -661,9 +619,8 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveIfFinished( if (trace_compiler_dispatcher_) { bool result = !job->second->IsFailed(); - PrintF("CompilerDispatcher: finished working on "); - job->second->ShortPrintOnMainThread(); - PrintF(": %s\n", result ? "success" : "failure"); + PrintF("CompilerDispatcher: finished working on job %zu: %s\n", job->first, + result ? "success" : "failure"); tracer_->DumpStatistics(); } @@ -677,44 +634,34 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::InsertJob( std::tie(it, added) = jobs_.insert(std::make_pair(next_job_id_++, std::move(job))); DCHECK(added); - - JobId id = it->first; - CompilerDispatcherJob* inserted_job = it->second.get(); - - // Maps unoptimized jobs' SFIs to their job id. - if (inserted_job->type() == - CompilerDispatcherJob::Type::kUnoptimizedCompile) { - Handle shared = - inserted_job->AsUnoptimizedCompileJob()->shared(); - if (!shared.is_null()) { - shared_to_unoptimized_job_id_.Set(shared, id); - } - } - return it; } CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob( CompilerDispatcher::JobMap::const_iterator it) { CompilerDispatcherJob* job = it->second.get(); - job->ResetOnMainThread(isolate_); - // Unmaps unoptimized jobs' SFIs to their job id. - if (job->type() == CompilerDispatcherJob::Type::kUnoptimizedCompile) { - Handle shared = - job->AsUnoptimizedCompileJob()->shared(); - if (!shared.is_null()) { - JobId deleted_id; - shared_to_unoptimized_job_id_.Delete(shared, &deleted_id); - DCHECK_EQ(it->first, deleted_id); - } + // Delete SFI associated with job if its been registered. 
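The DoIdleWork loop above is deadline-driven: each job's next step is attempted only if its estimated runtime fits the remaining idle time, jobs that would not fit even a full idle slice are counted as unlikely to progress, and another idle task is requested only when some remaining job could actually use it. The sketch below models the scheduling shape only; the estimates and the 50 ms ceiling are stand-ins.

#include <cstddef>
#include <vector>

constexpr double kMaxIdleTimeToExpectInMs = 50.0;  // assumed idle-slice ceiling

struct IdleJob { double estimate_ms; };

// Returns true if another idle callback should be scheduled.
bool DoIdleWork(std::vector<IdleJob>& jobs, double idle_time_ms) {
  std::size_t jobs_unlikely_to_progress = 0;
  std::vector<IdleJob> remaining;
  for (const IdleJob& job : jobs) {
    if (job.estimate_ms > idle_time_ms) {
      // Does not fit what is left of this slice; would it fit a full one?
      if (job.estimate_ms > kMaxIdleTimeToExpectInMs) ++jobs_unlikely_to_progress;
      remaining.push_back(job);
      continue;
    }
    idle_time_ms -= job.estimate_ms;  // "run" the step within the deadline
  }
  jobs = remaining;
  return jobs.size() > jobs_unlikely_to_progress;
}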
+ auto shared_it = job_id_to_shared_.find(it->first); + if (shared_it != job_id_to_shared_.end()) { + Handle shared = shared_it->second; + + JobId deleted_id; + shared_to_unoptimized_job_id_.Delete(shared, &deleted_id); + DCHECK_EQ(it->first, deleted_id); + + job_id_to_shared_.erase(shared_it); + GlobalHandles::Destroy(Handle::cast(shared).location()); } + job->ResetOnMainThread(isolate_); + it = jobs_.erase(it); if (jobs_.empty()) { base::LockGuard lock(&mutex_); if (num_worker_tasks_ == 0) abort_ = false; } + return it; } diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h index d7b2dc802ff667..56cf047e9502d4 100644 --- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h +++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h @@ -13,6 +13,7 @@ #include "src/base/atomic-utils.h" #include "src/base/macros.h" +#include "src/base/optional.h" #include "src/base/platform/condition-variable.h" #include "src/base/platform/mutex.h" #include "src/base/platform/semaphore.h" @@ -27,6 +28,7 @@ enum class MemoryPressureLevel; namespace internal { +class AstRawString; class AstValueFactory; class CancelableTaskManager; class CompilerDispatcherJob; @@ -37,6 +39,7 @@ class FunctionLiteral; class Isolate; class ParseInfo; class SharedFunctionInfo; +class WorkerThreadRuntimeCallStats; class Zone; template @@ -79,24 +82,23 @@ class V8_EXPORT_PRIVATE CompilerDispatcher { // Returns true if the compiler dispatcher is enabled. bool IsEnabled() const; - // Enqueue a job for parse and compile. Returns true if a job was enqueued. - bool Enqueue(Handle function); + base::Optional Enqueue(const ParseInfo* outer_parse_info, + const AstRawString* function_name, + const FunctionLiteral* function_literal); - // Like Enqueue, but also advances the job so that it can potentially - // continue running on a background thread (if at all possible). Returns - // true if the job was enqueued. - bool EnqueueAndStep(Handle function); + // Registers the given |function| with the compilation job |job_id|. + void RegisterSharedFunctionInfo(JobId job_id, SharedFunctionInfo* function); - // Returns true if there is a pending job for the given function. + // Returns true if there is a pending job with the given id. + bool IsEnqueued(JobId job_id) const; + + // Returns true if there is a pending job registered for the given function. bool IsEnqueued(Handle function) const; // Blocks until the given function is compiled (and does so as fast as // possible). Returns true if the compile job was successful. bool FinishNow(Handle function); - // Blocks until all jobs are finished. - void FinishAllNow(); - // Aborts a given job. Blocks if requested. 
void Abort(Handle function, BlockingBehavior blocking); @@ -124,15 +126,15 @@ class V8_EXPORT_PRIVATE CompilerDispatcher { FRIEND_TEST(CompilerDispatcherTest, CompileMultipleOnBackgroundThread); typedef std::map> JobMap; + typedef std::map> JobIdToSharedMap; typedef IdentityMap SharedToJobIdMap; class AbortTask; class WorkerTask; class IdleTask; + bool CanEnqueue(); void WaitForJobIfRunningOnBackground(CompilerDispatcherJob* job); void AbortInactiveJobs(); - bool CanEnqueue(); - bool CanEnqueue(Handle function); JobMap::const_iterator GetJobFor(Handle shared) const; void ConsiderJobForBackgroundProcessing(CompilerDispatcherJob* job); void ScheduleMoreWorkerTasksIfNeeded(); @@ -141,17 +143,16 @@ class V8_EXPORT_PRIVATE CompilerDispatcher { void ScheduleAbortTask(); void DoBackgroundWork(); void DoIdleWork(double deadline_in_seconds); - JobId Enqueue(std::unique_ptr job); - JobId EnqueueAndStep(std::unique_ptr job); // Returns job if not removed otherwise iterator following the removed job. JobMap::const_iterator RemoveIfFinished(JobMap::const_iterator job); // Returns iterator to the inserted job. JobMap::const_iterator InsertJob(std::unique_ptr job); // Returns iterator following the removed job. JobMap::const_iterator RemoveJob(JobMap::const_iterator job); - bool FinishNow(CompilerDispatcherJob* job); Isolate* isolate_; + AccountingAllocator* allocator_; + WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_; Platform* platform_; size_t max_stack_size_; @@ -168,6 +169,9 @@ class V8_EXPORT_PRIVATE CompilerDispatcher { // Mapping from job_id to job. JobMap jobs_; + // Mapping from job_id to SharedFunctionInfo. + JobIdToSharedMap job_id_to_shared_; + // Mapping from SharedFunctionInfo to the corresponding unoptimized // compilation's JobId; SharedToJobIdMap shared_to_unoptimized_job_id_; diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc index 47b2181a88bf50..cfdd91db14ae31 100644 --- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc +++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc @@ -41,12 +41,16 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask { public: explicit CompileTask(Isolate* isolate, OptimizingCompileDispatcher* dispatcher) - : CancelableTask(isolate), isolate_(isolate), dispatcher_(dispatcher) { + : CancelableTask(isolate), + isolate_(isolate), + worker_thread_runtime_call_stats_( + isolate->counters()->worker_thread_runtime_call_stats()), + dispatcher_(dispatcher) { base::LockGuard lock_guard(&dispatcher_->ref_count_mutex_); ++dispatcher_->ref_count_; } - virtual ~CompileTask() {} + ~CompileTask() override = default; private: // v8::Task overrides. 
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 47b2181a88bf50..cfdd91db14ae31 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -41,12 +41,16 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
  public:
   explicit CompileTask(Isolate* isolate,
                        OptimizingCompileDispatcher* dispatcher)
-      : CancelableTask(isolate), isolate_(isolate), dispatcher_(dispatcher) {
+      : CancelableTask(isolate),
+        isolate_(isolate),
+        worker_thread_runtime_call_stats_(
+            isolate->counters()->worker_thread_runtime_call_stats()),
+        dispatcher_(dispatcher) {
     base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
     ++dispatcher_->ref_count_;
   }
 
-  virtual ~CompileTask() {}
+  ~CompileTask() override = default;
 
  private:
   // v8::Task overrides.
@@ -56,8 +60,13 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
     DisallowHandleDereference no_deref;
 
     {
-      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
+      WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
+          worker_thread_runtime_call_stats_);
+      RuntimeCallTimerScope runtimeTimer(
+          runtime_call_stats_scope.Get(),
+          RuntimeCallCounterId::kRecompileConcurrent);
 
+      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
       TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                    "V8.RecompileConcurrent");
 
@@ -77,6 +86,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
   }
 
   Isolate* isolate_;
+  WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
   OptimizingCompileDispatcher* dispatcher_;
 
   DISALLOW_COPY_AND_ASSIGN(CompileTask);
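Both the CompileTask changes above and the OffThreadRuntimeCallStatsScope introduced in the next file rely on the same RAII idiom: save the current runtime-call-stats pointer, swap in a worker-thread table for the duration of the scope, and restore the original in the destructor. A simplified standalone sketch of that save/swap/restore pattern, not the actual V8 classes:

#include <cstdio>

struct RuntimeCallStats { int recompile_events = 0; };

// Simplified version of the scope objects used in these hunks: point a
// consumer at a worker-thread stats table while the scope is alive, then
// restore the original table on destruction.
class StatsScope {
 public:
  StatsScope(RuntimeCallStats** slot, RuntimeCallStats* worker_stats)
      : slot_(slot), original_(*slot) {
    *slot_ = worker_stats;  // swap in the worker-thread table
  }
  ~StatsScope() { *slot_ = original_; }  // restore on scope exit

 private:
  RuntimeCallStats** slot_;
  RuntimeCallStats* original_;
};

int main() {
  RuntimeCallStats main_thread_stats, worker_stats;
  RuntimeCallStats* current = &main_thread_stats;
  {
    StatsScope scope(&current, &worker_stats);
    current->recompile_events++;  // attributed to the worker table
  }
  // Outside the scope, events go back to the main-thread table.
  std::printf("worker=%d main=%d\n", worker_stats.recompile_events,
              main_thread_stats.recompile_events);
  return 0;
}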
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
index 2e8065ed11b1d7..7deddd4f8f38a1 100644
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
+++ b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
@@ -5,11 +5,9 @@
 #include "src/compiler-dispatcher/unoptimized-compile-job.h"
 
 #include "src/assert-scope.h"
-#include "src/base/optional.h"
 #include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
 #include "src/compiler.h"
 #include "src/flags.h"
-#include "src/global-handles.h"
 #include "src/interpreter/interpreter.h"
 #include "src/isolate.h"
 #include "src/objects-inl.h"
@@ -25,218 +23,135 @@ namespace internal {
 
 namespace {
 
-class OneByteWrapper : public v8::String::ExternalOneByteStringResource {
+// A scope object that ensures a parse info's runtime call stats field is set
+// correctly during worker-thread compile, and restores it after going out of
+// scope.
+class OffThreadRuntimeCallStatsScope {
  public:
-  OneByteWrapper(const void* data, int length) : data_(data), length_(length) {}
-  ~OneByteWrapper() override = default;
-
-  const char* data() const override {
-    return reinterpret_cast<const char*>(data_);
+  OffThreadRuntimeCallStatsScope(
+      ParseInfo* parse_info,
+      WorkerThreadRuntimeCallStats* worker_thread_runtime_stats)
+      : parse_info_(parse_info),
+        original_runtime_call_stats_(parse_info_->runtime_call_stats()),
+        worker_thread_scope_(worker_thread_runtime_stats) {
+    parse_info_->set_runtime_call_stats(worker_thread_scope_.Get());
   }
 
-  size_t length() const override { return static_cast<size_t>(length_); }
-
- private:
-  const void* data_;
-  int length_;
-
-  DISALLOW_COPY_AND_ASSIGN(OneByteWrapper);
-};
-
-class TwoByteWrapper : public v8::String::ExternalStringResource {
- public:
-  TwoByteWrapper(const void* data, int length) : data_(data), length_(length) {}
-  ~TwoByteWrapper() override = default;
-
-  const uint16_t* data() const override {
-    return reinterpret_cast<const uint16_t*>(data_);
+  ~OffThreadRuntimeCallStatsScope() {
+    parse_info_->set_runtime_call_stats(original_runtime_call_stats_);
   }
 
-  size_t length() const override { return static_cast<size_t>(length_); }
-
  private:
-  const void* data_;
-  int length_;
-
-  DISALLOW_COPY_AND_ASSIGN(TwoByteWrapper);
+  ParseInfo* parse_info_;
+  RuntimeCallStats* original_runtime_call_stats_;
+  WorkerThreadRuntimeCallStatsScope worker_thread_scope_;
 };
 
 }  // namespace
 
-UnoptimizedCompileJob::UnoptimizedCompileJob(Isolate* isolate,
-                                             CompilerDispatcherTracer* tracer,
-                                             Handle<SharedFunctionInfo> shared,
-                                             size_t max_stack_size)
+UnoptimizedCompileJob::UnoptimizedCompileJob(
+    CompilerDispatcherTracer* tracer, AccountingAllocator* allocator,
+    const ParseInfo* outer_parse_info, const AstRawString* function_name,
+    const FunctionLiteral* function_literal,
+    WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
+    size_t max_stack_size)
     : CompilerDispatcherJob(Type::kUnoptimizedCompile),
-      main_thread_id_(isolate->thread_id().ToInteger()),
       tracer_(tracer),
-      allocator_(isolate->allocator()),
-      context_(isolate->global_handles()->Create(isolate->context())),
-      shared_(isolate->global_handles()->Create(*shared)),
+      allocator_(allocator),
+      worker_thread_runtime_stats_(worker_thread_runtime_stats),
       max_stack_size_(max_stack_size),
       trace_compiler_dispatcher_jobs_(FLAG_trace_compiler_dispatcher_jobs) {
-  DCHECK(!shared_->is_toplevel());
-  // TODO(rmcilroy): Handle functions with non-empty outer scope info.
-  DCHECK(!shared_->HasOuterScopeInfo());
-  HandleScope scope(isolate);
-  Handle